import sys
import html
import shlex
from urllib.parse import urlparse, urlunparse

import feedparser

# Read shell-style lines "<url> <tag>..." (with '#' comments) from argv[1];
# for every line tagged 'blog', fetch/parse the feed, work out the site's
# main page, and append a two-bullet snippet to argv[2] (echoed to stdout).
with open(sys.argv[1]) as file, open(sys.argv[2], 'w+') as out:
    for line in file:
        inp = shlex.split(line, comments=True)
        if not inp:
            # blank or comment-only line — the original crashed on inp[0]
            continue
        if 'blog' not in inp[1:]:
            continue
        try:
            d = feedparser.parse(inp[0])
        except Exception as e:
            # best-effort: report the bad source and keep going
            print(inp[0], 'raised', e, file=sys.stderr)
            continue
        feedurl = d['href']
        if not d['entries']:
            print(feedurl, 'has no entries, skipping', file=sys.stderr)
            continue
        # Prefer the feed's advertised HTML alternate link as the main page.
        # `.get()` on both levels: a missing 'links' list or a link without a
        # 'type' no longer raises; we fall through to the derived URL instead.
        url = next(
            (link['href'] for link in d['feed'].get('links', [])
             if link.get('type') == 'text/html'),
            None,
        )
        if url is None:
            # Derive a candidate by stripping the path off the feed URL,
            # then let the user override it interactively.
            parts = list(urlparse(feedurl))
            parts[2] = ''  # clear the path component
            url = urlunparse(parts)
            print(f'No mention of main page on {feedurl}, please see {url} '
                  'or enter main page url: ',
                  file=sys.stderr, end='', flush=True)
            url = input().strip() or url
        text = f'\n  • {html.escape(d["feed"]["title"])} (feed)'
        # Single description lookup (the original fetched it three times and
        # had a dead `else: text += ''` branch).
        if desc := d['feed'].get('description'):
            text += f': {html.escape(desc)}'
        # NOTE(review): the original computed `url` but never emitted it and
        # left a dangling empty bullet here — it almost certainly belongs on
        # this line; confirm against the intended output format.
        text += f'\n  • {url}\n'
        print(text, end='')
        out.write(text)