# blankx.gitlab.io/genbloglist.py
#
# Generate an HTML blog list from a shlex-formatted subscription file.
# (Header reconstructed from web-paste artifacts: "37 lines / 1.5 KiB / Python".)
import sys
import html
import shlex
from urllib.parse import urlparse, urlunparse
import feedparser
# Read a shlex-formatted subscription list (argv[1]): each non-comment line is
# "<url> <tag> ...". For every entry tagged 'blog', resolve its feed with
# feedparser and append an HTML <li> — main-page link, title, feed link, and
# optional description — to the output file (argv[2]), echoing each line to stdout.
with open(sys.argv[1]) as file, open(sys.argv[2], 'w+') as out:
    for line in file:
        fields = shlex.split(line, comments=True)
        # Skip blank/comment lines (fields == []) and entries not tagged 'blog'.
        if 'blog' not in fields[1:]:
            continue
        try:
            d = feedparser.parse(fields[0])
        except Exception as e:
            # feedparser swallows most network/parse errors itself; this guards
            # against anything it lets escape so one bad entry can't abort the run.
            print(fields[0], 'raised', e, file=sys.stderr)
            continue
        feedurl = d['href']
        if not d['entries']:
            print(feedurl, 'has no entries, skipping', file=sys.stderr)
            continue
        if 'links' in d['feed']:
            # Prefer the feed's advertised HTML alternate link as the main page.
            url = next(filter(lambda lnk: lnk['type'] == 'text/html', d['feed']['links']))['href']
        else:
            # Fall back to the feed URL's origin (path cleared), then let the
            # operator confirm or override it interactively.
            url = list(urlparse(feedurl))
            url[2] = ''  # index 2 is the path component
            url = urlunparse(url)
            print(f'No mention of main page on {feedurl}, please see {url} or enter main page url: ', file=sys.stderr, end='', flush=True)
            url = input().strip() or url
        text = f'<li><b><a href="{html.escape(url)}">{html.escape(d["feed"]["title"])}</a> (<a href="{html.escape(feedurl)}">feed</a>)'
        # Append the description when present and non-empty; reuse the walrus
        # binding instead of re-fetching the same dict entry.
        if desc := d['feed'].get('description'):
            text += f':</b> {html.escape(desc)}'
        else:
            text += '</b>'
        text += '</li>\n'
        print(text, end='')
        out.write(text)