amber-ebooks/functions.py

113 lines
3.5 KiB
Python
Raw Normal View History

2018-10-09 01:11:51 +00:00
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import markovify
2019-01-11 12:47:42 +00:00
from bs4 import BeautifulSoup
from random import randint
import re, multiprocessing, sqlite3, shutil, os, html
2018-10-09 01:11:51 +00:00
def make_sentence(output, cfg):
    """Build one markov-generated post and send it through *output*.

    Runs in a worker process (see make_toot). Reads learned posts from a
    throwaway copy of toots.db, trains a markovify model on them, generates
    a sentence, applies the configured mention/word filters, and sends the
    result (a str, or None on failure) down the pipe.

    output -- writable end of a multiprocessing.Pipe
    cfg    -- bot configuration dict (learn_from_cw, overlap_ratio_enabled,
              overlap_ratio, limit_length, length_lower_limit,
              length_upper_limit, mention_handling, word_filter)
    """
    class nlt_fixed(markovify.NewlineText):  # modified version of NewlineText that never rejects sentences
        def test_sentence_input(self, sentence):
            return True  # all sentences are valid <3

    # create a copy of the database because reply.py will be using the main one
    shutil.copyfile("toots.db", "toots-copy.db")
    db = sqlite3.connect("toots-copy.db")
    db.text_factory = str
    c = db.cursor()
    if cfg['learn_from_cw']:
        toots = c.execute("SELECT content FROM `toots` ORDER BY RANDOM() LIMIT 10000").fetchall()
    else:
        toots = c.execute("SELECT content FROM `toots` WHERE cw = 0 ORDER BY RANDOM() LIMIT 10000").fetchall()

    if len(toots) == 0:
        output.send("Database is empty! Try running main.py.")
        return

    # with overlap checking disabled we use the lenient subclass above
    nlt = markovify.NewlineText if cfg['overlap_ratio_enabled'] else nlt_fixed
    model = nlt(
        "\n".join([toot[0] for toot in toots])
    )

    db.close()
    os.remove("toots-copy.db")

    if cfg['limit_length']:
        sentence_len = randint(cfg['length_lower_limit'], cfg['length_upper_limit'])

    sentence = None
    tries = 0
    while sentence is None and tries < 10:
        sentence = model.make_short_sentence(
            max_chars=500,
            tries=10000,
            max_overlap_ratio=cfg['overlap_ratio'] if cfg['overlap_ratio_enabled'] else 0.7,
            max_words=sentence_len if cfg['limit_length'] else None
        )
        tries = tries + 1

    # sentence may still be None if all 10 attempts failed; skip the
    # post-processing in that case instead of crashing on re.sub(None)
    # (make_toot treats a None result as "post failed").
    if sentence is not None:
        # optionally remove mentions
        if cfg['mention_handling'] == 1:
            sentence = re.sub(r"^\S*@\u200B\S*\s?", "", sentence)
        elif cfg['mention_handling'] == 0:
            sentence = re.sub(r"\S*@\u200B\S*\s?", "", sentence)

        # optionally regenerate the post if it has a filtered word.
        # TODO: case-insensitivity, scunthorpe problem
        if cfg['word_filter'] == 1:
            # "with" closes the file even on error; the old try/finally hit an
            # unbound name if open() itself raised.
            with open('./filter.txt') as fp:
                for word in fp:
                    word = word.rstrip("\n")
                    if word.lower() in sentence:
                        sentence = ""

    output.send(sentence)
def make_toot(cfg):
    """Generate a post, giving the generator process five seconds to finish.

    Spawns make_sentence in a child process and reads its result from a pipe.
    Returns the generated text, or "post failed" if generation timed out,
    crashed, or produced None.
    """
    toot = None
    # one-way pipe: parent reads from pin, child writes to pout
    pin, pout = multiprocessing.Pipe(False)
    p = multiprocessing.Process(target=make_sentence, args=[pout, cfg])
    p.start()
    p.join(5)  # wait 5 seconds to get something
    if p.is_alive():  # if it's still trying to make a toot after 5 seconds
        p.terminate()  # give up
        p.join()
    else:
        # poll() first: if the child exited without sending anything (e.g. it
        # crashed), a bare recv() would block this process forever because the
        # parent still holds the write end of the pipe.
        if pin.poll():
            toot = pin.recv()
    if toot is None:  # if the process timed out or crashed
        toot = "post failed"
    return toot
def extract_toot(toot):
    """Convert a post's HTML content to plain text.

    Turns <br> and paragraph boundaries into newlines, unwraps hashtag links,
    replaces other links with their URL, and rewrites profile URLs back into
    @user@instance-style mentions. Returns the text with trailing newlines
    stripped.
    """
    toot = re.sub("<br>", "\n", toot)
    toot = html.unescape(toot)  # convert HTML escape codes to text
    soup = BeautifulSoup(toot, "html.parser")
    for lb in soup.select("br"):  # replace <br> with linebreak
        # replace_with inserts a real newline into the text; assigning to
        # .name is a no-op for get_text(), which never emits tag names.
        lb.replace_with("\n")
    for p in soup.select("p"):  # ditto for <p> -- keep paragraph breaks
        p.insert_after("\n")
    for ht in soup.select("a.hashtag"):  # convert hashtags from links to text
        ht.unwrap()
    for link in soup.select("a"):  # convert <a href='https://example.com>example.com</a> to just https://example.com
        # has_attr() checks the tag's attributes; "'href' in link" tested the
        # tag's *contents* and so almost never matched.
        # apparently not all a tags have a href, which is understandable if you're doing normal web stuff, but on a social media platform??
        if link.has_attr('href'):
            link.replace_with(link["href"])
    text = soup.get_text()
    text = re.sub(r"https://([^/]+)/(@[^\s]+)", r"\2@\1", text)  # put mastodon-style mentions back in
    text = re.sub(r"https://([^/]+)/users/([^\s/]+)", r"@\2@\1", text)  # put pleroma-style mentions back in
    text = text.rstrip("\n")  # remove trailing newline(s)
    return text