amber-ebooks/functions.py

87 lines
3.0 KiB
Python
Raw Normal View History

2018-10-09 01:11:51 +00:00
#!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import markovify
2019-01-11 12:47:42 +00:00
from bs4 import BeautifulSoup
2019-01-11 12:57:18 +00:00
import re, multiprocessing, sqlite3, shutil, os, json
2018-10-09 01:11:51 +00:00
def make_sentence(output):
	"""Build a markov model from the toot database and send one generated
	sentence through *output* (the writable end of a multiprocessing.Pipe).

	Runs in a worker process (see make_toot_markov). Sends None when no
	sentence could be generated after ten attempts, so the parent can
	always expect exactly one message on the pipe.
	"""
	class nlt_fixed(markovify.NewlineText): #modified version of NewlineText that never rejects sentences
		def test_sentence_input(self, sentence):
			return True #all sentences are valid <3

	shutil.copyfile("toots.db", "toots-copy.db") #create a copy of the database because reply.py will be using the main one
	db = sqlite3.connect("toots-copy.db")
	db.text_factory = str
	c = db.cursor()
	toots = c.execute("SELECT content FROM `toots` ORDER BY RANDOM() LIMIT 10000").fetchall()

	#join once instead of += in a loop: linear instead of quadratic over up to 10000 rows
	toots_str = "".join("\n{}".format(toot[0]) for toot in toots)
	model = nlt_fixed(toots_str)
	toots_str = None #release the (potentially large) corpus string
	db.close()
	os.remove("toots-copy.db")

	sentence = None
	tries = 0
	while sentence is None and tries < 10:
		sentence = model.make_short_sentence(500, tries = 10000)
		tries = tries + 1

	#bug fix: sentence may still be None after ten failed attempts; re.sub on None
	#raises TypeError and kills this worker before it sends anything, leaving the
	#parent waiting on an empty pipe. Only strip pings when we actually have text.
	if sentence is not None:
		#remove leading pings (don't say "@bob blah blah" but still say "blah @bob blah")
		sentence = re.sub("^(?:@\u202B[^ ]* )*", "", sentence)
		sentence = re.sub("^(?:@\u200B[^ ]* )*", "", sentence)

	output.send(sentence)
def make_toot(force_markov = False, args = None):
	"""Generate a toot dict ({"toot": str, "media": None}).

	Both parameters are accepted for interface compatibility but are
	currently ignored -- generation is always delegated to
	make_toot_markov().
	"""
	return make_toot_markov()
def make_toot_markov(query = None):
	"""Generate a toot via the markov worker, with a 10-second timeout per
	attempt and at most ten attempts overall.

	query is currently unused (kept for interface compatibility).
	Returns a dict: {"toot": generated text or a failure notice, "media": None}.
	"""
	tries = 0
	toot = None
	while toot is None and tries < 10: #try to make a toot 10 times
		pin, pout = multiprocessing.Pipe(False)
		p = multiprocessing.Process(target = make_sentence, args = [pout])
		p.start()
		p.join(10) #wait 10 seconds to get something
		if p.is_alive(): #if it's still trying to make a toot after 10 seconds
			p.terminate() #give up
			p.join()
			toot = None
		else:
			#bug fix: if the worker died without sending (e.g. it crashed),
			#a bare recv() would block forever -- only recv when data is queued
			toot = pin.recv() if pin.poll() else None
		#bug fix: previously tries was only incremented on the timeout branch,
		#so a worker that kept returning None caused an infinite loop
		tries = tries + 1
	if toot is None: #if we've tried and failed ten times, just give up
		toot = "Toot generation failed! Contact Lynne (lynnesbian@fedi.lynnesbian.space) for assistance."
	return {
		"toot": toot,
		"media": None
	}
def extract_toot(toot):
	"""Convert a toot's HTML content into plain text.

	Replaces <br>/<p> with newlines, turns hashtag links into bare text,
	replaces other links with their target URL, and rewrites profile URLs
	back into @user@instance mentions. Returns the cleaned string.
	"""
	toot = toot.replace("&apos;", "'") #convert HTML stuff to normal stuff
	toot = toot.replace("&quot;", '"') #ditto
	soup = BeautifulSoup(toot, "html.parser")
	for lb in soup.select("br"): #replace <br> with linebreak
		lb.insert_after("\n")
		lb.decompose()
	for p in soup.select("p"): #ditto for <p>
		p.insert_after("\n")
		p.unwrap()
	#must run before the generic <a> pass below, or hashtags would become URLs
	for ht in soup.select("a.hashtag"): #make hashtags no longer links, just text
		ht.unwrap()
	for link in soup.select("a"): #convert <a href='https://example.com'>example.com</a> to just https://example.com
		#bug fix: remote HTML is untrusted; an <a> without href raised KeyError
		link.insert_after(link.get("href", ""))
		link.decompose()
	text = soup.get_text()
	text = re.sub("https://([^/]+)/(@[^ ]+)", r"\2@\1", text) #put mastodon-style mentions back in
	text = re.sub("https://([^/]+)/users/([^ ]+)", r"@\2@\1", text) #put pleroma-style mentions back in
	text = text.rstrip("\n") #remove trailing newline
	return text