2 changed files with 8 additions and 100 deletions
@ -1,107 +1,14 @@
|
||||
import re

import markovify
import pronouncing
import textstat
from mastodon import Mastodon

# Each generated line must fit a singable meter: at most 10 syllables,
# and a seed line's last word needs enough known rhymes to give the
# matching-line search a realistic chance of success.
MAX_SYLLABLES = 10
MIN_RHYMES = 4
MATCH_TRIES = 2000


def _clean_sentence(model):
    """Return one Markov-generated sentence with a trailing ',' or ';' removed.

    make_sentence() returns None when it cannot produce a sufficiently novel
    sentence within its try budget; retry until we get real text so callers
    never see None (the original passed None straight into re.sub and crashed).
    """
    sentence = None
    while sentence is None:
        sentence = model.make_sentence(tries=10000)
    return re.sub(r'[,;]$', '', sentence)


def _last_word(line):
    """Return the last word of *line* stripped to letters only, for rhyme lookup."""
    return re.sub(r'[^A-Za-z]', '', line.split(" ")[-1])


def _seed_line(model):
    """Generate a line usable as verse line 1 or 2.

    Keeps sampling until the line has 1..MAX_SYLLABLES syllables and its
    last word has at least MIN_RHYMES rhymes in the CMU dictionary.

    Returns:
        (line, syllable_count, rhyme_list)
    """
    while True:
        line = _clean_sentence(model)
        syllables = textstat.syllable_count(line)
        rhymes = pronouncing.rhymes(_last_word(line))
        if 0 < syllables <= MAX_SYLLABLES and len(rhymes) >= MIN_RHYMES:
            return line, syllables, rhymes


def _matching_line(model, syllables, rhymes):
    """Search for a line matching a seed line's meter and rhyme.

    Tries up to MATCH_TRIES candidates for one whose syllable count equals
    *syllables* and whose last word appears in *rhymes*.  Mirrors the
    original's bounded search: after the budget is exhausted the last
    candidate is returned with matched=False so the caller can start over.

    Returns:
        (line, matched)
    """
    line = ""
    for _ in range(MATCH_TRIES):
        line = _clean_sentence(model)
        if (textstat.syllable_count(line) == syllables
                and _last_word(line) in rhymes):
            return line, True
    return line, False


def _verse(model):
    """Build one ABAB quatrain: lines 1/3 and 2/4 rhyme and match in syllables.

    If either matching-line search fails within its budget, all four lines
    are regenerated from scratch (same retry semantics as the original's
    outer dont-rhyme loop).
    """
    while True:
        first, first_syl, first_rhymes = _seed_line(model)
        second, second_syl, second_rhymes = _seed_line(model)
        third, third_ok = _matching_line(model, first_syl, first_rhymes)
        fourth, fourth_ok = _matching_line(model, second_syl, second_rhymes)
        if third_ok and fourth_ok:
            return "\n".join((first, second, third, fourth))


# Build the Markov model from the newline-delimited corpus.
# NOTE: markovify requires the corpus TEXT, not the open file object —
# passing the file object (as the original did) raises at model build time.
with open('corpus.txt') as corpus:
    mmodel = markovify.NewlineText(corpus.read())

# Accumulate verses (each followed by a blank separator line) until the
# stripped shanty reaches 9 lines: two 4-line verses plus the separator.
verses = []
shanty = ""
while len(shanty.strip().split("\n")) < 9:
    verses.append(_verse(mmodel))
    shanty = "\n\n".join(verses) + "\n\n"
shanty = shanty.strip()

mastodon = Mastodon(
    access_token='shantybot_usercred.secret',
    api_base_url='https://botsin.space',
)

# CW the post so timelines aren't flooded with an 9-line poem.
mastodon.status_post(shanty, spoiler_text="A sea shanty written by a bot")
||||
|
||||
Loading…
Reference in new issue