from googletrans import Translator
import spacy
import gradio as gr
import nltk
from nltk.corpus import wordnet
import wikipedia
import re
import time
import random
import os
import zipfile
import ffmpeg
from gtts import gTTS
#from io import BytesIO
from collections import Counter
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from docx import Document
import textwrap
import pandas as pd
# NLTK data downloads - keep these enabled when running on Huggingface Spaces
nltk.download('maxent_ne_chunker') #Chunker
nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
nltk.download('words') #200 000+ Alphabetical order list
nltk.download('punkt') #Tokenizer
nltk.download('verbnet') #For Description of Verbs
nltk.download('omw')
nltk.download('omw-1.4') #Multilingual Wordnet
nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
nltk.download('shakespeare')
nltk.download('dolch') #Sight words
nltk.download('names') #People Names NER
nltk.download('gazetteers') #Location NER
nltk.download('opinion_lexicon') #Sentiment words
nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
spacy.cli.download("en_core_web_sm")
spacy.cli.download('ko_core_news_sm')
spacy.cli.download('ja_core_news_sm')
spacy.cli.download('zh_core_web_sm')
nlp = spacy.load('en_core_web_sm')
translator = Translator()
def Sentencechunker(sentence):
    Sentchunks = sentence.split(" ")
    chunks = []
    for i in range(len(Sentchunks)):
        chunks.append(" ".join(Sentchunks[:i+1]))
    return " | ".join(chunks)
def ReverseSentenceChunker(sentence):
    reversed_sentence = " ".join(reversed(sentence.split()))
    chunks = Sentencechunker(reversed_sentence)
    return chunks
def three_words_chunk(sentence):
    words = sentence.split()
    chunks = [words[i:i+3] for i in range(len(words)-2)]
    chunks = [" ".join(chunk) for chunk in chunks]
    return " | ".join(chunks)
def keep_nouns_verbs(sentence):
    doc = nlp(sentence)
    nouns_verbs = []
    for token in doc:
        if token.pos_ in ['NOUN', 'VERB', 'PUNCT']:
            nouns_verbs.append(token.text)
    return " ".join(nouns_verbs)
def unique_word_count(text="", state=None):
    if state is None:
        state = {}
    words = text.split()
    word_counts = state
    for word in words:
        if word in word_counts:
            word_counts[word] += 1
        else:
            word_counts[word] = 1
    sorted_word_counts = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
    return sorted_word_counts  # the stray trailing comma previously wrapped this in a one-element tuple
def Wordchunker(word):
    chunks = []
    for i in range(len(word)):
        chunks.append(word[:i+1])
    return chunks
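# Example (illustrative): Wordchunker("cat") -> ['c', 'ca', 'cat']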
def BatchWordChunk(sentence):
    words = sentence.split(" ")
    FinalOutput = ""
    Currentchunks = ""
    ChunksasString = ""
    for word in words:
        ChunksasString = ""
        Currentchunks = Wordchunker(word)
        for chunk in Currentchunks:
            ChunksasString += chunk + " "
        FinalOutput += "\n" + ChunksasString
    return FinalOutput
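# Example (illustrative): BatchWordChunk("go now") returns
# "\ng go \nn no now " -- each word spelled out cumulatively on its own line.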
# Translation target language options (default: German) for the chunking interfaces below
langdest = gr.Dropdown(choices=["af", "de", "es", "ko", "ja", "zh-cn"], label="Choose Language", value="de")
ChunkModeDrop = gr.Dropdown(choices=["Chunks", "Reverse", "Three Word Chunks", "Spelling Chunks"], label="Choose Chunk Type", value="Chunks")
def FrontRevSentChunk(Chunkmode, Translate, Text, langdest):
    FinalOutput = ""
    TransFinalOutput = ""
    if Chunkmode == "Chunks":
        FinalOutput += Sentencechunker(Text)
    if Chunkmode == "Reverse":
        FinalOutput += ReverseSentenceChunker(Text)
    if Chunkmode == "Three Word Chunks":
        FinalOutput += three_words_chunk(Text)
    if Chunkmode == "Spelling Chunks":
        FinalOutput += BatchWordChunk(Text)
    if Translate:
        TransFinalOutput = FinalOutput
        translated = translator.translate(TransFinalOutput, dest=langdest)
        FinalOutput += "\n" + translated.text
    return FinalOutput
# Define a function to filter out non-verb, noun, or adjective words
def filter_words(words):
    # Use NLTK to tag each word with its part of speech
    tagged_words = nltk.pos_tag(words)
    # Define a set of parts of speech to keep (verbs, nouns, adjectives)
    keep_pos = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}
    # Filter the list to only include words with the desired parts of speech
    filtered_words = [word for word, pos in tagged_words if pos in keep_pos]
    return filtered_words
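# Example (illustrative; NLTK usually drops only the determiner here):
# filter_words(["the", "quick", "dog", "runs"]) -> ['quick', 'dog', 'runs']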
def SepHypandSynExpansion(text):
    # Tokenize the text
    tokens = nltk.word_tokenize(text)
    NoHits = ""
    FinalOutput = ""
    # Find synonyms and hypernyms of each word in the text
    for token in tokens:
        synonyms = []
        hypernyms = []
        for synset in wordnet.synsets(token):
            synonyms += synset.lemma_names()
            hypernyms += [hypernym.name() for hypernym in synset.hypernyms()]
        if not synonyms and not hypernyms:
            NoHits += f"{token} | "
        else:
            FinalOutput += "\n" + f"{token}: hypernyms={hypernyms}, synonyms={synonyms} \n"
    NoHits = set(NoHits.split(" | "))
    NoHits = filter_words(NoHits)
    NoHits = "Words to pay special attention to: \n" + str(NoHits)
    return NoHits, FinalOutput
def WikiSearch(term):
    termtoks = term.split(" ")
    results = []
    for item in termtoks:
        # Search for each word on Wikipedia and collect the top suggestions
        # (the original returned inside the loop, so only the first word was ever searched)
        results += wikipedia.search(item, results=20)
    return results
def create_dictionary(word_list, word_dict=None):
    # Avoid a shared mutable default argument between calls
    if word_dict is None:
        word_dict = {}
    word_list = set(word_list.split(" "))
    for word in word_list:
        key = word[:2]
        if key not in word_dict:
            word_dict[key] = [word]
        else:
            word_dict[key].append(word)
    return word_dict
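# Example (illustrative; set iteration order may vary):
# create_dictionary("apple apricot banana") -> {'ap': ['apple', 'apricot'], 'ba': ['banana']}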
def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
    files = [roman_file, w4w_file, full_mean_file, macaronic_file]
    merged_lines = []
    with open(roman_file.name, "r") as f1, open(w4w_file.name, "r") as f2, \
         open(full_mean_file.name, "r") as f3, open(macaronic_file.name, "r") as f4:
        for lines in zip(f1, f2, f3, f4):
            merged_line = "\n".join(line.strip() for line in lines)
            merged_lines.append(merged_line)
    return "\n".join(merged_lines)
TTSLangOptions = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
TTSLangOptions2 = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
def TTSforListeningPractice(text, language="en", Repeat10x=False):
    if Repeat10x:
        text = text * 10
    speech = gTTS(text=text, lang=language, slow=False)  # slow expects a boolean; the string "False" is truthy
    speech.save("CurrentTTSFile.mp3")
    #file = BytesIO()
    #speech.write_to_fp(file)
    #file.seek(0)
    return "CurrentTTSFile.mp3"  #file
def AutoChorusInvestigator(sentences):
    sentences = sentences.splitlines()
    # Use Counter to count the number of occurrences of each sentence
    sentence_counts = Counter(sentences)
    # Identify duplicate sentences
    duplicates = [s for s, count in sentence_counts.items() if count > 1]
    FinalOutput = ""
    if len(duplicates) == 0:
        FinalOutput += "No duplicate sentences found in the file."
    else:
        FinalOutput += "The following sentences appear more than once in the file:"
        for sentence in duplicates:
            FinalOutput += "\n" + sentence
    return FinalOutput
def AutoChorusPerWordScheduler(sentences):
    words = set(sentences.split(" "))
    wordsoneattime = []
    practicestring = ""
    FinalOutput = "This is supposed to output the words in repetition format (i.e. a schedule for repetition) \nCurrent Idea = 1 new word every min and 1 old word every second" + "\n\nWords: \n"
    for word in words:
        wordsoneattime.append(word)
        for i in range(0, 59):
            practicestring += word + " "
            practicestring += random.choice(wordsoneattime) + " "
        FinalOutput += word + "\n "
        practicestring += "\n"
    FinalOutput += practicestring
    return FinalOutput
def group_words(inlist):
    inlisttoks = inlist.split(" ")
    inlistset = set(inlisttoks)
    word_groups = []
    current_group = []
    for word in inlisttoks:
        current_group.append(word)
        if len(current_group) == 10:
            word_groups.append(current_group)
            current_group = []
    if current_group:
        word_groups.append(current_group)
    current_group_index = 0
    current_group_time = 0
    while True:
        if current_group_time == 60:
            current_group_index = (current_group_index + 1) % len(word_groups)
            current_group_time = 0
        else:
            if current_group_time % 10 == 0:
                random.shuffle(word_groups[current_group_index])
            current_group_time += 10
        yield " ".join(word_groups[current_group_index])
        time.sleep(10)
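# Usage note (illustrative): group_words is a generator -- it yields the current ten-word group
# roughly every 10 seconds, reshuffling it on each yield and rotating to the next group after
# 60 seconds, which is why the Gradio interface below streams its output rather than calling it once.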
def split_verbs_nouns(text):
    nlp = spacy.load("en_core_web_sm")
    doc = nlp(text)
    verbs_nouns = []
    other_words = []
    pos_string = []
    for token in doc:
        if token.pos_ in ["VERB", "NOUN"]:
            verbs_nouns.append(token.text)
        elif token.text in [punct.text for punct in doc if punct.is_punct]:
            verbs_nouns.append(token.text)
            other_words.append(token.text)
        else:
            other_words.append(token.text)
        pos_string.append(token.pos_)
    verbs_nouns_text = " ".join(verbs_nouns)
    other_words_text = " ".join(other_words)
    pos_string_text = " ".join(pos_string)
    return pos_string_text, verbs_nouns_text, other_words_text
SRTLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
def save_string_to_file(string_to_save, file_name, srtdocx):
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(string_to_save)
    if srtdocx == "True":
        with open(file_name.split('.')[0] + '.srt', 'w', encoding='utf-8') as file:
            file.write(string_to_save)
        srtdocument = Document()
        srtdocument.add_paragraph(string_to_save)
        srtdocument.save('SplitSRT.docx')
def split_srt_file(text, lang):  #file_path):
    # Open the SRT file and read its contents
    #with open(file_path, 'r') as f:
    #    srt_contents = f.read()
    if lang == "en": nlp = spacy.load('en_core_web_sm')
    if lang == "ja": nlp = spacy.load('ja_core_news_sm')
    if lang == "ko": nlp = spacy.load('ko_core_news_sm')
    if lang == "zh-cn": nlp = spacy.load('zh_core_web_sm')  # was "zn-cn"/'zn_core_web_sm', which never matched the dropdown value
    srt_contents = text
    # Split the SRT file by timestamp
    srt_sections = srt_contents.split('\n\n')
    srt_sections_POSversion = []
    subaswordlist = ""
    # Loop through each section of the SRT file
    for i in range(len(srt_sections)):
        # Split the section into its timestamp and subtitle text
        section_lines = srt_sections[i].split('\n')
        timestamp = section_lines[1]
        subtitle_text = ' | '.join(section_lines[2:])
        sub_split_line = nlp(subtitle_text)
        subtitle_textPOSversion = ""
        subtitle_text = ""
        # Replace spaces in the subtitle text with " | "
        #subtitle_text = subtitle_text.replace(' ', ' | ')
        for token in sub_split_line:
            subtitle_text += token.text + " | "
            subaswordlist += token.text + " "
            subtitle_textPOSversion += token.pos_ + " | "
        # Reconstruct the section with the updated subtitle text
        srt_sections[i] = f"{section_lines[0]}\n{timestamp}\n{subtitle_text[3:]}"
        srt_sections_POSversion.append(f"{section_lines[0]}\n{timestamp}\n{subtitle_textPOSversion[3:]}\n\n")
    SplitSRT = '\n\n'.join(srt_sections)
    SplitPOSsrt = ''.join(srt_sections_POSversion)
    save_string_to_file(SplitSRT, "SplitSRT.txt", "True")
    save_string_to_file(SplitPOSsrt, "SplitPOSsrt.txt", "False")
    subaswordlist = set(subaswordlist.split(" "))
    subaswordlistOutput = ""
    for word in subaswordlist:
        subaswordlistOutput += "\n | " + word
    subaswordlistOutput = str(len(subaswordlist)) + "\n" + subaswordlistOutput
    # Join the SRT sections back together into a single string
    return subaswordlistOutput, ["SplitSRT.docx", "SplitSRT.txt", "SplitSRT.srt", "SplitPOSsrt.txt"], SplitSRT, SplitPOSsrt
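# Usage note (illustrative): pass the raw contents of an .srt file plus its language code, e.g.
# wordlist, files, split_srt, pos_srt = split_srt_file(srt_text, "en")
# The split versions are also written to SplitSRT.txt / SplitSRT.srt / SplitSRT.docx / SplitPOSsrt.txt.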
def find_string_positions(s, string):
    positions = []
    start = 0
    while True:
        position = s.find(string, start)
        if position == -1:
            break
        positions.append(position)
        start = position + len(string)
    return positions
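# Example (illustrative): find_string_positions("a --> b --> c", " --> ") -> [1, 7]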
def splittext(string):
    string_no_formaterror = string.replace(" -- > ", " --> ")
    split_positions = find_string_positions(string_no_formaterror, " --> ")
    split_strings = []
    prepos = 0
    for pos in split_positions:
        pos -= 12
        split_strings.append((string[prepos:pos]))  #, string[pos:]))
        prepos = pos
    FinalOutput = ""
    stoutput = ""
    linenumber = 1
    #print(linenumber)
    for item in split_strings[1:]:
        stoutput = item[0:29] + "\n" + item[30:]
        stspaces = find_string_positions(stoutput, " ")
        FinalOutput += str(linenumber) + "\n" + stoutput[:stspaces[-2]] + "\n"
        FinalOutput += "\n"
        linenumber += 1
    return FinalOutput[2:]
def VideotoSegment(video_file, subtitle_file):
    # Read the subtitle file and extract the timings for each subtitle
    timings = []
    for line in subtitle_file:
        if '-->' in line:
            start, end = line.split('-->')
            start_time = start.strip().replace(',', '.')
            end_time = end.strip().replace(',', '.')
            timings.append((start_time, end_time))
    # Cut the video into segments based on the subtitle timings
    video_segments = []
    for i, (start_time, end_time) in enumerate(timings):
        output_file = f'segment_{i}.mp4'
        ffmpeg.input(video_file, ss=start_time, to=end_time).output(output_file, codec='copy').run()
        video_segments.append(output_file)
    # Convert each segment to an MP3 audio file using FFmpeg
    audio_segments = []
    for i in range(len(timings)):
        output_file = f'segment_{i}.mp3'
        ffmpeg.input(video_segments[i]).output(output_file, codec='libmp3lame', qscale='4').run()
        audio_segments.append(output_file)
    # Create a ZIP archive containing all of the segmented files
    zip_file = zipfile.ZipFile('segmented_files.zip', 'w')
    for segment in video_segments + audio_segments:
        zip_file.write(segment)
        os.remove(segment)
    zip_file.close()
    # Return the ZIP archive for download
    return 'segmented_files.zip'
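# Usage note (illustrative): VideotoSegment expects a video file path and an iterable of subtitle
# lines, and it needs the ffmpeg binary plus the ffmpeg-python wrapper installed to run.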
def text_to_dropdown(text, id=None):  #TextCompFormat
    # The original HTML template was lost to extraction; a plausible rebuild: one <option> per line.
    lines = text.strip().split("\n")
    options = "\n".join(f'<option value="{line}">{line}</option>' for line in lines)
    html = f'<select id="{id}">\n{options}\n</select> \n'
    return html
def text_to_links(text):  #TextCompFormat
    lines = text.strip().split("\n")
    html = ""
    for line in lines:
        if line.startswith("http"):
            # The anchor markup was stripped by extraction; this is a plausible rebuild.
            html += f'<a href="{line}">-- {line} --</a> | \n'
        else:
            html += line + " Not a link <br> \n"
    return html
HTMLCompMode = gr.Dropdown(choices=["Dropdown", "Links"], value="Links")
def TextCompFormat(text, HTMLCompMode):
    FinalOutput = ""
    if HTMLCompMode == "Dropdown":
        FinalOutput = text_to_dropdown(text)
    if HTMLCompMode == "Links":
        FinalOutput = text_to_links(text)
    return FinalOutput
def create_collapsiblebutton(button_id, button_caption, div_content):
    # The original HTML templates were lost to extraction; this is a plausible rebuild of a
    # collapsible button plus the div it toggles.
    button_html = f'<button type="button" class="collapsible" id="{button_id}">{button_caption}</button>'
    div_html = f'<div class="content" id="{button_id}Div">\n{div_content}\n</div>'
    return button_html + "\n" + div_html
# Gradio interface layout. The original source was garbled during extraction, so the `with gr.Blocks()`
# wrapper, its variable name, the indentation and the string boundaries below are a best-effort
# reconstruction; the recovered component text is kept, with obvious typos fixed. Link markup around
# the listed Space names was lost. groupinput_text, groupoutput_text and UnknownTrackTexttoApp are
# defined elsewhere in the original file and are not recovered here.
with gr.Blocks() as lliface:
    gr.HTML("""Make jokes while following rules for a syllogism jokes game: The game can be played with any number of people. One person starts by stating a syllogism, which is a logical argument that consists of three parts: a major premise, a minor premise, and a conclusion. The next person must then state a syllogism that has the same conclusion as the first syllogism, but with different major and minor premises. The game continues in this way until someone cannot think of a new syllogism. The person who makes the last valid syllogism wins the game. <br>
    Lets try this I will give you the english word and you find the <
    Timing Practice - Repetition: Run from it, Dread it, Repetition is inevitable - Thanos --> Repetition of reaction - Foreign in eyes/ears, native in mind (For beginners) | Repetition is a multitask activity like driving, it must be a subconscious process to show mastery
    """)
    gr.HTML(""" -- Open LLM Leaderboard -- | -- Whisper JAX -- | -- Google Translate -- | -- Modelscope Text to Video -- | -- stable-diffusion 2 -- | -- stable-diffusion 1 -- | -- karlo 1 -- | -- Bark (TTS) -- | -- Offline Text Model Demos -- | -- SAM with Clip -- | -- Eleven Labs -- | -- Animate an Image -- | -- Clone a voice -- | -- OpenAI pricing -- | -- Image Training Data Search -- | -- Huggingface Chat -- | -- 128x128 Stable Diffusion (Fast) -- | -- Search 95 million research abstracts -- | -- Tiny Stories Dataset -- | -- Visualglm6b - Discuss images -- | -- RAM and Tag2Text -- | -- Potat1 Text2vid -- | """)
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Tab("Rep - Gradio"):
                gr.HTML("""Gradio Version Below """)
            with gr.Tab("Rep - Gradio"):
                gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.")  #.queue()
            with gr.Tab("Navigation"):
                gr.HTML("""Picture Annotation <br>
                Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise <br>
                If you know the flow of the song you can remember the spelling easier <br>
                Essentially if the sounds are repeated or long notes they are easy to remember <br>
                """)
                gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
                gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create order of repetition for tts practice")
        with gr.Column(scale=1):
            gr.HTML("""Reading - Caption images (SD/Dalle-E) <br>
            For Transcripts to any video on youtube use the link below ⬇️ <br>
            https://huggingface.co/spaces/RASMUS/Whisper-youtube-crosslingual-subtitles | https://huggingface.co/spaces/vumichien/whisper-speaker-diarization""")
            #gr.HTML("If Space not loaded its because of offline development errors please message for edit")
            gr.HTML("""Spell multiple words simultaneously for simultaneous access <br>
            Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need to leverage them to remember random sequences""")
            gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
            gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
            gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
            with gr.Tab("Unknown Tracker"):
                gr.HTML("""Repetition of things you know is a waste of time when there's stuff you don't know <br>
                In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs <br>
                Vocab = Glossary + all non text wall (lists, diagrams, etc.) <br>
                """)
                gr.Textbox("Placeholder for a function that creates a set list and can take a list of known words to auto find-replace the stuff you know out of the content")
                gr.Textbox("Placeholder for a translate-to-english interface so that highlighting can still work, as only english is supported for now")
                gr.Interface(fn=UnknownTrackTexttoApp, inputs="text", outputs=["html", "text", "file"], description="Use the text from here to create lists you use for the TTS section")
            with gr.Tab("Unique word ID - use in Infranodus"):
                gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
                gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
                gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="One word at a time Unique word suggestions (wiki articles)")
            with gr.Tab("Automating related information linking"):
                gr.HTML("Questions - Tracking and suggesting questions to ask = new education")
            with gr.Tab("Thinking Practice"):
                with gr.Tab("Sentence to Format"):
                    gr.Interface(fn=split_verbs_nouns, inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
            with gr.Tab("Knowledge Ideas - Notetaking"):
                gr.HTML("""Good knowledge = ability to answer questions --> find Questions you can't answer and look for the hidden answer within them <br>
                My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because the title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient <br>
                Big Picture = Expand the Heading and the subheadings and compare them to each other <br>
                Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first <br>
                """)
                gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
                gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textarea", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
            with gr.Tab("Automated Reading Assistant"):
                gr.Textbox('Parts of Speech based | Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
                gr.HTML("""Types of comprehension agent <br>
                Spaces Test - Still Under construction --> Next Milestone is Turning this interface handsfree | Knowledge is a Language but productive knowledge is find replace as well | LingQ is a good option for per word state management <br>
                Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? --> <br>
                ChatGPT Turns Learning into a read only what you don't know, ask only what you don't know feedback loop --> All you have to do is keep track of what prompts you have asked in the past <br>
                """)
                gr.HTML("""Target 0: Mnemonics as title of images --> Comprehensible input <br>
                Target 1: Dual audio at word Level while using repetition to train random recall --> Word level Time <br>
                Target 2: Video --> Split by sentence --> each word repeated (60) + each phrase (10) + each sentence (10) --> TTS file for practice --> State Management/Known word Tracker <br>
                ----------------------- <br>
                The trick is minimum one minute of focus on a new word --> Listening is hard because there are new words within seconds and you need repeated focus on each to learn <br>
                Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens, like knowing song Lyrics) FOCUSED - Attention (Focused Repetition) + Exposure (Random Repetition) <br>
                Listening is hard due to different word order and word combinations (collocations more important than single words)