Upload 3 files
- app.py +44 -0
- text_generator.h5 +3 -0
- tokenizer.pickle +3 -0
app.py
ADDED
@@ -0,0 +1,44 @@
import streamlit as st
import pickle
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Must match the sequence length the model was trained with;
# inputs are padded to max_sequence_len - 1 tokens.
max_sequence_len = 40

st.title("Next Word Prediction using LSTM")
seed_text = st.text_input('Enter initial text to start generating next words')
next_words_count = st.slider('How many words to generate', 1, 8, 4)

model = load_model('text_generator.h5')

with open("tokenizer.pickle", "rb") as handle:
    tokenizer = pickle.load(handle)  # pickle has no read(); load() deserializes the tokenizer

def generate_text(seed_text, next_words, model, max_sequence_len):
    # Greedy decoding: repeatedly predict the most likely next word and append it.
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
        predicted = model.predict(token_list, verbose=0)
        classes_x = np.argmax(predicted, axis=1)[0]  # index of the most probable word
        output_word = ""
        for word, index in tokenizer.word_index.items():
            if index == classes_x:
                output_word = word
                break
        seed_text += " " + output_word
    return seed_text.title()

if st.button("Submit", type="primary"):
    output = generate_text(seed_text, next_words_count, model, max_sequence_len)
    st.write(output)
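
A note on the lookup in generate_text: scanning tokenizer.word_index on every step costs one pass over the whole vocabulary per generated word. Keras' Tokenizer also exposes index_word, the inverse mapping, which makes the lookup constant-time. A minimal sketch of that variant, as a drop-in replacement reusing the imports above (assumes the tokenizer was saved by a Keras/TensorFlow version recent enough to populate index_word):

def generate_text(seed_text, next_words, model, max_sequence_len):
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
        predicted_index = int(np.argmax(model.predict(token_list, verbose=0), axis=1)[0])
        # index_word maps integer index -> word; fall back to "" for index 0 / unknown indices
        seed_text += " " + tokenizer.index_word.get(predicted_index, "")
    return seed_text.title()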
text_generator.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f3e920a7ddcdaed698f74b6647e582a70dea540569adbfeae84966c9a61ce9a6
size 22930136
tokenizer.pickle
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3e9b15fba8b8707bf03b18603193b182b0b0ff46478b157edde18636b773be2b
size 328855
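
Both binaries are Git LFS pointer files, so a clone without LFS support leaves small text stubs in place of the real 22.9 MB model and 329 KB tokenizer, and load_model will fail. A quick sanity check after running git lfs pull, sketched under the assumption that the app's padding matches the training setup:

import pickle
from tensorflow.keras.models import load_model

model = load_model('text_generator.h5')
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

# Expected (not confirmed by the diff): input length 39 = max_sequence_len - 1,
# and one output unit per vocabulary entry.
print(model.input_shape)          # e.g. (None, 39)
print(model.output_shape)         # e.g. (None, vocab_size)
print(len(tokenizer.word_index))  # vocabulary size seen during training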