Update app.py
app.py CHANGED

@@ -73,6 +73,7 @@ from tensorflow.keras.models import Sequential, model_from_json
 import tensorflow as tf
 from tensorflow.keras.preprocessing.sequence import pad_sequences
 import spacy
+from tensorflow.keras.preprocessing.text import Tokenizer
 #from spacy import en_core_web_lg
 #import en_core_web_lg
 #nlp = en_core_web_lg.load()
@@ -171,6 +172,10 @@ def main():
 
 # pipeline_test_output = loaded_vectorizer.transform(class_list)
 # predicted = loaded_model.predict(pipeline_test_output)
+
+tokenizer = Tokenizer(num_words=100000)
+tokenizer.fit_on_texts(class_list)
+word_index = tokenizer.word_index
 text_embedding = np.zeros((len(word_index) + 1, 300))
 for word, i in word_index.items():
     text_embedding[i] = nlp(word).vector
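For context: the added lines fit a Keras Tokenizer on class_list so that word_index exists before the embedding loop runs, and each indexed word is then mapped to a 300-dimensional spaCy vector. A minimal self-contained sketch of the pattern follows; the en_core_web_lg model (suggested by the commented-out imports), the sample class_list, and the frozen Embedding layer at the end are illustrative assumptions, not part of this commit.

import numpy as np
import spacy
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing.text import Tokenizer

# Assumption: en_core_web_lg, whose word vectors are 300-dimensional,
# matching the np.zeros((len(word_index) + 1, 300)) allocation in the diff.
nlp = spacy.load("en_core_web_lg")

# Hypothetical stand-in for the app's class_list.
class_list = ["positive review", "negative review", "neutral review"]

# As in the commit: index the vocabulary of class_list.
tokenizer = Tokenizer(num_words=100000)
tokenizer.fit_on_texts(class_list)
word_index = tokenizer.word_index  # word -> integer index, starting at 1

# As in the commit: row i of the matrix holds the spaCy vector for word i;
# row 0 stays all zeros because Tokenizer indices start at 1.
text_embedding = np.zeros((len(word_index) + 1, 300))
for word, i in word_index.items():
    text_embedding[i] = nlp(word).vector

# One common use of such a matrix (an assumption, not shown in the diff):
# seed a frozen Keras Embedding layer with the pretrained vectors.
embedding_layer = Embedding(
    input_dim=len(word_index) + 1,
    output_dim=300,
    weights=[text_embedding],
    trainable=False,
)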