# import gradio as gr
import tensorflow as tf
import numpy as np
from keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences
import os
from pathlib import Path
import pandas as pd
import plotly.express as px
import keras
import unicodedata as ud
from underthesea import word_tokenize
from phoBERT import BERT_predict
import time

# Load the pre-trained recurrent models (TensorFlow SavedModel format).
LSTM_model = tf.keras.models.load_model('lstm_model.tf')
GRU_model = tf.keras.models.load_model('gru_model.tf')

def LSTM_predict(x):
    # Predict class probabilities with the LSTM model and log the inference time.
    t1 = time.time()
    pred_proba = LSTM_model.predict([x])[0]
    t2 = time.time()
    print(f'{t2 - t1}s')
    pred_proba = [round(i, 2) for i in pred_proba]
    return pred_proba

def GRU_predict(x):
    # Predict class probabilities with the GRU model and log the inference time.
    t1 = time.time()
    pred_proba = GRU_model.predict([x])[0]
    t2 = time.time()
    print(f'{t2 - t1}s')
    pred_proba = [round(i, 2) for i in pred_proba]
    return pred_proba

def tokenize(x):
    # Normalize Unicode (NFKC) and word-segment Vietnamese text with underthesea.
    x = ud.normalize('NFKC', x)
    x = word_tokenize(x, format="text")
    return x

def judge(x):
    # Ensemble prediction: average the LSTM and GRU probabilities for each of the 6 classes.
    result = []
    x = tokenize(x)
    lstm_pred = LSTM_predict(x)
    gru_pred = GRU_predict(x)
    result_lstm = np.round(lstm_pred, 2)
    result_gru = np.round(gru_pred, 2)
    for i in range(6):
        result.append((result_lstm[i] + result_gru[i]) / 2)
    return result

def judgePlus(x):
    # Ensemble prediction that also consults PhoBERT. If PhoBERT fails, its scores
    # are replaced by the LSTM/GRU average. When PhoBERT's class-0 score exceeds the
    # LSTM/GRU average for class 0, return PhoBERT's scores; otherwise return the
    # LSTM/GRU average.
    result = []
    x = tokenize(x)
    lstm_pred = LSTM_predict(x)
    gru_pred = GRU_predict(x)
    try:
        bert_pred = BERT_predict(x)
    except Exception:
        bert_pred = np.average([lstm_pred, gru_pred], axis=0)
    result_lstm = np.round(lstm_pred, 2)
    result_gru = np.round(gru_pred, 2)
    result_bert = np.round(bert_pred, 2)
    if (result_lstm[0] + result_gru[0]) < (result_bert[0] * 2):
        for i in range(6):
            result.append(result_bert[i])
    else:
        for i in range(6):
            result.append((result_lstm[i] + result_gru[i]) / 2)
    return result

def judgeBert(x):
    # PhoBERT-only prediction; fall back to all-zero scores if PhoBERT fails.
    result = []
    x = tokenize(x)
    try:
        bert_pred = BERT_predict(x)
    except Exception:
        bert_pred = np.zeros(6, dtype=float)
    result_bert = np.round(bert_pred, 2)
    for i in range(6):
        result.append(result_bert[i])
    return result
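
# Example usage (illustrative sketch only, not part of the original app): the
# __main__ guard and the sample sentence below are assumptions added to show how
# the three scoring functions might be called; the Space itself is expected to
# expose them through a Gradio interface instead.
if __name__ == "__main__":
    sample = "Bài viết này rất hay và bổ ích."
    print("LSTM+GRU ensemble:", judge(sample))
    print("LSTM+GRU+PhoBERT:", judgePlus(sample))
    print("PhoBERT only:", judgeBert(sample))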