import gradio as gr
from todset import todset
import numpy as np
import hashlib
from keras.models import Model
from keras.saving import load_model
from keras.layers import *
from keras.preprocessing.text import Tokenizer
import os

os.makedirs("cache", exist_ok=True)  # don't crash if the cache directory already exists
emb_size = 128  # embedding dimension
inp_len = 16    # number of tokens fed to the model per question
maxshift = 4    # how many shifted copies of each question are used as training augmentation
def hash_str(data: str):
    return hashlib.md5(data.encode('utf-8')).hexdigest()
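
# Trained models are cached as cache/<md5 of the dataset text>.keras, so a given
# dataset only has to be trained once; later requests with the same dataset reload the file.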
def train(data: str, message: str):
    if "→" not in data or "\n" not in data:
        return "Dataset example:\nquestion→answer\nquestion→answer\netc."
    # todset parses the question→answer lines into a {question: response index} dict
    # plus the list of distinct responses.
    dset, responses = todset(data)
    resps_len = len(responses)
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(list(dset.keys()))
    vocab_size = len(tokenizer.word_index) + 1
    model_path = os.path.join("cache", hash_str(data) + ".keras")
    if os.path.exists(model_path):
        model = load_model(model_path)
    else:
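        # Model: token embeddings -> self-attention -> Gaussian noise -> a stack of
        # Conv1D feature extractors; the flattened attention and convolution outputs
        # are concatenated back in as skip connections before the softmax over responses.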
        input_layer = Input(shape=(inp_len,))
        emb_layer = Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len)(input_layer)
        attn_layer = MultiHeadAttention(num_heads=4, key_dim=128)(emb_layer, emb_layer, emb_layer)
        noise_layer = GaussianNoise(0.1)(attn_layer)
        conv1_layer = Conv1D(64, 8, padding='same', activation='relu', strides=1)(noise_layer)
        conv2_layer = Conv1D(16, 4, padding='valid', activation='relu', strides=1)(conv1_layer)
        conv3_layer = Conv1D(8, 2, padding='valid', activation='relu', strides=1)(conv2_layer)
        flatten_layer = Flatten()(conv3_layer)
        attn_flatten_layer = Flatten()(attn_layer)
        conv1_flatten_layer = Flatten()(conv1_layer)
        conv2_flatten_layer = Flatten()(conv2_layer)
        conv3_flatten_layer = Flatten()(conv3_layer)
        concat1_layer = Concatenate()([flatten_layer, attn_flatten_layer, conv1_flatten_layer, conv2_flatten_layer, conv3_flatten_layer])
        dense1_layer = Dense(512, activation="linear")(concat1_layer)
        prelu1_layer = PReLU()(dense1_layer)
        dropout_layer = Dropout(0.3)(prelu1_layer)
        dense2_layer = Dense(256, activation="tanh")(dropout_layer)
        dense3_layer = Dense(256, activation="relu")(dense2_layer)
        dense4_layer = Dense(100, activation="tanh")(dense3_layer)
        concat2_layer = Concatenate()([dense4_layer, prelu1_layer, attn_flatten_layer, conv1_flatten_layer])
        output_layer = Dense(resps_len, activation="softmax")(concat2_layer)
        model = Model(input_layer, output_layer)
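
        # Build the training set: each question is tokenized and prefixed with
        # 0..maxshift-1 padding zeros (a small shift augmentation), then padded or
        # truncated to inp_len tokens; the target is a one-hot vector over the responses.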
        X = []
        y = []
        for key in dset:
            for p in range(maxshift):
                tokens = tokenizer.texts_to_sequences([key,])[0]
                X.append(np.array(([0,]*p+list(tokens)+[0,]*inp_len)[:inp_len]))
                output_array = np.zeros(resps_len)
                output_array[dset[key]] = 1
                y.append(output_array)
        X = np.array(X)
        y = np.array(y)
        model.compile(loss="categorical_crossentropy", metrics=["accuracy",])
        model.fit(X, y, epochs=10, batch_size=8, workers=4, use_multiprocessing=True)
        model.save(model_path)
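
    # Predict a response for the incoming message: tokenize, pad to inp_len tokens,
    # and return the response with the highest softmax score.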
    tokens = tokenizer.texts_to_sequences([message,])[0]
    prediction = model.predict(np.array([(list(tokens)+[0,]*inp_len)[:inp_len],]))[0]
    max_o = 0
    max_v = 0
    for ind, i in enumerate(prediction):
        if max_v < i:
            max_v = i
            max_o = ind
    return responses[max_o]
iface = gr.Interface(fn=train, inputs=["text", "text"], outputs="text")
iface.launch()
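
# Illustrative usage (an assumption, not part of the original Space): train() can also
# be called directly with a dataset string in the "question→answer" format checked above.
# example_data = "hi→hello\nhow are you→i'm fine\nbye→goodbye"
# print(train(example_data, "hi"))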