File size: 1,745 Bytes
0342af9 7dac0b4 0342af9 7dac0b4 0342af9 ff44688 0342af9 50fc5c2 0342af9 7dac0b4 0342af9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 |
import os
import re
import pickle
import streamlit as st
import tensorflow as tf
from tensorflow.keras.layers import TextVectorization
def clean_text(text):
    """Normalize raw comment text for the toxicity model.

    Strips HTML tags and URLs, replaces every character that is not a
    letter, apostrophe, or whitespace with a space, re-spaces the
    abbreviations "ie"/"eg", collapses runs of whitespace, and returns
    the result lowercased.
    """
    without_tags = re.sub(r'<[^>]+>', '', text)
    without_urls = re.sub(r'http\S+|www\S+|https\S+', '', without_tags)
    letters_only = re.sub(r'[^a-zA-Z\'\s]', ' ', without_urls)
    # Keep "ie" / "eg" as standalone tokens surrounded by single spaces.
    respaced = re.sub(r'(\s)([iI][eE]|[eE][gG])(\s)', r' \2 ', letters_only)
    collapsed = " ".join(respaced.split())
    return collapsed.lower()
@st.cache_resource
def load_model():
    """Load the trained toxicity classifier from disk.

    Cached with st.cache_resource so the model is deserialized once and
    reused across Streamlit reruns.
    """
    model_path = os.path.join("model", "toxmodel.keras")
    return tf.keras.models.load_model(model_path)
@st.cache_resource
def load_vectorizer():
    """Rebuild the fitted TextVectorization layer from its pickled state.

    The pickle holds the layer's config and weights (vocabulary). Cached
    with st.cache_resource so it is reconstructed once per session.

    Returns:
        A TextVectorization layer ready to map raw strings to token ids.
    """
    # Fix: the original `pickle.load(open(...))` never closed the file
    # handle; use a context manager so it is always released.
    # NOTE: pickle is only acceptable here because the file ships with the
    # app — never unpickle untrusted input.
    with open(os.path.join("model", "vectorizer.pkl"), "rb") as f:
        from_disk = pickle.load(f)
    new_v = TextVectorization.from_config(from_disk['config'])
    # Adapting on a dummy dataset builds the layer's internal state so
    # set_weights() can restore the real vocabulary (Keras workaround).
    new_v.adapt(tf.data.Dataset.from_tensor_slices(["xyz"]))  # fix for Keras bug
    new_v.set_weights(from_disk['weights'])
    return new_v
st.title("Toxic Comment Test")
st.divider()

model = load_model()
vectorizer = load_vectorizer()

# Output columns of the classifier, in the order the model emits them.
LABELS = ["toxic", "severe toxic", "obscene", "threat", "insult", "identity hate"]

default_prompt = "i love you man, but fuck you!"
input_text = st.text_area("Comment:", default_prompt, height=150).lower()

if st.button("Test"):
    if not input_text:
        # Fix: previously the model still ran on an empty prompt after
        # this warning; now prediction is skipped entirely.
        st.write("⚠ Warning: Empty prompt.")
    else:
        if len(input_text) < 15:
            # Short prompts are warned about but still scored (original behavior).
            st.write("⚠ Warning: Model is far less accurate with a small prompt.")
        if input_text == default_prompt:
            st.write("Expected results from default prompt are positive for 0 and 2")
        with st.spinner("Testing..."):
            clean_input_text = clean_text(input_text)
            inputv = vectorizer([clean_input_text])
            output = model.predict(inputv)
            # Threshold each sigmoid output at 0.5 to get per-label verdicts.
            res = (output > 0.5)
            st.write(LABELS, res)
            st.write(output)
|