vluz committed on
Commit
9b1307c
1 Parent(s): d754b3d

Upload 4 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ model/toxmodel.keras filter=lfs diff=lfs merge=lfs -text
model/toxmodel.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12ed25b56fd7cd096a1e42ec40154d317d36cb9389f22a6178f3526d0fed6bad
3
+ size 77972576
model/vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:861c5a2adbc2c8501dfe0d2333f28c54a39d198fbf2473fe27d1cd552f622579
3
+ size 2750076
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ pandas
2
+
toxtest.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pickle
3
+ import streamlit as st
4
+ import tensorflow as tf
5
+ from tensorflow.keras.layers import TextVectorization
6
+
7
+
8
@st.cache_resource
def load_model():
    """Load the toxicity classifier from disk (cached for the app's lifetime)."""
    # st.cache_resource keeps one model instance shared across reruns/sessions.
    return tf.keras.models.load_model(os.path.join("model", "toxmodel.keras"))
12
+
13
+
14
@st.cache_resource
def load_vectorizer():
    """Rebuild the fitted TextVectorization layer from its pickled state.

    Returns:
        TextVectorization: layer restored from model/vectorizer.pkl, ready to
        map raw strings to token-id tensors.
    """
    # NOTE(security): pickle.load executes arbitrary code from the file; this
    # is acceptable only because vectorizer.pkl ships with the app itself.
    # Fix: use a context manager so the file handle is closed (the original
    # opened the file inline and leaked the handle).
    with open(os.path.join("model", "vectorizer.pkl"), "rb") as fh:
        from_disk = pickle.load(fh)
    new_v = TextVectorization.from_config(from_disk['config'])
    # Workaround (original comment: "Keras bug"): the layer must be built via
    # a dummy adapt() before set_weights() can restore the saved vocabulary.
    new_v.adapt(tf.data.Dataset.from_tensor_slices(["xyz"]))
    new_v.set_weights(from_disk['weights'])
    return new_v
21
+
22
+
23
@st.cache_resource
def load_vocab():
    """Read vocab.txt (one ``token<TAB>index`` pair per line) into a dict.

    Returns:
        dict[str, int]: mapping from token string to its integer index.

    Raises:
        ValueError: if a line does not contain exactly one tab separator.
    """
    vocab = {}
    with open('vocab.txt', 'r') as f:
        for line in f:
            token, index = line.strip().split('\t')
            vocab[token] = int(index)
    # Fix: the original built the dict but never returned it (returned None).
    return vocab
30
+
31
+
32
# --- Streamlit page --------------------------------------------------------
st.title("Toxic Comment Test")
st.divider()

# Cached resources: loaded once, reused on every rerun.
model = load_model()
vectorizer = load_vectorizer()

input_text = st.text_area("Comment:", "I love you man, but fuck you!", height=150)

if st.button("Test"):
    with st.spinner("Testing..."):
        # Vectorize the raw comment, then score all six toxicity heads.
        vectors = vectorizer([input_text])
        scores = model.predict(vectors)
        flags = scores > 0.5  # boolean verdict per label at a 0.5 threshold
        st.write(["toxic","severe toxic","obscene","threat","insult","identity hate"], flags)
        st.write(scores)
        # Also echo the raw scores to the server console.
        print(scores)
45
+
46
+