Update app.py
app.py CHANGED
@@ -47,6 +47,21 @@ def classify_emotion(audio):
     emotion_classifier = foreign_class(source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")
     out_prob, score, index, text_lab = emotion_classifier.classify_file(audio)
     return emo_dict[text_lab[0]]
+
+def slider_logic(slider):
+    if slider == 1:
+        threshold = .98
+    elif slider == 2:
+        threshold = .88
+    elif slider == 3:
+        threshold = .77
+    elif slider == 4:
+        threshold = .66
+    elif slider == 5:
+        threshold = .55
+    else:
+        threshold = []
+    return threshold
 
 # Create a Gradio interface with audio file and text inputs
 def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, explitive_selection, slider):
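The new `slider_logic` helper maps the five slider positions to progressively lower toxicity thresholds (1 → 0.98 down to 5 → 0.55), so a higher setting flags speech more aggressively. A minimal table-driven sketch of the same mapping is shown below; the `SLIDER_THRESHOLDS` name and the numeric fallback are illustrative assumptions, not part of the commit (the committed code returns an empty list for out-of-range values, which would raise a TypeError if it were ever compared against a float score).

```python
# Sketch only: an equivalent, table-driven version of slider_logic.
SLIDER_THRESHOLDS = {1: 0.98, 2: 0.88, 3: 0.77, 4: 0.66, 5: 0.55}

def slider_logic(slider):
    # Fall back to the most permissive threshold (0.98) for unexpected values;
    # the committed code returns [] here instead.
    return SLIDER_THRESHOLDS.get(int(slider), 0.98)
```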
@@ -59,6 +74,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
     print("emo_class ", emo_class, "explitive select", explitive_selection)
 
     ## SLIDER ##
+    threshold = slider_logic(slider)
 
     #------- explitive call ---------------
 
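The second hunk computes the threshold once, near the top of `classify_toxicity`, from the `slider` value the Gradio UI passes in. The slider component itself is defined elsewhere in app.py and is not part of this diff; a plausible definition consistent with the 1-5 mapping (the variable name, default value, and label are assumptions) would be:

```python
import gradio as gr

# Hypothetical slider feeding classify_toxicity's `slider` argument;
# integer steps over 1-5 line up with the branches in slider_logic.
sense_slider = gr.Slider(minimum=1, maximum=5, step=1, value=3, label="Sensitivity")
```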
@@ -94,10 +110,12 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         print("keys ", classification_output.keys())
 
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
-
+        if toxicity_score > threshold:
+            print("threshold exceeded!!")
         return toxicity_score, classification_output, transcribed_text
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
     else:
+        threshold = slider_logic(slider)
         model = whisper.load_model("large")
         # model = model_cache[model_name]
         # class_names = classify_anxiety.split(",")