mskov committed
Commit 6d9a5de
1 Parent(s): 18d712a

Update app.py

Files changed (1): app.py +20 -8
app.py CHANGED
@@ -110,15 +110,27 @@ def classify_toxicity(audio_file, classify_anxiety, emo_class, explitive_selecti
     # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
     # label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
     label_score_dict = {label: score for label, score in zip(classification_output['labels'], classification_output['scores'])}
-
-    # plot.update(x=classification_df["labels"], y=classification_df["scores"])
-    if toxicity_score > threshold:
-        print("threshold exceeded!! Launch intervention")
-        affirm = positive_affirmations()
-    else:
-        affirm = ""
-
-    return transcribed_text, toxicity_score, label_score_dict, affirm
+    k = max(label_score_dict, key=label_score_dict.get)  # label with the highest score
+    maxval = label_score_dict[k]
+    if maxval > toxicity_score:
+        if maxval > threshold:
+            print("Toxic")
+            affirm = positive_affirmations()
+            topScore = maxval
+        else:
+            print("Not Toxic")
+            affirm = ""
+            topScore = maxval
+    else:
+        if toxicity_score > threshold:
+            affirm = positive_affirmations()
+            topScore = toxicity_score
+        else:
+            print("Not Toxic")
+            affirm = ""
+            topScore = toxicity_score
+
+    return transcribed_text, topScore, label_score_dict, affirm
     # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
 
 def positive_affirmations():
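
For reference, the branching this commit adds boils down to: report whichever is larger, the best zero-shot label score or the model's toxicity score, and trigger an affirmation only when that winner clears the threshold. Below is a minimal sketch of that selection rule with hypothetical inputs; pick_top_score and the sample scores are illustrative names, not part of app.py.

from typing import Dict, Tuple

def pick_top_score(label_score_dict: Dict[str, float],
                   toxicity_score: float,
                   threshold: float) -> Tuple[float, bool]:
    """Return (top_score, is_toxic) using the commit's selection rule."""
    # max() with key= picks the dict key whose mapped value is largest
    top_label = max(label_score_dict, key=label_score_dict.get)
    max_val = label_score_dict[top_label]
    top_score = max_val if max_val > toxicity_score else toxicity_score
    return top_score, top_score > threshold

# Hypothetical values for demonstration only
scores = {"insult": 0.72, "neutral": 0.18, "profanity": 0.10}
top, toxic = pick_top_score(scores, toxicity_score=0.4, threshold=0.6)
print(top, toxic)  # 0.72 True

Passing key=label_score_dict.get is the standard idiom for taking the argmax of a dict, since max() iterates over the keys and ranks them by the value the key function returns.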