mskov committed on
Commit 22b7cff
1 Parent(s): 2186147

Update app.py

Files changed (1)
app.py +6 -3
app.py CHANGED
@@ -111,7 +111,10 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
         # classification_df = pd.DataFrame.from_dict(classification_output)
         print("keys ", classification_output.keys())
 
-        formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
+        # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
+        label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
+
+
 
 
         # plot.update(x=classification_df["labels"], y=classification_df["scores"])
@@ -121,7 +124,7 @@ def classify_toxicity(audio_file, text_input, classify_anxiety, emo_class, expli
        else:
            affirm = ""
 
-        return toxicity_score, formatted_classification_output, transcribed_text, affirm
+        return toxicity_score, label_score_pairs, transcribed_text, affirm
         # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
    else:
        threshold = slider_logic(slider)
@@ -182,7 +185,7 @@ with gr.Blocks() as iface:
         submit_btn = gr.Button(label="Run")
     with gr.Column():
         out_val = gr.Textbox()
-        out_class = gr.Textbox()
+        out_class = gr.Label()
         out_text = gr.Textbox()
         out_affirm = gr.Textbox()
     submit_btn.click(fn=classify_toxicity, inputs=[aud_input, text, anxiety_class, emo_class, explit_preference, sense_slider], outputs=[out_val, out_class, out_text, out_affirm])
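
For context, the commit replaces the newline-joined string of classification results with a list of (label, score) tuples and switches the out_class component from gr.Textbox to gr.Label. Below is a minimal sketch of that data shaping, not part of the commit: the labels and scores are hypothetical placeholders, and it assumes classification_output follows the zero-shot pipeline format with parallel "labels" and "scores" lists. Note that gr.Label typically renders either a plain string or a dict mapping labels to confidences, so the tuple list may need a dict conversion to display as a confidence bar chart.

# Sketch (not part of the commit): shaping zero-shot output for gr.Label.
# The labels and scores here are hypothetical placeholders.
classification_output = {
    "labels": ["toxic", "neutral", "supportive"],
    "scores": [0.72, 0.20, 0.08],
}

# Same pairing the commit introduces.
label_score_pairs = [
    (label, score)
    for label, score in zip(classification_output["labels"],
                            classification_output["scores"])
]

# gr.Label accepts a string or a dict of {label: confidence}; converting
# the tuple list to a dict is one way to get the confidence-bar display.
label_confidences = dict(label_score_pairs)
print(label_confidences)  # {'toxic': 0.72, 'neutral': 0.2, 'supportive': 0.08}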