thushalya committed on
Commit f589a31
1 Parent(s): b286b42

Add hate and non hate

Files changed (1)
  1. app.py +25 -5
app.py CHANGED
@@ -351,9 +351,9 @@ def load_model(tweet):
     pred = model(inputs)
     print("prediction ",pred)
     print("sigmoid output",torch.sigmoid(pred))
-    print("sigmoid item",torch.sigmoid(pred).item())
+    pred = torch.sigmoid(pred).item()
     # Assuming your model returns a single value for prediction
-    pred = torch.round(torch.sigmoid(pred)).item()
+
 
     return pred
 
@@ -367,20 +367,40 @@ def load_model(tweet):
 #Gradio interface
 def greet(tweet):
     print("start")
-    predicted_class = load_model(tweet)
+    prediction = load_model(tweet)
     # features_list = extract_features(tweet)
     # print(personality_detection(tweet))
     # print(str(features_list["Average_Word_Length"]))
     # print(calc_emotion_score(tweet))
+    predicted_class = torch.round(torch.sigmoid(prediction)).item()
     print("end")
+    if (predicted_class==0.0):
+        label = "Hate"
+    else:
+        label = "Non Hate"
 
 
-    return str(predicted_class)
+    return label,str(prediction)+"%",str(1-prediction)+"%"
 
 # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
 demo = gr.Interface(
     title = "Unmasking Hate: An Integrated Approach to Detecting Hate Speech in Social Media",
     # fn=greet,
     fn=greet, inputs=gr.Textbox(placeholder="Enter an input sentence...",label="Input Sentence"),
-    allow_flagging = "never",outputs="text")
+    allow_flagging = "never",outputs=[
+        gr.Label(label="Label"),
+        gr.Textbox(label="Hate Speech Percentage"),
+        gr.Textbox(label="Non Hate Speech Percentage")
+    ],
+    examples=[
+        ["I like you"],
+        ["I hate you"],
+        ["I can't stand those asian always causing trouble. They need to go back to where they came from!"],
+        ["Just saw a Sunni preaching on the street. Why don't they go worship in their own country instead of invading ours?"],
+        ["I wish all bisexuals would just disappear. Sick of their agenda being shoved in our faces"],
+        ["Had a great time celebrating diversity at the multicultural festival today!"],
+        ["Congratulations to Sri Lankans for their cultural contributions to our society"],
+        ["Love is love, no matter who you are or who you love"] ]
+    )
+
 demo.launch()
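
For context, a minimal standalone sketch of the post-processing this commit introduces: the sigmoid output of load_model is rounded into a class, mapped to a "Hate"/"Non Hate" label, and the probability pair is rendered into the three output components of the new multi-output Interface. The format_prediction helper and the gr.Number input below are illustrative stand-ins that are not part of app.py, and the explicit *100 scaling is an assumption about how the percentage strings are meant to be displayed.

import gradio as gr

def format_prediction(prediction: float):
    # Hypothetical helper, not in app.py: `prediction` stands in for the
    # float that load_model() now returns. As in the commit, a rounded
    # value of 0 is treated as "Hate"; the two percentages mirror
    # prediction and 1 - prediction, scaled to 0-100 for display
    # (an assumption, since the commit formats the raw float).
    label = "Hate" if round(prediction) == 0 else "Non Hate"
    return label, f"{prediction * 100:.1f}%", f"{(1 - prediction) * 100:.1f}%"

# The tuple returned by the function fills the output components in order,
# matching the outputs list added in this commit.
demo = gr.Interface(
    fn=format_prediction,
    inputs=gr.Number(label="Sigmoid probability"),
    outputs=[
        gr.Label(label="Label"),
        gr.Textbox(label="Hate Speech Percentage"),
        gr.Textbox(label="Non Hate Speech Percentage"),
    ],
    allow_flagging="never",
)

if __name__ == "__main__":
    demo.launch()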