ky2k commited on
Commit
88ee86c
1 Parent(s): 02d4487

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -3,8 +3,8 @@ from transformers import pipeline
3
 
4
  # Load the models
5
  MODEL_PATHS = {
6
- "Unitary Toxic BERT": "unitary/toxic-bert",
7
- "Martin HA Toxic Comment": "martin-ha/toxic-comment-model"
8
  }
9
 
10
  classifiers = {name: pipeline("text-classification", model=path, tokenizer=path) for name, path in MODEL_PATHS.items()}
@@ -26,17 +26,17 @@ iface = gr.Interface(
26
  fn=predict_toxicity,
27
  inputs=[
28
  gr.Textbox(lines=5, label="Enter text to analyze"),
29
- gr.Radio(choices=list(MODEL_PATHS.keys()), label="Choose a model", value="Toxic BERT")
30
  ],
31
  outputs=gr.Label(num_top_classes=6, label="Toxicity Scores"),
32
  title="Toxicity Prediction",
33
- description="This POC uses trained&pre-trained models to predict toxicity in text. Choose between two models: 'Toxic BERT' based and 'Martin HA Toxic Comment'.",
34
  examples=[
35
- ["Great game everyone!", "Toxic BERT"],
36
- ["You're such a noob, uninstall please.", "Martin HA Toxic Comment"],
37
- ["I hope you die in real life, loser.", "Toxic BERT"],
38
- ["Nice move! How did you do that?", "Martin HA Toxic Comment"],
39
- ["Go back to the kitchen where you belong.", "Toxic BERT"],
40
  ]
41
  )
42
 
 
3
 
4
  # Load the models
5
  MODEL_PATHS = {
6
+ "Toxic Bert-based model": "unitary/toxic-bert",
7
+ "Martin-HA-toxic-comment-model": "martin-ha/toxic-comment-model"
8
  }
9
 
10
  classifiers = {name: pipeline("text-classification", model=path, tokenizer=path) for name, path in MODEL_PATHS.items()}
 
26
  fn=predict_toxicity,
27
  inputs=[
28
  gr.Textbox(lines=5, label="Enter text to analyze"),
29
+ gr.Radio(choices=list(MODEL_PATHS.keys()), label="Choose a model", value="Toxic Bert-based model")
30
  ],
31
  outputs=gr.Label(num_top_classes=6, label="Toxicity Scores"),
32
  title="Toxicity Prediction",
33
+ description="This POC uses trained & pre-trained models to predict toxicity in text. Choose between two models: 'Toxic Bert-based model' for multi-class labeling and 'Martin-HA-toxic-comment-model' for binary classification.",
34
  examples=[
35
+ ["Great game everyone!", "Toxic Bert-based model"],
36
+ ["You're such a noob, uninstall please.", "Martin-HA-toxic-comment-model"],
37
+ ["I hope you die in real life, loser.", "Toxic Bert-based model"],
38
+ ["Nice move! How did you do that?", "Martin-HA-toxic-comment-model"],
39
+ ["Go back to the kitchen where you belong.", "Toxic Bert-based model"],
40
  ]
41
  )
42