Update README.md
README.md (CHANGED)
@@ -44,8 +44,8 @@ MODEL_PATH = "helinivan/italian-sarcasm-detector"
 tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
 model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
 
-text = "
-tokenized_text = tokenizer([preprocess_data(text)], padding=True, truncation=True, max_length=
+text = "Gli Usa a un passo dalla recessione"
+tokenized_text = tokenizer([preprocess_data(text)], padding=True, truncation=True, max_length=256, return_tensors="pt")
 output = model(**tokenized_text)
 probs = output.logits.softmax(dim=-1).tolist()[0]
 confidence = max(probs)
@@ -57,7 +57,7 @@ results = {"is_sarcastic": prediction, "confidence": confidence}
 Output:
 
 ```
-{'is_sarcastic':
+{'is_sarcastic': 0, 'confidence': 0.9965020418167114}
 ```
 
 ## Performance
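For reference, the updated snippet can be assembled into a self-contained script. This is a minimal sketch: the `preprocess_data` helper and the `prediction` line are not shown in these hunks, so the lowercase/punctuation-stripping preprocessing and the argmax-style prediction below are assumptions inferred from the hunk headers, not necessarily the README's exact code.

```python
# Minimal sketch of the updated README usage, assembled from the diff.
# Assumptions: preprocess_data() lowercases and strips punctuation, and
# `prediction` is the argmax over the two class probabilities.
import string

from transformers import AutoModelForSequenceClassification, AutoTokenizer

MODEL_PATH = "helinivan/italian-sarcasm-detector"


def preprocess_data(text: str) -> str:
    # Assumed preprocessing step (not shown in this diff).
    return text.lower().translate(str.maketrans("", "", string.punctuation)).strip()


tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)

text = "Gli Usa a un passo dalla recessione"
tokenized_text = tokenizer(
    [preprocess_data(text)],
    padding=True,
    truncation=True,
    max_length=256,
    return_tensors="pt",
)
output = model(**tokenized_text)
probs = output.logits.softmax(dim=-1).tolist()[0]
confidence = max(probs)
prediction = probs.index(confidence)  # assumed: index of the highest-probability class
results = {"is_sarcastic": prediction, "confidence": confidence}
print(results)
# Per the README: {'is_sarcastic': 0, 'confidence': 0.9965020418167114}
```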