Update README.md
Browse files
README.md
CHANGED
@@ -18,7 +18,19 @@ Dataset: ```PAWS``` [link](https://github.com/google-research-datasets/paws)
|
|
18 |
|
19 |
## Performance:
|
20 |
|
21 |
-
|
22 |
ROC-AUC score: 0.86
|
23 |
|
24 |
## Usage:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
## Performance:
|
20 |
|
21 |
+
F1-score: 0.86
|
22 |
ROC-AUC score: 0.86
|
23 |
|
24 |
## Usage:
|
25 |
+
```python
|
26 |
+
from transformers import T5ForConditionalGeneration, T5Tokenizer
|
27 |
+
tokenizer = T5Tokenizer.from_pretrained("etomoscow/T5_paraphrase_detector")
|
28 |
+
model = T5ForConditionalGeneration.from_pretrained("etomoscow/T5_paraphrase_detector")
|
29 |
+
text_1 = 'During her sophomore , junior and senior summers , she spent half of it with her Alaska team , and half playing , and living in Oregon .'
|
30 |
+
text_2 = 'During her second , junior and senior summers , she spent half of it with her Alaska team , half playing and living in Oregon.'
|
31 |
+
true_label = '1'
|
32 |
+
input_text = tokenizer.encode_plus(text_1 + ' <sep> ' + text_2, return_tensors='pt')
|
33 |
+
out = model.generate(input_text['input_ids'])
|
34 |
+
print(tokenizer.decode(out.squeeze(0), skip_special_tokens=True))
|
35 |
+
# 1
|
36 |
+
```
|