rabiyulfahim committed
Commit 217b673 · Parent: 699d3f7

Upload 7 files

Files changed:
- .gitattributes +6 -31
- README.md +45 -0
- config.json +49 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- tokenizer_config.json +1 -0
.gitattributes CHANGED
@@ -1,34 +1,9 @@
-*.
-*.
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
 *.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ckpt filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
 *.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.mlmodel filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
 *.tflite filter=lfs diff=lfs merge=lfs -text
-*.
-*.
-*.
-
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.tar.gz filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+spiece.model filter=lfs diff=lfs merge=lfs -text
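The rewritten `.gitattributes` tells Git LFS which paths in this repository are stored as LFS objects rather than ordinary git blobs. As a rough illustration (not how Git evaluates these rules internally), the sketch below matches the files from the commit summary above against the kept patterns using Python's `fnmatch`:

```
# Illustrative only: match this commit's files against the new LFS patterns.
from fnmatch import fnmatch

lfs_patterns = ["*.bin.*", "*.lfs.*", "*.bin", "*.h5", "*.tflite",
                "*.tar.gz", "*.ot", "*.onnx", "spiece.model"]
committed = [".gitattributes", "README.md", "config.json", "pytorch_model.bin",
             "special_tokens_map.json", "spiece.model", "tokenizer_config.json"]

for name in committed:
    via_lfs = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(f"{name}: {'Git LFS pointer' if via_lfs else 'regular git blob'}")
```

Consistent with this, the only files below that appear as three-line LFS pointer stubs are `pytorch_model.bin` and `spiece.model`.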
README.md CHANGED
@@ -1,3 +1,48 @@
 ---
+language: en
 license: apache-2.0
+tags:
+- pegasus
+- paraphrasing
+- seq2seq
 ---
+
+## Model description
+[PEGASUS](https://github.com/google-research/pegasus) fine-tuned for paraphrasing
+
+## Model in Action 🚀
+```
+import torch
+from transformers import PegasusForConditionalGeneration, PegasusTokenizer
+model_name = 'tuner007/pegasus_paraphrase'
+torch_device = 'cuda' if torch.cuda.is_available() else 'cpu'
+tokenizer = PegasusTokenizer.from_pretrained(model_name)
+model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device)
+
+def get_response(input_text, num_return_sequences, num_beams):
+  batch = tokenizer([input_text], truncation=True, padding='longest', max_length=60, return_tensors="pt").to(torch_device)
+  translated = model.generate(**batch, max_length=60, num_beams=num_beams, num_return_sequences=num_return_sequences, temperature=1.5)
+  tgt_text = tokenizer.batch_decode(translated, skip_special_tokens=True)
+  return tgt_text
+```
+#### Example:
+```
+num_beams = 10
+num_return_sequences = 10
+context = "The ultimate test of your knowledge is your capacity to convey it to another."
+get_response(context, num_return_sequences, num_beams)
+# output:
+['The test of your knowledge is your ability to convey it.',
+ 'The ability to convey your knowledge is the ultimate test of your knowledge.',
+ 'The ability to convey your knowledge is the most important test of your knowledge.',
+ 'Your capacity to convey your knowledge is the ultimate test of it.',
+ 'The test of your knowledge is your ability to communicate it.',
+ 'Your capacity to convey your knowledge is the ultimate test of your knowledge.',
+ 'Your capacity to convey your knowledge to another is the ultimate test of your knowledge.',
+ 'Your capacity to convey your knowledge is the most important test of your knowledge.',
+ 'The test of your knowledge is how well you can convey it.',
+ 'Your capacity to convey your knowledge is the ultimate test.']
+```
+
+> Created by [Arpit Rajauria](https://twitter.com/arpit_rajauria)
+[![Twitter icon](https://cdn0.iconfinder.com/data/icons/shift-logotypes/32/Twitter-32.png)](https://twitter.com/arpit_rajauria)
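The README example pulls the original `tuner007/pegasus_paraphrase` checkpoint by name. Because this commit uploads the same set of artifacts (`config.json`, `pytorch_model.bin`, `spiece.model`, `special_tokens_map.json`, `tokenizer_config.json`), the checkpoint can equally be loaded from a local clone of this repository; the `local_dir` path below is an assumption, not something recorded in the commit.

```
# Minimal sketch: load the files from this commit out of a local clone.
import torch
from transformers import PegasusForConditionalGeneration, PegasusTokenizer

local_dir = "./"  # assumed path to the clone holding the uploaded files
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = PegasusTokenizer.from_pretrained(local_dir)
model = PegasusForConditionalGeneration.from_pretrained(local_dir).to(device)

batch = tokenizer(["The ultimate test of your knowledge is your capacity to convey it to another."],
                  truncation=True, padding="longest", max_length=60, return_tensors="pt").to(device)
candidates = model.generate(**batch, max_length=60, num_beams=10, num_return_sequences=5)
print(tokenizer.batch_decode(candidates, skip_special_tokens=True))
```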
config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "activation_dropout": 0.1,
+  "activation_function": "relu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": true,
+  "architectures": [
+    "PegasusForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "classif_dropout": 0.0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 16,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 16,
+  "eos_token_id": 1,
+  "extra_pos_embeddings": 1,
+  "force_bos_token_to_be_generated": false,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "length_penalty": 0.8,
+  "max_length": 60,
+  "max_position_embeddings": 60,
+  "model_type": "pegasus",
+  "normalize_before": true,
+  "normalize_embedding": false,
+  "num_beams": 8,
+  "num_hidden_layers": 16,
+  "pad_token_id": 0,
+  "scale_embedding": true,
+  "static_position_embeddings": true,
+  "vocab_size": 96103
+}
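The config describes a full-size PEGASUS encoder-decoder (16 encoder and 16 decoder layers, d_model 1024, a 96,103-token SentencePiece vocabulary) with `max_position_embeddings` and the default `max_length` both set to 60, which is why the README example truncates inputs to 60 tokens. A small sketch of inspecting these values through `transformers` once the repository is cloned locally (the `./` path is assumed):

```
# Sketch: read config.json from a local clone and print the key dimensions.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./")             # assumed local path
print(config.model_type)                              # pegasus
print(config.encoder_layers, config.decoder_layers)   # 16 16
print(config.d_model, config.vocab_size)              # 1024 96103
print(config.max_position_embeddings)                 # 60 -> longest supported input
print(config.max_length, config.num_beams)            # 60 8 -> default generation settings
```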
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73318308cac63b31bc1df8405626c1f31b24bfba52bafe5373e15d1f12df4ac0
+size 2275437102
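What is committed here is not the roughly 2.3 GB weight file itself but a Git LFS pointer: the spec version, the SHA-256 of the real object, and its size in bytes (the same goes for `spiece.model` below). A minimal sketch, assuming you have both a pointer file and the downloaded blob on disk, of checking the blob against the pointer's hash and size (the paths are placeholders):

```
# Sketch: parse a Git LFS pointer and verify a downloaded blob against it.
import hashlib
from pathlib import Path

def read_lfs_pointer(path):
    # Lines look like: "version <url>", "oid sha256:<hex>", "size <bytes>"
    fields = dict(line.split(" ", 1) for line in Path(path).read_text().splitlines() if line)
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify_blob(pointer_path, blob_path):
    expected_oid, expected_size = read_lfs_pointer(pointer_path)
    digest, total = hashlib.sha256(), 0
    with open(blob_path, "rb") as blob:
        for chunk in iter(lambda: blob.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return total == expected_size and digest.hexdigest() == expected_oid

# verify_blob("pytorch_model.bin.pointer", "pytorch_model.bin")  # placeholder paths
```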
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
spiece.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0015189ef36359283fec8b93cf6d9ce51bca37eb1101defc68a53b394913b96c
+size 1912529
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+{"model_max_length": 60, "special_tokens_map_file": null, "full_tokenizer_file": null}
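`tokenizer_config.json` pins `model_max_length` to 60, and `special_tokens_map.json` above supplies the `</s>`, `<unk>` and `<pad>` tokens, so tokenizing with `truncation=True` caps every input at the 60 positions the model config supports. A short sketch, again assuming a local clone at `./`:

```
# Sketch: the uploaded tokenizer files in action (local clone path assumed).
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("./")        # needs spiece.model + the JSON files
print(tok.model_max_length)                         # 60, from tokenizer_config.json
print(tok.eos_token, tok.unk_token, tok.pad_token)  # </s> <unk> <pad>

ids = tok("a deliberately long input " * 50, truncation=True)["input_ids"]
print(len(ids))                                     # capped at 60
```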