Update README.md
Browse files
README.md
CHANGED
@@ -19,7 +19,7 @@ https://github.com/JunnYu/RoFormer_pytorch
|
|
19 |
import torch
|
20 |
from transformers import RoFormerForMaskedLM, RoFormerTokenizer
|
21 |
|
22 |
-
text = "今天[MASK]…"  <!-- removed line truncated in extraction; closing quote restored -->
|
23 |
tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
|
24 |
pt_model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
|
25 |
pt_inputs = tokenizer(text, return_tensors="pt")
|
@@ -34,14 +34,14 @@ for i, id in enumerate(tokenizer.encode(text)):
|
|
34 |
pt_outputs_sentence += "".join(
|
35 |
tokenizer.convert_ids_to_tokens([id], skip_special_tokens=True))
|
36 |
print(pt_outputs_sentence)
|
37 |
-
# pytorch 今天[…]  <!-- removed line truncated in extraction; bracket closed -->
|
38 |
```
|
39 |
|
40 |
## tensorflow2.0使用
|
41 |
```python
|
42 |
import tensorflow as tf
|
43 |
from transformers import RoFormerTokenizer, TFRoFormerForMaskedLM
|
44 |
-
text = "今天[MASK]…"  <!-- removed line truncated in extraction; closing quote restored -->
|
45 |
tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
|
46 |
tf_model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
|
47 |
tf_inputs = tokenizer(text, return_tensors="tf")
|
@@ -56,7 +56,7 @@ for i, id in enumerate(tokenizer.encode(text)):
|
|
56 |
tf_outputs_sentence += "".join(
|
57 |
tokenizer.convert_ids_to_tokens([id], skip_special_tokens=True))
|
58 |
print(tf_outputs_sentence)
|
59 |
-
# tf2.0 今天[…]  <!-- removed line truncated in extraction; bracket closed -->
|
60 |
```
|
61 |
|
62 |
## 引用
|
|
|
19 |
import torch
|
20 |
from transformers import RoFormerForMaskedLM, RoFormerTokenizer
|
21 |
|
22 |
+
text = "今天[MASK]很好,我想去公园玩!"
|
23 |
tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
|
24 |
pt_model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
|
25 |
pt_inputs = tokenizer(text, return_tensors="pt")
|
|
|
34 |
pt_outputs_sentence += "".join(
|
35 |
tokenizer.convert_ids_to_tokens([id], skip_special_tokens=True))
|
36 |
print(pt_outputs_sentence)
|
37 |
+
# pytorch: 今天[天气||天||阳光||太阳||空气]很好,我想去公园玩!
|
38 |
```
|
39 |
|
40 |
## tensorflow2.0使用
|
41 |
```python
|
42 |
import tensorflow as tf
|
43 |
from transformers import RoFormerTokenizer, TFRoFormerForMaskedLM
|
44 |
+
text = "今天[MASK]很好,我想去公园玩!"
|
45 |
tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
|
46 |
tf_model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
|
47 |
tf_inputs = tokenizer(text, return_tensors="tf")
|
|
|
56 |
tf_outputs_sentence += "".join(
|
57 |
tokenizer.convert_ids_to_tokens([id], skip_special_tokens=True))
|
58 |
print(tf_outputs_sentence)
|
59 |
+
# tf2.0: 今天[天气||天||阳光||太阳||空气]很好,我想去公园玩!
|
60 |
```
|
61 |
|
62 |
## 引用
|