fix README.md
README.md CHANGED

@@ -14,7 +14,7 @@ metrics:
 - accuracy
 mask_token: "[MASK]"
 widget:
-- text: "
+- text: "京都 大学 で 自然 言語 処理 を [MASK] する 。"
 ---
 
 # Model Card for Japanese DeBERTa V2 base
@@ -32,7 +32,7 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
 tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-base-japanese')
 model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-base-japanese')
 
-sentence = '
+sentence = '京都 大学 で 自然 言語 処理 を [MASK] する 。'  # input should be segmented into words by Juman++ in advance
 encoding = tokenizer(sentence, return_tensors='pt')
 ...
 ```
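The snippet the diff touches ends with an ellipsis. Below is a minimal sketch of how the fill-mask usage might continue, assuming the model card's setup above; the new example sentence reads roughly "I [MASK] natural language processing at Kyoto University." The `torch.no_grad` forward pass and the argmax decoding of the `[MASK]` position are illustrative assumptions, not part of the README.

```python
# Sketch of the elided fill-mask steps; decoding strategy is an assumption.
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-base-japanese')
model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-base-japanese')

# Input must be segmented into words by Juman++ in advance (per the README).
sentence = '京都 大学 で 自然 言語 処理 を [MASK] する 。'
encoding = tokenizer(sentence, return_tensors='pt')

with torch.no_grad():
    logits = model(**encoding).logits  # shape: (1, seq_len, vocab_size)

# Locate the [MASK] position and decode the highest-scoring token there.
mask_positions = (encoding.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```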