KoichiYasuoka
committed on
Commit
•
275916f
1
Parent(s):
ca84192
bug fix
Browse files
README.md
CHANGED
@@ -24,7 +24,8 @@ from transformers import AutoTokenizer,AutoModelForTokenClassification,TokenClas
|
|
24 |
tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-small-japanese-luw-upos")
|
25 |
model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-small-japanese-luw-upos")
|
26 |
pipeline=TokenClassificationPipeline(tokenizer=tokenizer,model=model,aggregation_strategy="simple")
|
27 |
-
|
|
|
28 |
```
|
29 |
|
30 |
## See Also
|
|
|
24 |
tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/roberta-small-japanese-luw-upos")
|
25 |
model=AutoModelForTokenClassification.from_pretrained("KoichiYasuoka/roberta-small-japanese-luw-upos")
|
26 |
pipeline=TokenClassificationPipeline(tokenizer=tokenizer,model=model,aggregation_strategy="simple")
|
27 |
+
nlp=lambda x:[(x[t["start"]:t["end"]],t["entity_group"]) for t in pipeline(x)]
|
28 |
+
print(nlp("国境の長いトンネルを抜けると雪国であった。"))
|
29 |
```
|
30 |
|
31 |
## See Also
|