KoichiYasuoka
committed on
Commit
•
a907dea
1
Parent(s):
f350fce
universal-dependencies pipeline
Browse files- README.md +8 -0
- config.json +5 -0
- ud.py +35 -0
README.md
CHANGED
@@ -57,4 +57,12 @@ nlp=UDgoeswith("KoichiYasuoka/roberta-large-japanese-aozora-ud-goeswith")
|
|
57 |
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
|
58 |
```
|
59 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
[ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) is required.
|
|
|
57 |
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
|
58 |
```
|
59 |
|
60 |
+
or
|
61 |
+
|
62 |
+
```
|
63 |
+
from transformers import pipeline
|
64 |
+
nlp=pipeline("universal-dependencies","KoichiYasuoka/roberta-large-japanese-aozora-ud-goeswith",trust_remote_code=True)
|
65 |
+
print(nlp("全学年にわたって小学校の国語の教科書に挿し絵が用いられている"))
|
66 |
+
```
|
67 |
+
|
68 |
[ufal.chu-liu-edmonds](https://pypi.org/project/ufal.chu-liu-edmonds/) is required.
|
config.json
CHANGED
@@ -5,6 +5,11 @@
|
|
5 |
"attention_probs_dropout_prob": 0.1,
|
6 |
"bos_token_id": 2,
|
7 |
"classifier_dropout": null,
|
|
|
|
|
|
|
|
|
|
|
8 |
"eos_token_id": 3,
|
9 |
"hidden_act": "gelu",
|
10 |
"hidden_dropout_prob": 0.1,
|
|
|
5 |
"attention_probs_dropout_prob": 0.1,
|
6 |
"bos_token_id": 2,
|
7 |
"classifier_dropout": null,
|
8 |
+
"custom_pipelines": {
|
9 |
+
"universal-dependencies": {
|
10 |
+
"impl": "ud.UniversalDependenciesPipeline"
|
11 |
+
}
|
12 |
+
},
|
13 |
"eos_token_id": 3,
|
14 |
"hidden_act": "gelu",
|
15 |
"hidden_dropout_prob": 0.1,
|
ud.py
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import TokenClassificationPipeline
|
2 |
+
from transformers.pipelines import PIPELINE_REGISTRY
|
3 |
+
|
4 |
+
class UniversalDependenciesPipeline(TokenClassificationPipeline):
    """Pipeline that outputs Universal Dependencies parses in CoNLL-U format.

    Overrides ``_forward`` to score every (dependent, head) pair via masked
    re-scoring of the input, and ``postprocess`` to decode a single-rooted
    dependency tree with the Chu-Liu-Edmonds maximum-spanning-tree solver
    (``ufal.chu_liu_edmonds``) and render it as CoNLL-U text.
    """
    def _forward(self,model_input):
        # Local import: torch is only needed at inference time.
        import torch
        v=model_input["input_ids"][0].tolist()
        with torch.no_grad():
            # Build one batch row per inner token i (the two outer special
            # tokens are skipped): token i is replaced by [MASK] and its
            # original id j is appended after the trailing special token.
            # NOTE(review): presumably the appended copy lets the model know
            # which token was masked ("goeswith" scoring trick) — confirm
            # against the model card.
            e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)]))
        # Slice off the leading special token and the trailing special token
        # plus the appended copy, so logits align 1:1 with the inner tokens.
        return {"logits":e.logits[:,1:-2,:],**model_input}
    def postprocess(self,model_output,**kwargs):
        import numpy
        import ufal.chu_liu_edmonds
        # e has shape (n, n, num_labels) for n inner tokens: e[i, j, k] is the
        # score of label k for dependent i with candidate head j.
        e=model_output["logits"].numpy()
        # Per-label selector built from id2label: label id 0 -> 1 (never
        # permitted), labels ending in "|root" -> -1 (permitted only on the
        # diagonal, i.e. self-loop = root), all others -> 0 (off-diagonal only).
        r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
        # identity(n) + r == 0 exactly where a label is permitted; every other
        # cell becomes NaN so nanmax/nanargmax below ignore it.
        e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
        # m is the (n+1) x (n+1) arc-score matrix with index 0 reserved for
        # the artificial ROOT node; p holds the argmax label id per arc.
        # NOTE(review): layout appears to be m[dependent, head] as expected by
        # ufal.chu_liu_edmonds — verify against that package's docs.
        m=numpy.full((e.shape[0]+1,e.shape[1]+1),numpy.nan)
        m[1:,1:]=numpy.nanmax(e,axis=2).transpose()
        p=numpy.zeros(m.shape)
        p[1:,1:]=numpy.nanargmax(e,axis=2).transpose()
        # Move each diagonal (root) score into column 0 so a root attachment
        # becomes an arc from ROOT, and ban the self-loop itself.
        for i in range(1,m.shape[0]):
            m[i,0],m[i,i],p[i,0]=m[i,i],numpy.nan,p[i,i]
        h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
        # Enforce exactly one root: if ROOT received zero or several
        # dependents, keep only the best-scoring ROOT arc and decode again.
        if [0 for i in h if i==0]!=[0]:
            m[:,0]+=numpy.where(m[:,0]<numpy.nanmax(m[:,0]),numpy.nan,0)
            h=ufal.chu_liu_edmonds.chu_liu_edmonds(m)[0]
        t=model_output["sentence"]
        u="# text = "+t+"\n"
        # Character spans of the real tokens (drops special/empty offsets).
        v=[(s,e) for s,e in model_output["offset_mapping"][0].tolist() if s<e]
        for i,(s,e) in enumerate(v,1):
            # Label strings look like "UPOS|feat=..|..|deprel"; split into the
            # UPOS (first), FEATS (middle) and DEPREL (last) columns.
            q=self.model.config.id2label[p[i,h[i]]].split("|")
            # CoNLL-U columns: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS
            # MISC; SpaceAfter=No unless the next token starts after this
            # token's end offset.
            u+="\t".join([str(i),t[s:e],"_",q[0],"_","|".join(q[1:-1]),str(h[i]),q[-1],"_","_" if i<len(v) and e<v[i][0] else "SpaceAfter=No"])+"\n"
        return u+"\n"
|
34 |
+
|
35 |
+
# Module-level side effect: registers the custom "universal-dependencies" task
# so transformers.pipeline("universal-dependencies", ..., trust_remote_code=True)
# resolves to UniversalDependenciesPipeline.
PIPELINE_REGISTRY.register_pipeline("universal-dependencies",pipeline_class=UniversalDependenciesPipeline)
|