KoichiYasuoka committed
Commit ef65075
1 Parent(s): e1e0bfa

initial release

Files changed (11)
  1. README.md +42 -0
  2. config.json +0 -0
  3. maker.py +75 -0
  4. merges.txt +0 -0
  5. pytorch_model.bin +3 -0
  6. special_tokens_map.json +51 -0
  7. supar.model +3 -0
  8. tokenizer.json +0 -0
  9. tokenizer_config.json +55 -0
  10. upos.py +41 -0
  11. vocab.json +0 -0
README.md ADDED
@@ -0,0 +1,42 @@
+ ---
+ language:
+ - "sr"
+ tags:
+ - "serbian"
+ - "token-classification"
+ - "pos"
+ - "dependency-parsing"
+ base_model: jerteh/gpt2-orao
+ datasets:
+ - "universal_dependencies"
+ license: "cc-by-sa-4.0"
+ pipeline_tag: "token-classification"
+ widget:
+ - text: "Да има сира и масла и моја би мати знала гибати гибаницу."
+ - text: "Da ima sira i masla i moja bi mati znala gibati gibanicu."
+ ---
+
+ # gpt2-large-serbian-upos
+
+ ## Model Description
+
+ This is a GPT-2 model for POS-tagging and dependency-parsing of Serbian text (both Cyrillic and Latin scripts), derived from [gpt2-orao](https://huggingface.co/jerteh/gpt2-orao). Every word is tagged with its [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech) tag and [FEATS](https://universaldependencies.org/u/feat/) (morphological features).
+
+ ## How to Use
+
+ ```py
+ from transformers import pipeline
+ nlp=pipeline("upos","KoichiYasuoka/gpt2-large-serbian-upos",trust_remote_code=True,aggregation_strategy="simple")
+ ```
+
+ or
+
+ ```py
+ import esupar
+ nlp=esupar.load("KoichiYasuoka/gpt2-large-serbian-upos")
+ ```
+
+ ## See Also
+
+ [esupar](https://github.com/KoichiYasuoka/esupar): Tokenizer, POS-tagger, and dependency-parser with BERT/RoBERTa/DeBERTa/GPT models
+
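As a quick check of the `upos` pipeline shown in the README above, here is a minimal usage sketch (not part of this commit); the `text`, `entity_group` and `score` keys are the ones produced by the custom pipeline in `upos.py` further down.

```py
# Minimal usage sketch of the "upos" pipeline from the README (run once the model is published)
from transformers import pipeline
nlp=pipeline("upos","KoichiYasuoka/gpt2-large-serbian-upos",trust_remote_code=True,aggregation_strategy="simple")
for t in nlp("Да има сира и масла и моја би мати знала гибати гибаницу."):
  # each aggregated span carries the surface text, its UPOS(+FEATS) tag, and a score
  print(t["text"],t["entity_group"],round(float(t["score"]),3))
```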
config.json ADDED
The diff for this file is too large to render. See raw diff
 
maker.py ADDED
@@ -0,0 +1,75 @@
+ #! /usr/bin/python3
+ src="jerteh/gpt2-orao"
+ tgt="KoichiYasuoka/gpt2-large-serbian-upos"
+
+ import os
+ from transformers import AutoTokenizer,AutoConfig,GPT2ForTokenClassification,DataCollatorForTokenClassification,TrainingArguments,Trainer
+ from tokenizers.pre_tokenizers import Sequence,Punctuation
+ for d in ["UD_Serbian-SET","UD_Croatian-SET"]:
+   os.system("test -d "+d+" || git clone --depth=1 https://github.com/UniversalDependencies/"+d)
+ os.system("for F in train dev test ; do cat UD_*-SET/*-$F.conllu > $F.conllu ; done")
+
+ class UPOSFileDataset(object):
+   def __init__(self,conllu,tokenizer):
+     self.conllu=open(conllu,"r",encoding="utf-8")
+     self.tokenizer=tokenizer
+     self.seeks=[0]
+     label=set(["SYM"])
+     s=self.conllu.readline()
+     while s!="":
+       if s=="\n":
+         self.seeks.append(self.conllu.tell())
+       else:
+         w=s.split("\t")
+         if len(w)==10:
+           if w[0].isdecimal():
+             label.add(w[3] if w[5]=="_" else w[3]+"|"+w[5])
+       s=self.conllu.readline()
+     lid={}
+     for i,l in enumerate(sorted(label)):
+       lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
+     self.label2id=lid
+   def __call__(*args):
+     lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
+     for t in args:
+       t.label2id=lid
+     return lid
+   def __del__(self):
+     self.conllu.close()
+   __len__=lambda self:len(self.seeks)-1
+   def __getitem__(self,i):
+     self.conllu.seek(self.seeks[i])
+     form,upos,sp=[],[],False
+     while self.conllu.tell()<self.seeks[i+1]:
+       w=self.conllu.readline().split("\t")
+       if len(w)==10:
+         form.append(" "+w[1] if sp else w[1])
+         if w[0].isdecimal():
+           upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
+         sp=w[9].find("SpaceAfter=No")<0
+     v=self.tokenizer(form,add_special_tokens=False)
+     i,u=[self.tokenizer.cls_token_id],["SYM"]
+     for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
+       if x!=[]:
+         i+=x
+         u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
+     if len(i)<self.tokenizer.model_max_length-3:
+       ids=i+[self.tokenizer.sep_token_id]
+       upos=u+["SYM"]
+     else:
+       ids=i[0:self.tokenizer.model_max_length-2]
+       upos=u[0:self.tokenizer.model_max_length-2]
+     return {"input_ids":ids,"labels":[self.label2id[t] for t in upos]}
+
+ tkz=AutoTokenizer.from_pretrained(src,cls_token="<s>",pad_token="<pad>",sep_token="</s>",unk_token="<unk>",mask_token="<mask>",bos_token="<s>",eos_token="</s>",model_max_length=1024)
+ tkz.backend_tokenizer.pre_tokenizer=Sequence([Punctuation(),tkz.backend_tokenizer.pre_tokenizer])
+ trainDS=UPOSFileDataset("train.conllu",tkz)
+ devDS=UPOSFileDataset("dev.conllu",tkz)
+ testDS=UPOSFileDataset("test.conllu",tkz)
+ lid=trainDS(devDS,testDS)
+ cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
+ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=16,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
+ trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=GPT2ForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS)
+ trn.train()
+ trn.save_model(tgt)
+ tkz.save_pretrained(tgt)
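maker.py concatenates the UD_Serbian-SET and UD_Croatian-SET treebanks and turns each CoNLL-U sentence into a token-classification example: a word keeps its UPOS tag (joined with FEATS when present) if it maps to a single subword, and is expanded to `B-`/`I-` variants when the tokenizer splits it. A self-contained sketch of just that labelling step, with a made-up word and tag:

```py
# Illustration of the B-/I- expansion done in UPOSFileDataset.__getitem__ (values are made up)
subwords=["гиб","аницу"]                      # one treebank word split into two subword tokens
tag="NOUN|Case=Acc|Gender=Fem|Number=Sing"    # UPOS joined with FEATS, as in the dataset class
labels=[tag] if len(subwords)==1 else ["B-"+tag]+["I-"+tag]*(len(subwords)-1)
print(labels)                                 # ['B-NOUN|Case=...', 'I-NOUN|Case=...']
```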
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11e79c31736f0b0cf2bcaf5dc509f7b65255482b49262ab9cbe53c807db57386
+ size 3104317858
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
supar.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e36285110f969df8e894fd9756d8a6567554677aa28c3ce79043791978c21814
+ size 3144851074
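supar.model holds the weights that esupar uses for full dependency parsing. Assuming the usual esupar interface (load a model, call it on raw text, print the parse), a short sketch:

```py
# Sketch of dependency parsing via esupar (assumes `pip install esupar` and the published model)
import esupar
nlp=esupar.load("KoichiYasuoka/gpt2-large-serbian-upos")
doc=nlp("Da ima sira i masla i moja bi mati znala gibati gibanicu.")
print(doc)   # one token per line with UPOS and head/deprel columns
```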
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "<mask>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "model_max_length": 1024,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "GPT2TokenizerFast",
+   "unk_token": "<unk>"
+ }
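tokenizer_config.json records the special-token remapping performed in maker.py: the GPT-2 tokens `<s>` and `</s>` double as `cls_token` and `sep_token`, with `<mask>` and a 1024-token limit added. A purely illustrative check that the published tokenizer exposes them as expected:

```py
# Illustrative check of the special-token remapping recorded in tokenizer_config.json
from transformers import AutoTokenizer
tkz=AutoTokenizer.from_pretrained("KoichiYasuoka/gpt2-large-serbian-upos")
print(tkz.cls_token,tkz.sep_token,tkz.mask_token,tkz.model_max_length)   # expected: <s> </s> <mask> 1024
```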
upos.py ADDED
@@ -0,0 +1,41 @@
+ from transformers import TokenClassificationPipeline
+
+ class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
+   def __init__(self,**kwargs):
+     import numpy
+     super().__init__(**kwargs)
+     x=self.model.config.label2id
+     y=[k for k in x if not k.startswith("I-")]
+     self.transition=numpy.full((len(x),len(x)),numpy.nan)
+     for k,v in x.items():
+       for j in ["I-"+k[2:]] if k.startswith("B-") else [k]+y if k.startswith("I-") else y:
+         self.transition[v,x[j]]=0
+   def check_model_type(self,supported_models):
+     pass
+   def postprocess(self,model_outputs,**kwargs):
+     import numpy
+     if "logits" not in model_outputs:
+       return self.postprocess(model_outputs[0],**kwargs)
+     m=model_outputs["logits"][0].numpy()
+     e=numpy.exp(m-numpy.max(m,axis=-1,keepdims=True))
+     z=e/e.sum(axis=-1,keepdims=True)
+     for i in range(m.shape[0]-1,0,-1):
+       m[i-1]+=numpy.nanmax(m[i]+self.transition,axis=1)
+     k=[numpy.nanargmax(m[0]+self.transition[0])]
+     for i in range(1,m.shape[0]):
+       k.append(numpy.nanargmax(m[i]+self.transition[k[-1]]))
+     w=[{"entity":self.model.config.id2label[j],"start":s,"end":e,"score":z[i,j]} for i,((s,e),j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(),k)) if s<e]
+     if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
+       for i,t in reversed(list(enumerate(w))):
+         p=t.pop("entity")
+         if p.startswith("I-"):
+           w[i-1]["score"]=min(w[i-1]["score"],t["score"])
+           w[i-1]["end"]=w.pop(i)["end"]
+         elif p.startswith("B-"):
+           t["entity_group"]=p[2:]
+         else:
+           t["entity_group"]=p
+     for t in w:
+       t["text"]=model_outputs["sentence"][t["start"]:t["end"]]
+     return w
+
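upos.py decodes with a NaN-masked transition matrix: label bigrams that would break the `B-`/`I-` scheme are NaN, a backward pass accumulates the best reachable score per label, and a forward pass picks the argmax path, mirroring the `nanmax`/`nanargmax` calls in `postprocess`. A toy, self-contained sketch of that decoding with made-up labels and scores:

```py
# Toy sketch of the NaN-masked decoding in upos.py (labels and scores below are made up)
import numpy
labels=["NOUN","B-NOUN","I-NOUN","VERB"]
transition=numpy.full((4,4),numpy.nan)
transition[0,[0,1,3]]=0   # from a plain tag: any non-I- tag may follow
transition[1,2]=0         # from B-NOUN: only I-NOUN may follow
transition[2,:]=0         # from I-NOUN: the span may continue, or any non-I- tag may follow (all four here)
transition[3,[0,1,3]]=0   # from VERB: any non-I- tag may follow
m=numpy.array([[1.,4.,0.,2.],[0.,1.,5.,1.],[3.,0.,1.,2.]])   # per-token label scores (3 tokens)
for i in range(m.shape[0]-1,0,-1):
  m[i-1]+=numpy.nanmax(m[i]+transition,axis=1)               # backward pass, as in postprocess()
k=[int(numpy.nanargmax(m[0]+transition[0]))]
for i in range(1,m.shape[0]):
  k.append(int(numpy.nanargmax(m[i]+transition[k[-1]])))     # forward pass obeying transitions
print([labels[j] for j in k])                                # -> ['B-NOUN', 'I-NOUN', 'NOUN']
```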
vocab.json ADDED
The diff for this file is too large to render. See raw diff