KoichiYasuoka committed on
Commit 0c2109b · 1 Parent(s): 204ab84

model improved

Files changed (6)
  1. config.json +2 -2
  2. maker.py +19 -16
  3. oldtokenizer.json +0 -0
  4. pytorch_model.bin +1 -1
  5. tokenizer_config.json +1 -1
  6. ud.py +8 -2
config.json CHANGED
@@ -365,9 +365,9 @@
   "rotary_emb_base": 10000,
   "rotary_pct": 1.0,
   "tie_word_embeddings": false,
-  "tokenizer_class": "PreTrainedTokenizerFast",
+  "tokenizer_class": "GPTNeoXTokenizerFast",
   "torch_dtype": "float32",
-  "transformers_version": "4.42.4",
+  "transformers_version": "4.44.2",
   "use_cache": true,
   "use_parallel_residual": false,
   "vocab_size": 52096
maker.py CHANGED
@@ -4,13 +4,14 @@ tgt="KoichiYasuoka/open-calm-large-ud-causal"
 url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
 
 import os,json,unicodedata
-from transformers import AutoTokenizer,PreTrainedTokenizerFast,AutoConfig,GPTNeoXForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
+from transformers import AutoTokenizer,AutoConfig,GPTNeoXForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
 d=os.path.basename(url)
 os.system("test -d "+d+" || git clone --depth=1 "+url)
 os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
-tkz=AutoTokenizer.from_pretrained(src,cls_token="<|endoftext|>",sep_token="<|endoftext|>",mask_token="<|endoftext|>",model_max_length=2048)
-tkz.save_pretrained("tmpdir")
-d=json.loads(tkz.backend_tokenizer.to_str())
+otk=AutoTokenizer.from_pretrained(src,cls_token="<|endoftext|>",sep_token="<|endoftext|>",mask_token="<|endoftext|>",model_max_length=2048)
+otk.save_pretrained("tmpdir")
+os.rename("tmpdir/tokenizer.json","tmpdir/oldtokenizer.json")
+d=json.loads(otk.backend_tokenizer.to_str())
 form=set()
 with open("train.conllu","r",encoding="utf-8") as r:
   for s in r:
@@ -18,20 +19,21 @@ with open("train.conllu","r",encoding="utf-8") as r:
     if len(w)==10 and w[0].isdecimal():
       form.add(w[1])
 m=[t for t in d["model"]["merges"] if len(t)<5]
-for i in range(len(tkz)):
-  w=tkz.decode(i)
+for i in range(len(otk)):
+  w=otk.decode(i)
   if len(w)==2 and w in form and not unicodedata.name(w[0]).startswith("HIRAGANA"):
-    k=tkz([w[0],w[1]],add_special_tokens=False)["input_ids"]
+    k=otk([w[0],w[1]],add_special_tokens=False)["input_ids"]
     if len(k[0])==1 and len(k[1])==1:
-      m.append(" ".join(tkz.convert_ids_to_tokens([k[0][0],k[1][0]])))
+      m.append(" ".join(otk.convert_ids_to_tokens([k[0][0],k[1][0]])))
 d["model"]["merges"]=m
-tkz.backend_tokenizer.from_str(json.dumps(d)).save("tmpdir/tokenizer.json")
-tkz=PreTrainedTokenizerFast.from_pretrained("tmpdir")
+otk.backend_tokenizer.from_str(json.dumps(d)).save("tmpdir/tokenizer.json")
+ntk=AutoTokenizer.from_pretrained("tmpdir")
 
 class UDCausalDataset(object):
-  def __init__(self,conllu,tokenizer,embeddings=None):
+  def __init__(self,conllu,tokenizer,oldtokenizer=None,embeddings=None):
     self.conllu=open(conllu,"r",encoding="utf-8")
     self.tokenizer=tokenizer
+    self.oldtokenizer=oldtokenizer if oldtokenizer else tokenizer
     self.embeddings=embeddings
     self.max_tokens=3
     self.seeks=[(0,0)]
@@ -76,8 +78,8 @@ class UDCausalDataset(object):
         if w[0].isdecimal():
           upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
           deps.append((int(w[6]),w[7]))
-    v=self.tokenizer(form,add_special_tokens=False)
     if t==0:
+      v=self.tokenizer(form,add_special_tokens=False)
       i,u=[],[]
       for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
         if x!=[]:
@@ -87,6 +89,7 @@
       pad=self.tokenizer.pad_token_id
     else:
       import torch
+      v=self.oldtokenizer(form,add_special_tokens=False)
       m=[]
       for x in v["input_ids"]:
         if x==[]:
@@ -114,9 +117,9 @@
       upos=u[0:self.max_tokens]
     return {"inputs_embeds":emb[ids,:],"labels":[self.label2id[p] for p in upos]}
 
-trainDS=UDCausalDataset("train.conllu",tkz)
-devDS=UDCausalDataset("dev.conllu",tkz)
-testDS=UDCausalDataset("test.conllu",tkz)
+trainDS=UDCausalDataset("train.conllu",ntk,otk)
+devDS=UDCausalDataset("dev.conllu",ntk,otk)
+testDS=UDCausalDataset("test.conllu",ntk,otk)
 lid=trainDS(devDS,testDS)
 cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
 mdl=GPTNeoXForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True)
@@ -126,4 +129,4 @@ arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=24,dataload
 trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
 trn.train()
 trn.save_model(tgt)
-tkz.save_pretrained(tgt)
+ntk.save_pretrained(tgt)
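In short, maker.py now builds two tokenizers: otk keeps the original open-calm tokenizer (its tokenizer.json is preserved as oldtokenizer.json), while ntk is rebuilt with extra two-character merges harvested from the UD_Japanese-GSDLUW training forms; UDCausalDataset tokenizes with ntk in the plain-id branch and with otk when pooling input embeddings. A hedged sketch of the intended difference, where the example word is only an assumption (any two-character non-hiragana form from train.conllu would do):

from transformers import AutoTokenizer
ntk=AutoTokenizer.from_pretrained("tmpdir")                                            # rebuilt merges (tokenizer.json)
otk=AutoTokenizer.from_pretrained("tmpdir",tokenizer_file="tmpdir/oldtokenizer.json")  # original merges
w="国民"  # hypothetical two-character training form, for illustration only
print(ntk(w,add_special_tokens=False)["input_ids"])  # ideally a single id, thanks to the added merge
print(otk(w,add_special_tokens=False)["input_ids"])  # may remain two ids under the original merges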
oldtokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e39727b5aa1208b9b92464e401237c5a3d3eefbc9412ff5c9c5998452815e8ae
+oid sha256:0b015a23a6f172bc344853f1c41560a535c528d7201022fe046e8868673ee0b5
 size 3041029982
tokenizer_config.json CHANGED
@@ -28,6 +28,6 @@
   "model_max_length": 2048,
   "pad_token": "<|padding|>",
   "sep_token": "<|endoftext|>",
-  "tokenizer_class": "PreTrainedTokenizerFast",
+  "tokenizer_class": "GPTNeoXTokenizerFast",
   "unk_token": "<|endoftext|>"
 }
ud.py CHANGED
@@ -1,5 +1,10 @@
 import numpy
-from transformers import TokenClassificationPipeline
+from transformers import TokenClassificationPipeline,AutoTokenizer
+try:
+  from transformers.utils import cached_file
+except:
+  from transformers.file_utils import cached_path,hf_bucket_url
+  cached_file=lambda x,y:os.path.join(x,y) if os.path.isdir(x) else cached_path(hf_bucket_url(x,y))
 
 class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
   def __init__(self,**kwargs):
@@ -42,6 +47,7 @@ class UniversalDependenciesCausalPipeline(BellmanFordTokenClassificationPipeline
   def __init__(self,**kwargs):
     kwargs["aggregation_strategy"]="simple"
     super().__init__(**kwargs)
+    self.oldtokenizer=AutoTokenizer.from_pretrained(self.tokenizer.name_or_path,tokenizer_file=cached_file(self.tokenizer.name_or_path,"oldtokenizer.json"))
     x=self.model.config.label2id
     self.root=numpy.full((len(x)),numpy.nan)
     self.left_arc=numpy.full((len(x)),numpy.nan)
@@ -87,7 +93,7 @@ class UniversalDependenciesCausalPipeline(BellmanFordTokenClassificationPipeline
       if d[i].strip()=="":
         d.pop(i)
         w.pop(i)
-    v=self.tokenizer(d,add_special_tokens=False)
+    v=self.oldtokenizer(d,add_special_tokens=False)
     e=self.model.get_input_embeddings().weight
     m=[]
     for x in v["input_ids"]:
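The new import block gives ud.py a cached_file helper that works both with a local model directory and with a hub repo id, so the pipeline can fetch oldtokenizer.json and rebuild the original tokenizer alongside the retokenized one. A minimal sketch of that lookup, assuming the files are available under the tgt repo id from maker.py:

import os
from transformers import AutoTokenizer
from transformers.utils import cached_file
repo="KoichiYasuoka/open-calm-large-ud-causal"
path=cached_file(repo,"oldtokenizer.json")                       # local path of the downloaded (or cached) file
oldtkz=AutoTokenizer.from_pretrained(repo,tokenizer_file=path)   # original segmentation, as rebuilt in UniversalDependenciesCausalPipeline.__init__
print(os.path.isfile(path),type(oldtkz).__name__)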