#! /usr/bin/python3
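# Fine-tune cyberagent/open-calm-large for Universal Dependencies POS and
# dependency-arc token classification on UD_Japanese-GSDLUW; the result is
# saved as KoichiYasuoka/open-calm-large-ud-causal.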
src="cyberagent/open-calm-large"
tgt="KoichiYasuoka/open-calm-large-ud-causal"
url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
import os,json,unicodedata
from transformers import AutoTokenizer,AutoConfig,GPTNeoXForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
d=os.path.basename(url)
os.system("test -d "+d+" || git clone --depth=1 "+url)
os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
otk=AutoTokenizer.from_pretrained(src,cls_token="<|endoftext|>",sep_token="<|endoftext|>",mask_token="<|endoftext|>",model_max_length=2048)
otk.save_pretrained("tmpdir")
os.rename("tmpdir/tokenizer.json","tmpdir/oldtokenizer.json")
d=json.loads(otk.backend_tokenizer.to_str())
form=set()
with open("train.conllu","r",encoding="utf-8") as r:
for s in r:
w=s.split("\t")
if len(w)==10 and w[0].isdecimal():
form.add(w[1])
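# Keep only the shortest merge rules, then add a merge for every two-character
# training form not beginning with hiragana whose two characters are each a
# single token, presumably so that such words come out as single tokens.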
m=[t for t in d["model"]["merges"] if len(t)<5]
for i in range(len(otk)):
  w=otk.decode(i)
  if len(w)==2 and w in form and not unicodedata.name(w[0]).startswith("HIRAGANA"):
    k=otk([w[0],w[1]],add_special_tokens=False)["input_ids"]
    if len(k[0])==1 and len(k[1])==1:
      m.append(" ".join(otk.convert_ids_to_tokens([k[0][0],k[1][0]])))
d["model"]["merges"]=m
otk.backend_tokenizer.from_str(json.dumps(d)).save("tmpdir/tokenizer.json")
ntk=AutoTokenizer.from_pretrained("tmpdir")
class UDCausalDataset(object):
  def __init__(self,conllu,tokenizer,oldtokenizer=None,embeddings=None):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.oldtokenizer=oldtokenizer if oldtokenizer else tokenizer
    self.embeddings=embeddings
    self.max_tokens=3
    self.seeks=[(0,0)]
    label=set(["SYM"])
    dep=set()
    # Scan the CoNLL-U file once to record, per sentence, its byte offset with
    # t=0 (POS view) and one (offset,t) entry per token (dependency view),
    # and to collect the label inventory: UPOS(+FEATS) tags plus arc labels
    # (UPOS|deprel for root, UPOS|l-/r-deprel otherwise).
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append((self.conllu.tell(),0))
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            p=w[3] if w[5]=="_" else w[3]+"|"+w[5]
            label.add(p)
            dep.add(p+("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7])
            self.seeks.append((self.seeks[-1][0],int(w[0])))
            self.max_tokens=max(self.max_tokens,int(w[0])*2+1)
      s=self.conllu.readline()
    lid={}
    for i,l in enumerate(sorted(label)):
      lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
    for i,d in enumerate(sorted(dep),len(lid)):
      lid[d]=i
    self.label2id=lid
  def __call__(*args):
    # Merge the label inventories of several datasets into one shared label2id.
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
  def __del__(self):
    self.conllu.close()
  __len__=lambda self:len(self.seeks)-1
  def __getitem__(self,i):
    s,t=self.seeks[i]
    self.conllu.seek(s)
    form,upos,deps,w=[],[],[],[""]
    while w[0]!="\n":
      w=self.conllu.readline().split("\t")
      if len(w)==10:
        form.append(w[1])
        if w[0].isdecimal():
          upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
          deps.append((int(w[6]),w[7]))
    if t==0:
      # POS-tagging view: subword ids from the new tokenizer, with B-/I- labels
      # for words split into several subwords.
      v=self.tokenizer(form,add_special_tokens=False)
      i,u=[],[]
      for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
        if x!=[]:
          i+=x
          u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
      emb=self.embeddings
      pad=self.tokenizer.pad_token_id
    else:
      # Dependency view for word t: one embedding per word (subword embeddings
      # summed), then a separator, then word t and the words after it, labeled
      # with their arcs relative to word t.
      import torch
      v=self.oldtokenizer(form,add_special_tokens=False)
      m=[]
      for x in v["input_ids"]:
        if x==[]:
          m.append(self.embeddings[self.tokenizer.unk_token_id,:])
        else:
          m.append(self.embeddings[x,:].sum(axis=0))
      m.append(self.embeddings[self.tokenizer.sep_token_id,:])
      m.append(self.embeddings[self.tokenizer.pad_token_id,:])
      emb=torch.stack(m)
      i,u=list(range(len(upos)+1)),upos+["SYM"]
      i.append(t-1)
      k,d=deps[t-1]
      u.append(upos[t-1]+"|"+d if k==0 else upos[t-1])
      for j in range(t,len(upos)):
        i.append(j)
        a,b=deps[j]
        u.append(upos[j]+"|r-"+b if a==t else upos[t-1]+"|l-"+d if j+1==k else upos[j])
      pad=-1
    # Pad or truncate to max_tokens; pad positions are labeled "SYM".
    j=self.max_tokens-len(i)
    if j>0:
      ids=i+[pad]*j
      upos=u+["SYM"]*j
    else:
      ids=i[0:self.max_tokens]
      upos=u[0:self.max_tokens]
    return {"inputs_embeds":emb[ids,:],"labels":[self.label2id[p] for p in upos]}
trainDS=UDCausalDataset("train.conllu",ntk,otk)
devDS=UDCausalDataset("dev.conllu",ntk,otk)
testDS=UDCausalDataset("test.conllu",ntk,otk)
lid=trainDS(devDS,testDS)
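# Token-classification head on top of the GPT-NeoX backbone, one logit per
# POS/arc label collected above.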
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
mdl=GPTNeoXForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True)
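# Let the training dataset index the model's input-embedding matrix directly,
# and cap the sequence length at the model's context size.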
trainDS.embeddings=mdl.get_input_embeddings().weight
trainDS.max_tokens=min(trainDS.max_tokens,cfg.max_position_embeddings)
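# Train for 3 epochs and save the fine-tuned model with the modified tokenizer.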
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=24,dataloader_pin_memory=False,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
ntk.save_pretrained(tgt)