#! /usr/bin/python3
src="KoichiYasuoka/modernbert-base-japanese-aozora-luw-upos"
tgt="KoichiYasuoka/modernbert-base-japanese-aozora-ud-embeds"
url="https://github.com/UniversalDependencies/UD_Japanese-GSDLUW"
import os
d=os.path.basename(url)
# clone the UD_Japanese-GSDLUW treebank (unless already present) and copy its
# train/dev/test files into the working directory
os.system("test -d "+d+" || git clone --depth=1 "+url)
os.system("for F in train dev test ; do cp "+d+"/*-$F.conllu $F.conllu ; done")
class UDEmbedsDataset(object):
  def __init__(self,conllu,tokenizer,embeddings=None):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.embeddings=embeddings
    self.seeks=[0]
    label=set(["SYM","SYM."])
    dep=set()
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        # remember where each sentence starts so __getitem__ can seek() to it
        self.seeks.append(self.conllu.tell())
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            p=w[3]
            q="" if w[5]=="_" else "|"+w[5]
            # NOTE: the source drops the span from here down to the x=[...]
            # comprehension in __getitem__; the missing lines below are
            # restored from the surviving parallel constructs
            d=("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7]
            label.add(p)
            label.add(p+".")
            dep.add(p+q+d)
      s=self.conllu.readline()
    lid={}
    for i,l in enumerate(sorted(label)):
      # B-/I- variants for subword-level UPOS tagging
      lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
    for i,k in enumerate(sorted(dep),len(lid)):
      lid[k]=i
    self.label2id=lid
  def __call__(*args):
    # merge the label2id tables of several datasets into one shared table;
    # the instance itself arrives as args[0]
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
  def __del__(self):
    self.conllu.close()
  __len__=lambda self:(len(self.seeks)-1)*2
  def __getitem__(self,i):
    # each sentence yields two examples: a subword-level view (z==0) and a
    # word-embeddings view (z==1)
    s,z=divmod(i,2)
    self.conllu.seek(self.seeks[s])
    c,s=[],self.conllu.readline()
    while s!="" and s!="\n":
      w=s.split("\t")
      if len(w)==10 and w[0].isdecimal():
        c.append(w)
      s=self.conllu.readline()
    # x[j] is True when word j is the root, has its head to its right, or has
    # a dependent to its right, i.e. its arc is still "open" at position j
    x=[True if t[6]=="0" or int(t[6])>j or sum([1 if int(c[i][6])==j+1 else 0 for i in range(j+1,len(c))])>0 else False for j,t in enumerate(c)]
    v=self.tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
    if z==0:
      ids,upos=[self.tokenizer.cls_token_id],["SYM."]
      for i,(j,k) in enumerate(zip(v,c)):
        if j==[]:
          j=[self.tokenizer.unk_token_id]
        # words whose arcs are already closed get a "."-suffixed UPOS
        p=k[3] if x[i] else k[3]+"."
        ids+=j
        upos+=[p] if len(j)==1 else ["B-"+p]+["I-"+p]*(len(j)-1)
      ids.append(self.tokenizer.sep_token_id)
      upos.append("SYM.")
      emb=self.embeddings
    else:
      import torch
      if len(x)<128:
        x=[True]*len(x)
      else:
        # greedily re-enable masked-out words while the estimated example
        # length stays within the 8192-token window
        w=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
        for i in range(len(x)):
          if x[i]==False and w+len(x)-i<8192:
            x[i]=True
            w+=len(x)-i+1
      p=[t[3] if t[5]=="_" else t[3]+"|"+t[5] for t in c]
      d=[t[7] if t[6]=="0" else "l-"+t[7] if int(t[0])<int(t[6]) else "r-"+t[7] for t in c]
      # (the source text breaks off here; the rest of this branch and the
      # training code that follows are missing)
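# ---------------------------------------------------------------------------
# Usage sketch, NOT part of the original script: since the training half of
# the script is cut off above, this is only a minimal, hedged illustration of
# how the dataset class would plausibly be instantiated. It assumes `src` as
# the tokenizer source and the restored __call__ above; the full script would
# presumably also pass embeddings= (the encoder's frozen input-embedding
# matrix) and feed the datasets to a transformers Trainer.
if __name__=="__main__":
  from transformers import AutoTokenizer
  tkz=AutoTokenizer.from_pretrained(src)
  trainDS=UDEmbedsDataset("train.conllu",tkz)
  devDS=UDEmbedsDataset("dev.conllu",tkz)
  testDS=UDEmbedsDataset("test.conllu",tkz)
  # trainDS is passed implicitly as args[0], so this merges all three tables
  lid=trainDS(devDS,testDS)
  print(len(trainDS),"training examples /",len(lid),"labels")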