#! /usr/bin/python3
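# Train KoichiYasuoka/modernbert-base-english-ud-embeds: fine-tune
# answerdotai/ModernBERT-base as a token classifier for Universal
# Dependencies (UPOS tagging plus dependency relations) on five English
# UD treebanks, feeding the model inputs_embeds built from its own
# input-embedding matrix.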
import os,json
tgt="KoichiYasuoka/modernbert-base-english-ud-embeds"
url="https://github.com/UniversalDependencies/UD_English-"
for e in ["EWT","GUM","Atis","ParTUT","LinES"]:
  u=url+e
  d=os.path.basename(u)
  os.system("test -d "+d+" || git clone --depth=1 "+u)
s='BEGIN{FS="\\t";OFS="\\t"};{if(NF==10){if($1~/^[1-9][0-9]*-/){split($1,a,"-");if($10~/SpaceAfter=No/)a[2]++}else if($1-a[1]>=0&&$1-a[2]<0)$10=($10=="_")?"SpaceAfter=No":$10"|SpaceAfter=No"}print}'
os.system("for F in train dev test ; do nawk '"+s+"' UD_English-*/*-$F.conllu > $F.conllu ; done")
os.system("""
if test -d transformers
then :
else git clone --depth=1 https://github.com/huggingface/transformers transformers-all
     ln -s transformers-all/src/transformers transformers
fi
test -d ModernBERT-base || git clone --depth=1 https://huggingface.co/answerdotai/ModernBERT-base
test -f ModernBERT-base/configuration_modernbert.py || sed 's/^from \\.\\.\\./from transformers./' transformers/models/modernbert/configuration_modernbert.py > ModernBERT-base/configuration_modernbert.py
test -f ModernBERT-base/modeling_modernbert.py || sed -e 's/^from \\.\\.\\./from transformers./' -e 's/^from .* import is_triton_available/import importlib\\nis_triton_available = lambda: importlib.util.find_spec("triton") is not None/' transformers/models/modernbert/modeling_modernbert.py > ModernBERT-base/modeling_modernbert.py
""")
with open("ModernBERT-base/config.json","r",encoding="utf-8") as r:
  d=json.load(r)
if not "auto_map" in d:
  d["auto_map"]={
    "AutoConfig":"configuration_modernbert.ModernBertConfig",
    "AutoModel":"modeling_modernbert.ModernBertModel",
    "AutoModelForMaskedLM":"modeling_modernbert.ModernBertForMaskedLM",
    "AutoModelForSequenceClassification":"modeling_modernbert.ModernBertForSequenceClassification",
    "AutoModelForTokenClassification":"modeling_modernbert.ModernBertForTokenClassification"
  }
  with open("ModernBERT-base/config.json","w",encoding="utf-8") as w:
    json.dump(d,w,indent=2)
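# Each sentence yields two training examples:
#   even index - the sentence as a subword sequence between cls/sep,
#                labeled with UPOS (B-/I- prefixes for words that split
#                into several subwords);
#   odd index  - a sequence of word vectors (summed subword embeddings),
#                in which pairs of words are enumerated and labeled with
#                UPOS|FEATS plus the dependency relation (l-/r- marks the
#                arc direction) linking them, if any.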
class UDEmbedsDataset(object):
  def __init__(self,conllu,tokenizer,embeddings=None):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.embeddings=embeddings
    self.seeks=[0]
    label={"SYM","SYM."}
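    # one pass over the file: record the seek offset of every sentence
    # boundary and collect the label inventory for label2id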
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append(self.conllu.tell())
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            p=w[3]
            q="" if w[5]=="_" else "|"+w[5]
            d=("|" if w[6]=="0" else "|l-" if int(w[0])<int(w[6]) else "|r-")+w[7]
            for k in [p,p+".","B-"+p,"B-"+p+".","I-"+p,"I-"+p+".",p+q+"|_",p+q+d]:
              label.add(k)
      s=self.conllu.readline()
    self.label2id={l:i for i,l in enumerate(sorted(label))}
  def __call__(*args):
    # called as trainDS(devDS,testDS): Python passes trainDS as the first
    # positional argument, so args holds all three datasets; their label
    # sets are merged into one shared label2id mapping
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
  def __del__(self):
    self.conllu.close()
  __len__=lambda self:(len(self.seeks)-1)*2  # two examples per sentence
  def __getitem__(self,i):
    self.conllu.seek(self.seeks[i//2])
    z,c,t,s=i%2,[],[""],False
    while t[0]!="\n":
      t=self.conllu.readline().split("\t")
      if len(t)==10 and t[0].isdecimal():
        if s:
          t[1]=" "+t[1]
        c.append(t)
        s=t[9].find("SpaceAfter=No")<0
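    # x[j]: word j is the root or still has an arc (as head or as
    # dependent) reaching to its right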
    x=[t[6]=="0" or int(t[6])>j or any(int(c[i][6])==j+1 for i in range(j+1,len(c))) for j,t in enumerate(c)]
    v=self.tokenizer([t[1] for t in c],add_special_tokens=False)["input_ids"]
    if z==0:
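      # even index: plain subword sequence labeled with UPOS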
      ids,upos=[self.tokenizer.cls_token_id],["SYM."]
      for i,(j,k) in enumerate(zip(v,c)):
        if j==[]:
          j=[self.tokenizer.unk_token_id]
        p=k[3] if x[i] else k[3]+"."
        ids+=j
        upos+=[p] if len(j)==1 else ["B-"+p]+["I-"+p]*(len(j)-1)
      ids.append(self.tokenizer.sep_token_id)
      upos.append("SYM.")
      emb=self.embeddings
    else:
      import torch
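      # odd index: word-level view; x marks the words whose pair rows are
      # kept. Keep every pair for short sentences; for long ones start from
      # the words still having rightward arcs and greedily re-enable the
      # rest while the estimated pair sequence stays under 8192 tokens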
      if len(x)<128:
        x=[True]*len(x)
      else:
        w=sum([len(x)-i+1 if b else 0 for i,b in enumerate(x)])+1
        for i in range(len(x)):
          if not x[i] and w+len(x)-i<8192:
            x[i]=True
            w+=len(x)-i+1
      p=[t[3] if t[5]=="_" else t[3]+"|"+t[5] for t in c]
      d=[t[7] if t[6]=="0" else "l-"+t[7] if int(t[0])<int(t[6]) else "r-"+t[7] for t in c]
      ids,upos=[-1],["SYM|_"]
      for i in range(len(x)):
        if x[i]:
          ids.append(i)
          upos.append(p[i]+"|"+d[i] if c[i][6]=="0" else p[i]+"|_")
          for j in range(i+1,len(x)):
            ids.append(j)
            upos.append(p[j]+"|"+d[j] if int(c[j][6])==i+1 else p[i]+"|"+d[i] if int(c[i][6])==j+1 else p[j]+"|_")
          ids.append(-1)
          upos.append("SYM|_")
      with torch.no_grad():
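        # one row per word: the sum of its subword embeddings; the
        # sep-token row is appended last so that ids of -1 select it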
        m=[]
        for j in v:
          if j==[]:
            j=[self.tokenizer.unk_token_id]
          m.append(self.embeddings[j,:].sum(axis=0))
        m.append(self.embeddings[self.tokenizer.sep_token_id,:])
        emb=torch.stack(m)
    return {"inputs_embeds":emb[ids[:8192],:],"labels":[self.label2id[p] for p in upos[:8192]]}
from transformers import AutoTokenizer,AutoConfig,AutoModelForTokenClassification,DefaultDataCollator,TrainingArguments,Trainer
from tokenizers.pre_tokenizers import Sequence,Split
from tokenizers import Regex
tkz=AutoTokenizer.from_pretrained("ModernBERT-base")
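# split the clitic "n't" into its own pre-token so that tokenization lines
# up with UD word segmentation ("don't" -> "do" + "n't")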
tkz.backend_tokenizer.pre_tokenizer=Sequence([Split(Regex("[nN]['`’][tT]"),"isolated"),tkz.backend_tokenizer.pre_tokenizer])
trainDS=UDEmbedsDataset("train.conllu",tkz)
devDS=UDEmbedsDataset("dev.conllu",tkz)
testDS=UDEmbedsDataset("test.conllu",tkz)
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained("ModernBERT-base",num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True,trust_remote_code=True)
mdl=AutoModelForTokenClassification.from_pretrained("ModernBERT-base",config=cfg,ignore_mismatched_sizes=True,trust_remote_code=True)
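# share the model's input-embedding matrix with the training set, so that
# odd-indexed examples can build their word vectors from it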
trainDS.embeddings=mdl.get_input_embeddings().weight
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=1,dataloader_pin_memory=False,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
trn=Trainer(args=arg,data_collator=DefaultDataCollator(),model=mdl,train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
os.system("cp -p ModernBERT-base/*.py "+tgt)