Upload stanzas_eval.py
stanzas_eval.py (ADDED, +706 lines)
# -*- coding: utf-8 -*-
"""Evaluating models for BibleBERT

Copyright 2021 © Javier de la Rosa
"""

# Dependencies
# !pip install -qU transformers sacrebleu scikit-learn datasets seqeval conllu pyarrow nltk

# Dependencies and helper functions
import argparse
import logging
import os
import random
import sys
from dataclasses import dataclass
from dataclasses import field
from pathlib import Path
from typing import Optional

import datasets
import numpy as np
import pandas as pd
# from datasets import ClassLabel
from datasets import load_dataset
from nltk.tokenize import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
from seqeval.metrics.sequence_labeling import accuracy_score as seq_accuracy_score
from seqeval.metrics.sequence_labeling import f1_score as seq_f1_score
from seqeval.metrics.sequence_labeling import precision_score as seq_precision_score
from seqeval.metrics.sequence_labeling import recall_score as seq_recall_score
from seqeval.metrics.sequence_labeling import classification_report as seq_classification_report
from sklearn.metrics import accuracy_score as sk_accuracy_score
from sklearn.metrics import f1_score as sk_f1_score
from sklearn.metrics import precision_score as sk_precision_score
from sklearn.metrics import recall_score as sk_recall_score
from sklearn.metrics import classification_report as sk_classification_report
# from sklearn.preprocessing import MultiLabelBinarizer
from tqdm import tqdm
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    DataCollatorForTokenClassification,
    DataCollatorWithPadding,
    PreTrainedTokenizerFast,
    Trainer,
    TrainingArguments,
    pipeline,
    set_seed,
)
# from transformers.training_args import TrainingArguments
import wandb

BIBLES_BASE_URI = "https://huggingface.co/datasets/linhd-postdata/stanzas/resolve/main"
BIBLES = {
    "validation": f"{BIBLES_BASE_URI}/eval.csv",
    "test": f"{BIBLES_BASE_URI}/test.csv",
    "train": f"{BIBLES_BASE_URI}/train.csv",
}


# Helper functions
def printm(string):
    print(str(string))


# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(
    tokenizer, examples, text_column_name, max_length, padding,
    label_column_name, label_to_id, label_all_tokens
):
    tokenized_inputs = tokenizer(
        examples[text_column_name],
        max_length=max_length,
        padding=padding,
        truncation=True,
        # We use this argument because the texts in our dataset are lists of
        # words (with a label for each word).
        is_split_into_words=True,
    )
    labels = []
    for i, label in enumerate(examples[label_column_name]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            # Special tokens have a word id that is None. We set the label to -100
            # so they are automatically ignored in the loss function.
            if word_idx is None:
                label_ids.append(-100)
            # We set the label for the first token of each word.
            elif word_idx != previous_word_idx:
                label_ids.append(label_to_id[label[word_idx]])
            # For the other tokens in a word, we set the label to either the current
            # label or -100, depending on the label_all_tokens flag.
            else:
                label_ids.append(label_to_id[label[word_idx]] if label_all_tokens else -100)
            previous_word_idx = word_idx

        labels.append(label_ids)
    tokenized_inputs["labels"] = labels
    return tokenized_inputs

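
# Illustrative sketch (not part of the original pipeline): with a fast tokenizer,
# word_ids() maps each sub-token back to its source word, which is what the
# alignment above relies on. For a hypothetical pre-split input such as
# ["Yahveh", "habla"], a sub-word tokenizer might yield word_ids like
# [None, 0, 0, 1, None] (special token, two pieces for the first word, one for
# the second, special token), so the aligned labels become
# [-100, id_0, id_0 or -100, id_1, -100] depending on label_all_tokens.
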
# Metrics
def token_compute_metrics(pairs, label_list):
    """Token metrics based on seqeval"""
    raw_predictions, labels = pairs
    predictions = np.argmax(raw_predictions, axis=2)

    # Remove ignored index (special tokens)
    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    # Softmax over the logits; kept for the (commented-out) ROC plot below.
    raw_scores = (
        np.exp(raw_predictions) / np.exp(raw_predictions).sum(-1, keepdims=True)
    )
    scores = raw_scores.max(axis=2)
    true_scores = [
        [(s, l) for (s, l) in zip(score, label) if l != -100]
        for score, label in zip(scores, labels)
    ]

    # mlb = MultiLabelBinarizer()  # sparse_output=True
    # true_predictions = mlb.fit_transform(true_predictions)
    # mlb = MultiLabelBinarizer()  # sparse_output=True
    # true_labels = mlb.fit_transform(true_labels)
    # wandb.log({
    #     "roc": wandb.plot.roc_curve(
    #         labels,
    #         predictions,
    #         labels=label_list
    #     )})
    metrics = {
        "accuracy": seq_accuracy_score(true_labels, true_predictions),
        "precision_micro": seq_precision_score(true_labels, true_predictions, average="micro"),
        "recall_micro": seq_recall_score(true_labels, true_predictions, average="micro"),
        "f1_micro": seq_f1_score(true_labels, true_predictions, average="micro"),
        "precision_macro": seq_precision_score(true_labels, true_predictions, average="macro"),
        "recall_macro": seq_recall_score(true_labels, true_predictions, average="macro"),
        "f1_macro": seq_f1_score(true_labels, true_predictions, average="macro"),
        # "report": seq_classification_report(true_labels, true_predictions, digits=4)
    }
    reports = seq_classification_report(
        true_labels, true_predictions, output_dict=True, zero_division=0,
    )
    for label, report in reports.items():
        # Turn e.g. "micro avg" into "micro_avg" for cleaner metric names.
        label_title = label.replace(" avg", "_avg", 1)
        for metric_key, metric_value in report.items():
            metrics.update({
                f"label_{label_title}_{metric_key}": metric_value,
            })
    # labels_to_plot = label_list.copy()
    # if "O" in labels_to_plot:
    #     labels_to_plot.remove("O")
    flat_true_labels = sum(true_labels, [])
    flat_true_predictions = sum(true_predictions, [])
    wandb.log({
        # "roc": wandb.plot.roc_curve(
        #     labels.reshape(-1),
        #     raw_scores.reshape(-1, raw_predictions.shape[-1]),
        #     labels=label_list,
        #     classes_to_plot=labels_to_plot,
        # ),
        "matrix": wandb.sklearn.plot_confusion_matrix(
            flat_true_labels, flat_true_predictions, label_list
        )
    })
    return metrics


def sequence_compute_metrics(pairs, label_list):
    """Sequence metrics based on sklearn"""
    raw_predictions, labels = pairs
    predictions = np.argmax(raw_predictions, axis=1)
    metrics = {
        "accuracy": sk_accuracy_score(labels, predictions),
        "precision_micro": sk_precision_score(labels, predictions, average="micro"),
        "recall_micro": sk_recall_score(labels, predictions, average="micro"),
        "f1_micro": sk_f1_score(labels, predictions, average="micro"),
        "precision_macro": sk_precision_score(labels, predictions, average="macro"),
        "recall_macro": sk_recall_score(labels, predictions, average="macro"),
        "f1_macro": sk_f1_score(labels, predictions, average="macro"),
        # "report": sk_classification_report(labels, predictions, digits=4)
    }
    reports = sk_classification_report(
        labels, predictions, target_names=label_list, output_dict=True,
    )
    for label, report in reports.items():
        # The "accuracy" entry is a bare float; wrap it so the loop below works.
        if not isinstance(report, dict):
            report = {"": report}
        label_title = label.replace(" avg", "_avg", 1)
        for metric_key, metric_value in report.items():
            metrics.update({
                f"label_{label_title}_{metric_key}": metric_value,
            })
    wandb.log({
        "roc": wandb.plot.roc_curve(
            labels, raw_predictions, labels=label_list
        ),
        "matrix": wandb.sklearn.plot_confusion_matrix(
            labels, predictions, label_list
        )
    })
    return metrics


def write_file(kind, metrics, output_dir, save_artifact=False):
    output_file = output_dir / f"{kind}_results.txt"
    headers = []
    label_headers = []
    data = []
    label_data = []
    with open(output_file, "w") as writer:
        printm(f"**{kind.capitalize()} results**")
        for key, value in metrics.items():
            printm(f"\t{key} = {value}")
            writer.write(f"{key} = {value}\n")
            title = key.replace("eval_", "", 1)
            if title.startswith("label_"):
                label_headers.append(title.replace("label_", "", 1))
                label_data.append(value)
            else:
                headers.append(title)
                data.append(value)
                wandb.log({f"{kind}:{title}": value})
    wandb.log({kind: wandb.Table(data=[data], columns=headers)})
    if label_headers:
        wandb.log({
            f"{kind}:labels": wandb.Table(
                data=[label_data], columns=label_headers
            )
        })
    if save_artifact:
        artifact = wandb.Artifact(kind, type="result")
        artifact.add_file(str(output_file))
        wandb.log_artifact(artifact)


def dataset_select(dataset, size):
    dataset_len = len(dataset)
    if size < 0 or size > dataset_len:
        return dataset
    elif size <= 1:  # it's a percentage
        return dataset.select(range(int(size * dataset_len)))
    else:  # it's a number
        return dataset.select(range(int(size)))

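
# Illustrative (not part of the original script): size is read as a fraction
# when 0 <= size <= 1 and as an absolute row count otherwise. For a 1,000-row
# dataset, dataset_select(ds, 0.1) keeps 100 rows, dataset_select(ds, 250)
# keeps 250 rows, and dataset_select(ds, -1) keeps everything.
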

def main(args):
    # Set seed
    if args.run:
        seed = random.randrange(10**3)
    else:
        seed = args.seed
    set_seed(seed)
    # Run name
    model_name = args.model_name
    model_name = model_name[2:] if model_name.startswith("./") else model_name
    model_name = model_name[1:] if model_name.startswith("/") else model_name
    run_name = f"{model_name}_{args.task_name}"
    run_name = f"{run_name}_{args.dataset_config or args.dataset_name}"
    run_name = run_name.replace("/", "-")
    run_name = f"{run_name}_l{str(args.dataset_language)}"
    run_name = f"{run_name}_c{str(args.dataset_century)}"
    run_name = f"{run_name}_e{str(args.num_train_epochs)}"
    run_name = f"{run_name}_lr{str(args.learning_rate)}"
    run_name = f"{run_name}_ws{str(args.warmup_steps)}"
    run_name = f"{run_name}_wd{str(args.weight_decay)}"
    run_name = f"{run_name}_s{str(seed)}"
    run_name = f"{run_name}_eas{str(args.eval_accumulation_steps)}"
    if args.max_length != 512:
        run_name = f"{run_name}_seq{str(args.max_length)}"
    if args.label_all_tokens:
        run_name = f"{run_name}_labelall"
    if args.run:
        run_name = f"{run_name}_r{str(args.run)}"
    output_dir = Path(args.output_dir) / run_name
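    # For example (illustrative values only), running with default settings plus
    # --model_name bert-base-multilingual-cased --task_name ner --dataset_config es
    # would yield a run_name along the lines of
    # "bert-base-multilingual-cased_ner_es_lall_call_e4_lr3e-05_ws0.0_wd0.0_s2021_eas0".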
    # Tokenizer settings
    padding = "longest"  # default: False @param ["False", "'max_length'"] {type: 'raw'}
    max_length = args.max_length  #@param {type: "number"}
    # Training settings
    weight_decay = args.weight_decay  #@param {type: "number"}
    adam_beta1 = 0.9  #@param {type: "number"}
    adam_beta2 = 0.999  #@param {type: "number"}
    adam_epsilon = 1e-08  #@param {type: "number"}
    max_grad_norm = 1.0  #@param {type: "number"}
    save_total_limit = 1  #@param {type: "integer"}
    load_best_model_at_end = False  #@param {type: "boolean"}
    # wandb
    wandb.init(name=run_name, project="postdata")
    wandb.log({
        "seed": int(seed),
    })
    # Loading the dataset
    print("\n\n#####################################")
    print(args.model_name)
    print(args.task_name)
    print(args.dataset_config)
    print(args.dataset_language)
    print(args.dataset_century)
    train_split = args.dataset_split_train
    test_split = args.dataset_split_test
    validation_split = args.dataset_split_validation
    if ":" in args.dataset_name:
        dataset_name, dataset_config = args.dataset_name.split(":")
    else:
        dataset_name = args.dataset_name
        dataset_config = args.dataset_config
    use_auth_token = os.environ.get("AUTH_TOKEN", None)
    if dataset_config is None or len(dataset_config) == 0:
        dataset = load_dataset(dataset_name, use_auth_token=use_auth_token)
    elif dataset_name == "csv" and dataset_config:
        # The Bible CSV splits live at fixed URLs (see BIBLES above); the chosen
        # dataset_config selects the task but does not alter the file locations.
        dataset = load_dataset(
            dataset_name,
            data_files={
                "train": BIBLES["train"],
                "validation": BIBLES["validation"],
                "test": BIBLES["test"],
            },
            use_auth_token=use_auth_token)
    else:
        dataset = load_dataset(dataset_name, dataset_config, use_auth_token=use_auth_token)
    if args.dataset_language and args.dataset_language.lower() not in ("all", "balanced"):
        dataset = dataset.filter(lambda x: x["language"] == args.dataset_language)
    if args.dataset_century and args.dataset_century.lower() != "all":
        # Membership test allows comma-separated values, e.g. "16,17"
        dataset = dataset.filter(lambda x: x["century"] in args.dataset_century)
    if dataset["train"].shape[0] == 0 or dataset["test"].shape[0] == 0 or dataset["validation"].shape[0] == 0:
        print(f"Not enough data for {str(args.dataset_language)} on {str(args.dataset_century)}: {str(dataset.shape)}")
        return
    column_names = dataset[train_split].column_names
    features = dataset[train_split].features
    if "tokens" in column_names:
        text_column_name = "tokens"
    elif "text" in column_names:
        text_column_name = "text"
    else:
        text_column_name = column_names[0]
    if f"{args.task_name}_tags" in column_names:
        label_column_name = f"{args.task_name}_tags"
    elif "label" in column_names:
        label_column_name = "label"
    else:
        label_column_name = column_names[1]
    if dataset_name == "csv":
        label_list = list(set(dataset[train_split][label_column_name]))
    elif isinstance(features[label_column_name], datasets.features.Sequence):
        label_list = features[label_column_name].feature.names
    else:
        label_list = features[label_column_name].names
    # Labels are already class indices in the encoded datasets, so the
    # mapping is the identity.
    label_to_id = {i: i for i in range(len(label_list))}
    num_labels = len(label_list)
    print(f"Number of labels: {num_labels}")
    print({label.split("-")[-1] for label in label_list})
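    # Illustrative (not part of the original script): for a NER dataset such as
    # conll2002, the label feature is a Sequence of ClassLabel, so label_list
    # would be the tag names ["O", "B-PER", "I-PER", ...] and label_to_id maps
    # each class index to itself.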

    # Training
    config = AutoConfig.from_pretrained(
        args.model_name,
        num_labels=num_labels,
        finetuning_task=args.task_name,
        cache_dir=args.cache_dir,
        force_download=args.force_download,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        args.model_name,
        cache_dir=args.cache_dir,
        use_fast=True,
        force_download=args.force_download,
    )
    if isinstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast)):
        # RoBERTa-style BPE needs add_prefix_space=True to tokenize
        # pre-split words.
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name,
            cache_dir=args.cache_dir,
            use_fast=True,
            force_download=args.force_download,
            add_prefix_space=True,
        )

    tokenizer_test_sentence = """
    Ya que el Ángel del Señor tiene nombre propio, y su nombre es Yahveh.
    """.strip()
    printm("""Tokenizer test""")
    printm(f"> {tokenizer_test_sentence}")
    printm(tokenizer.tokenize(tokenizer_test_sentence))
    printm(tokenizer(tokenizer_test_sentence).input_ids)
    # STILTs: models already fine-tuned on another task keep their weights but
    # get a freshly sized classification head (ignore_mismatched_sizes=True).
    is_stilt = args.model_name in args.stilt.split(",") or args.stilt == "all"
    model_config = dict(
        from_tf=bool(".ckpt" in args.model_name),
        config=config,
        cache_dir=args.cache_dir,
        force_download=args.force_download,
    )
    # Token tasks
    if args.task_name in ("pos", "ner"):
        if is_stilt:
            # model = AutoModelForTokenClassification.from_config(
            #     config=config
            # )
            model_config.pop("config")
            model = AutoModelForTokenClassification.from_pretrained(
                args.model_name, num_labels=num_labels, ignore_mismatched_sizes=True, **model_config,
            )
        else:
            model = AutoModelForTokenClassification.from_pretrained(
                args.model_name, **model_config,
            )
        # Preprocessing the dataset
        tokenized_datasets = dataset.map(
            lambda examples: tokenize_and_align_labels(
                tokenizer, examples, text_column_name, max_length, padding,
                label_column_name, label_to_id, args.label_all_tokens),
            batched=True,
            load_from_cache_file=not args.overwrite_cache,
            num_proc=os.cpu_count(),
        )
        # Data collator
        data_collator = DataCollatorForTokenClassification(tokenizer)
        compute_metrics = token_compute_metrics
    # Sequence tasks
    else:
        if is_stilt:
            # model = AutoModelForSequenceClassification.from_config(
            #     config=config
            # )
            model_config.pop("config")
            model = AutoModelForSequenceClassification.from_pretrained(
                args.model_name, num_labels=num_labels, ignore_mismatched_sizes=True, **model_config,
            )
        else:
            model = AutoModelForSequenceClassification.from_pretrained(
                args.model_name, **model_config,
            )
        # Preprocessing the dataset
        tokenized_datasets = dataset.map(
            lambda examples: tokenizer(
                examples[text_column_name],
                max_length=max_length,
                padding=padding,
                truncation=True,
                is_split_into_words=False,
            ),
            batched=True,
            load_from_cache_file=not args.overwrite_cache,
            num_proc=os.cpu_count(),
        )
        # Data collator
        data_collator = DataCollatorWithPadding(
            tokenizer,
            max_length=max_length,
            padding=padding,
        )
        compute_metrics = sequence_compute_metrics
    train_dataset = dataset_select(
        tokenized_datasets[train_split], args.max_train_size
    )
    test_dataset = dataset_select(
        tokenized_datasets[test_split], args.max_test_size
    )
    validation_dataset = dataset_select(
        tokenized_datasets[validation_split], args.max_validation_size
    )
    wandb.log({
        "train_size": len(train_dataset),
        "test_size": len(test_dataset),
        "validation_size": len(validation_dataset),
    })
    # Steps per epoch times epochs gives the total number of optimizer steps;
    # args.warmup_steps is interpreted as a fraction of that total.
    steps_per_epoch = (
        train_dataset.shape[0] / args.train_batch_size
    )
    total_steps = args.num_train_epochs * steps_per_epoch
    warmup_steps = int(args.warmup_steps * total_steps)
    wandb.log({
        "total_steps": int(total_steps),
        "total_warmup_steps": warmup_steps
    })
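    # Worked example (illustrative): 10,000 training rows with train_batch_size 8
    # give 1,250 steps per epoch; over 4 epochs that is 5,000 total steps, so
    # --warmup_steps 0.1 yields int(0.1 * 5000) = 500 warmup steps.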
    do_eval = args.do_eval and (validation_split in tokenized_datasets)
    do_test = args.do_test and (test_split in tokenized_datasets)
    do_predict = args.do_predict and (test_split in tokenized_datasets)
    training_args = TrainingArguments(
        output_dir=output_dir.as_posix(),
        overwrite_output_dir=args.overwrite_output_dir,
        do_train=args.do_train,
        do_eval=do_eval,
        do_predict=do_test or do_predict,
        per_device_train_batch_size=int(args.train_batch_size),
        per_device_eval_batch_size=int(args.eval_batch_size or args.train_batch_size),
        learning_rate=float(args.learning_rate),
        weight_decay=weight_decay,
        adam_beta1=adam_beta1,
        adam_beta2=adam_beta2,
        adam_epsilon=adam_epsilon,
        max_grad_norm=max_grad_norm,
        num_train_epochs=args.num_train_epochs,
        warmup_steps=warmup_steps,
        load_best_model_at_end=load_best_model_at_end,
        seed=seed,
        save_total_limit=save_total_limit,
        run_name=run_name,
        disable_tqdm=False,
        eval_steps=1000,
        eval_accumulation_steps=args.eval_accumulation_steps or None,  # it was not set
        dataloader_num_workers=64,  # it was not set
    )
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=validation_dataset if do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=lambda pairs: compute_metrics(pairs, label_list),
    )
    if args.do_train:
        train_result = trainer.train()
        trainer.save_model()  # Saves the tokenizer too for easy upload
        write_file("train", train_result.metrics, output_dir, save_artifact=args.save_artifacts)
        # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
        trainer.state.save_to_json(str(output_dir / "trainer_state.json"))
    # Evaluation
    if do_eval:
        printm("**Evaluate**")
        results = trainer.evaluate()
        write_file("eval", results, output_dir, save_artifact=args.save_artifacts)
    # Testing and predicting
    if do_test or do_predict:
        printm("**Test**")
        predictions, labels, metrics = trainer.predict(test_dataset)
        if not do_predict:
            write_file("test", metrics, output_dir, save_artifact=args.save_artifacts)
        if args.task_name in ("ner", "pos"):
            predictions = np.argmax(predictions, axis=2)
            # Remove ignored index (special tokens)
            true_predictions = [
                [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
                for prediction, label in zip(predictions, labels)
            ]
        else:
            predictions = np.argmax(predictions, axis=1)
            true_predictions = [
                label_list[p] for (p, l) in zip(predictions, labels) if l != -100
            ]
        # Save predictions: token tasks yield one list of labels per example,
        # sequence tasks a single label per example.
        output_test_predictions_file = os.path.join(output_dir, "test_predictions.txt")
        output_test_predictions = "\n".join(
            " ".join(map(str, p)) if isinstance(p, list) else str(p)
            for p in true_predictions
        )
        with open(output_test_predictions_file, "a+") as writer:
            writer.write(output_test_predictions)
        if args.save_artifacts:
            artifact = wandb.Artifact("predictions", type="result")
            artifact.add_file(output_test_predictions_file)
            wandb.log_artifact(artifact)
    # # Log the results
    # logfile = output_dir / "evaluation.csv"
    # # Check if logfile exists
    # try:
    #     f = open(logfile)
    #     f.close()
    # except FileNotFoundError:
    #     with open(logfile, 'a+') as f:
    #         f.write("model_name" + "\t" + "data_language" + "\t" + "task_name" + "\t" + "learning_rate" + "\t" + "num_epochs" + "\t" + "warmup_steps" + "\t" + "validation_f1" + "\t" + "test_f1" + "\n")
    # with open(logfile, 'a') as f:
    #     print(results)
    #     f.write(args.model_name + "\t" + (args.dataset_config or args.dataset_name) + "\t" + args.task_name + "\t" + str(args.learning_rate) + "\t" + str(args.num_train_epochs) + "\t" + str(warmup_steps) + "\t" + str(results['eval_f1']) + "\t" + str(metrics['eval_f1']) + "\n")


if __name__ == "__main__":
    # argparse's type=bool would treat any non-empty string (including "False")
    # as True, so boolean options use this converter instead.
    yesno = lambda x: str(x).lower() in {'true', 't', '1', 'yes', 'y'}
    parser = argparse.ArgumentParser(
        description="Evaluating BERT models for sequence classification on Bibles",
        epilog=f"""Example usage:
    {__file__} --task_name sequence --model_name "bert-base-multilingual-cased"
    """, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--model_name',
                        metavar='model_name', help='Model name or path')
    parser.add_argument('--dataset_name', default="csv",
                        metavar='dataset_name', help='Dataset name. It might enforce a config if added after a colon: "conll2002:es". This will ignore dataset_config, which is useful when running a grid search')
    parser.add_argument('--dataset_config',
                        metavar='dataset_config', help='Dataset config name')
    parser.add_argument('--dataset_language', default="all",
                        metavar='dataset_language', help='Dataset language name')
    parser.add_argument('--dataset_century', default="all",
                        metavar='dataset_century', help='Dataset century')

    parser.add_argument('--dataset_split_train', default="train",
                        metavar='dataset_split_train', help='Dataset train split name')
    parser.add_argument('--dataset_split_test', default="test",
                        metavar='dataset_split_test', help='Dataset test split name')
    parser.add_argument('--dataset_split_validation', default="validation",
                        metavar='dataset_split_validation', help='Dataset validation split name')

    parser.add_argument('--max_train_size', type=float, default=-1.0,
                        metavar='max_train_size', help='Percentage of train dataset or number of rows to use')
    parser.add_argument('--max_test_size', type=float, default=-1.0,
                        metavar='max_test_size', help='Percentage of test dataset or number of rows to use')
    parser.add_argument('--max_validation_size', type=float, default=-1.0,
                        metavar='max_validation_size', help='Percentage of validation dataset or number of rows to use')

    parser.add_argument('--do_train',
                        metavar='do_train', default=True, type=yesno,
                        help='Run training',
                        )
    parser.add_argument('--do_eval',
                        metavar='do_eval', default=True, type=yesno,
                        help='Run evaluation on the validation set',
                        )
    parser.add_argument('--do_test',
                        metavar='do_test', default=True, type=yesno,
                        help='Run evaluation on the test set',
                        )
    parser.add_argument('--do_predict',
                        metavar='do_predict', default=False, type=yesno,
                        help='Run prediction only on the test set',
                        )
    parser.add_argument('--task_name',
                        metavar='task_name', default="ner",
                        help='Task name (supported in the dataset), either ner or pos',
                        )
    parser.add_argument('--num_train_epochs',
                        metavar='num_train_epochs', default=4, type=float,
                        help='Number of training epochs',
                        )
    parser.add_argument('--eval_accumulation_steps',
                        metavar='eval_accumulation_steps', default=0, type=int,
                        help='Number of prediction steps to accumulate the output tensors for before moving the results to the CPU',
                        )
    parser.add_argument('--cache_dir',
                        metavar='cache_dir', default="/var/ml/cache/",
                        help='Cache dir for the transformers library',
                        )
    parser.add_argument('--overwrite_cache',
                        metavar='overwrite_cache', default=False, type=yesno,
                        help='Overwrite cache dir if present',
                        )
    parser.add_argument('--output_dir',
                        metavar='output_dir', default="/var/ml/output/",
                        help='Output dir for models and logs',
                        )
    parser.add_argument('--overwrite_output_dir',
                        metavar='overwrite_output_dir', default=True, type=yesno,
                        help='Overwrite output dir if present',
                        )
    parser.add_argument('--seed',
                        metavar='seed', type=int, default=2021,
                        help='Seed for the experiments',
                        )
    parser.add_argument('--run',
                        metavar='run', type=int,
                        help='Control variable for doing several runs of the same experiment. It will force random seeds even across the same set of parameters of a grid search',
                        )
    parser.add_argument('--train_batch_size',
                        metavar='train_batch_size', type=int, default=8,
                        help='Batch size for training',
                        )
    parser.add_argument('--eval_batch_size',
                        metavar='eval_batch_size', type=int,
                        help='Batch size for evaluation. Defaults to train_batch_size',
                        )
    parser.add_argument('--max_length',
                        metavar='max_length', type=int, default=512,
                        help='Maximum sequence length',
                        )
    parser.add_argument('--learning_rate',
                        metavar='learning_rate', type=str, default="3e-05",
                        help='Learning rate',
                        )
    parser.add_argument('--warmup_steps',
                        metavar='warmup_steps', type=float, default=0.0,
                        help='Warmup steps as a percentage of the total number of steps',
                        )
    parser.add_argument('--weight_decay',
                        metavar='weight_decay', type=float, default=0.0,
                        help='Weight decay',
                        )
    parser.add_argument('--label_all_tokens',
                        metavar='label_all_tokens', type=yesno, default=False,
                        help=('Whether to put the label for one word on all tokens '
                              'generated by that word or just on the first one (in '
                              'which case the other tokens will have a padding index).'),
                        )
    parser.add_argument('--force_download',
                        metavar='force_download', type=yesno, default=False,
                        help='Force the download of model, tokenizer, and config',
                        )
    parser.add_argument('--save_artifacts',
                        metavar='save_artifacts', type=yesno, default=False,
                        help='Save train, eval, and test files in Weights & Biases',
                        )
    parser.add_argument('--stilt',
                        metavar='stilt', type=str, default="",
                        help='Specify models already fine-tuned for other tasks',
                        )

    args = parser.parse_args()
    main(args)
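
# Example invocation (illustrative; the dataset spec below is an assumption
# based on the --dataset_name help text):
# python stanzas_eval.py --task_name ner --model_name bert-base-multilingual-cased \
#     --dataset_name conll2002:es --num_train_epochs 4 --learning_rate 3e-05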