import json
import logging
import sys

import flair
import torch

from typing import List

from flair import set_seed
from flair.data import MultiCorpus
from flair.datasets import NER_HIPE_2022, NER_ICDAR_EUROPEANA
from flair.embeddings import TransformerWordEmbeddings
from flair.models import SequenceTagger
from flair.trainers import ModelTrainer

from utils import (
    prepare_ajmc_corpus,
    prepare_clef_2020_corpus,
    prepare_newseye_de_fr_corpus,
    prepare_newseye_fi_sv_corpus,
)

logger = logging.getLogger("flair")
logger.setLevel("INFO")
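
# Example configuration file (all keys below are read by this script;
# the values shown are purely illustrative):
#
# {
#   "hf_model": "dbmdz/bert-base-historic-multilingual-cased",
#   "context_size": 64,
#   "layers": "-1",
#   "use_crf": false,
#   "seeds": [1, 2, 3],
#   "batch_sizes": [4, 8],
#   "epochs": [10],
#   "learning_rates": [3e-05, 5e-05],
#   "subword_poolings": ["first"],
#   "hipe_datasets": ["ajmc/en", "icdar/fr"],
#   "cuda": 0
# }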


def run_experiment(seed: int, batch_size: int, epoch: int, learning_rate: float,
                   subword_pooling: str, hipe_datasets: List[str], json_config: dict):
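    """Runs a single fine-tuning experiment for one hyper-parameter combination.

    Model-specific settings ("hf_model", "context_size", and the optional
    "layers" and "use_crf" entries) are read from the parsed JSON config.
    """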
    hf_model = json_config["hf_model"]
    context_size = json_config["context_size"]
    layers = json_config.get("layers", "-1")
    use_crf = json_config.get("use_crf", False)

    set_seed(seed)

    corpus_list = []
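
    # Each entry in hipe_datasets has the form "<dataset_name>/<language>",
    # e.g. "ajmc/en". HIPE-2022 datasets get a matching preprocessing
    # function from utils; "icdar" uses a dedicated corpus class instead.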
    for dataset in hipe_datasets:
        dataset_name, language = dataset.split("/")

        preproc_fn = None

        if dataset_name == "ajmc":
            preproc_fn = prepare_ajmc_corpus
        elif dataset_name == "hipe2020":
            preproc_fn = prepare_clef_2020_corpus
        elif dataset_name == "newseye" and language in ["fi", "sv"]:
            preproc_fn = prepare_newseye_fi_sv_corpus
        elif dataset_name == "newseye" and language in ["de", "fr"]:
            preproc_fn = prepare_newseye_de_fr_corpus

        if dataset_name == "icdar":
            corpus_list.append(NER_ICDAR_EUROPEANA(language=language))
        else:
            corpus_list.append(NER_HIPE_2022(dataset_name=dataset_name, language=language,
                                             preproc_fn=preproc_fn, add_document_separator=True))
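
    # A context size of 0 disables FLERT-style document context;
    # TransformerWordEmbeddings expects use_context=False in that case.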
    if context_size == 0:
        context_size = False

    logger.info("FLERT Context: {}".format(context_size))
    logger.info("Layers: {}".format(layers))
    logger.info("Use CRF: {}".format(use_crf))
    corpora: MultiCorpus = MultiCorpus(corpora=corpus_list, sample_missing_splits=False)
    label_dictionary = corpora.make_label_dictionary(label_type="ner")
    logger.info("Label Dictionary: {}".format(label_dictionary.get_items()))
    embeddings = TransformerWordEmbeddings(
        model=hf_model,
        layers=layers,
        subtoken_pooling=subword_pooling,
        fine_tune=True,
        use_context=context_size,
    )
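
    # Plain linear classification head on top of the transformer (no RNN,
    # no embedding reprojection); a CRF layer is optional via the config.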
    tagger: SequenceTagger = SequenceTagger(
        hidden_size=256,
        embeddings=embeddings,
        tag_dictionary=label_dictionary,
        tag_type="ner",
        use_crf=use_crf,
        use_rnn=False,
        reproject_embeddings=False,
    )

    trainer: ModelTrainer = ModelTrainer(tagger, corpora)

    datasets = "-".join(hipe_datasets)
    trainer.fine_tune(
        f"hmbench-{datasets}-{hf_model}-bs{batch_size}-ws{context_size}-e{epoch}-lr{learning_rate}"
        f"-pooling{subword_pooling}-layers{layers}-crf{use_crf}-{seed}",
        learning_rate=learning_rate,
        mini_batch_size=batch_size,
        max_epochs=epoch,
        shuffle=True,
        embeddings_storage_mode="none",
        weight_decay=0.0,
        use_final_model_for_eval=False,
    )

    tagger.print_model_card()


if __name__ == "__main__":
    filename = sys.argv[1]
    with open(filename, "rt") as f_p:
        json_config = json.load(f_p)

    seeds = json_config["seeds"]
    batch_sizes = json_config["batch_sizes"]
    epochs = json_config["epochs"]
    learning_rates = json_config["learning_rates"]
    subword_poolings = json_config["subword_poolings"]

    hipe_datasets = json_config["hipe_datasets"]

    cuda = json_config["cuda"]
    flair.device = torch.device(f"cuda:{cuda}")
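
    # Exhaustive grid search over all hyper-parameter combinations.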
    for seed in seeds:
        for batch_size in batch_sizes:
            for epoch in epochs:
                for learning_rate in learning_rates:
                    for subword_pooling in subword_poolings:
                        run_experiment(seed, batch_size, epoch, learning_rate,
                                       subword_pooling, hipe_datasets, json_config)