"""
A dataset loading script for the PharmaCoNER corpus.
The PharmaCoNER datset is a manually annotated collection of clinical case
studies derived from the Spanish Clinical Case Corpus (SPACCC). It was designed
for the Pharmacological Substances, Compounds and Proteins NER track, the first
shared task on detecting drug and chemical entities in Spanish medical documents.
"""

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{,
title = "PharmaCoNER: Pharmacological Substances, Compounds and proteins Named Entity Recognition track",
author = "Gonzalez-Agirre, Aitor and
Marimon, Montserrat and
Intxaurrondo, Ander and
Rabal, Obdulia and
Villegas, Marta and
Krallinger, Martin",
booktitle = "Proceedings of The 5th Workshop on BioNLP Open Shared Tasks",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-5701",
doi = "10.18653/v1/D19-5701",
pages = "1--10",
abstract = "",
}
"""
_DESCRIPTION = """\
PharmaCoNER: Pharmacological Substances, Compounds and Proteins Named Entity Recognition track
This dataset is designed for the PharmaCoNER task, sponsored by Plan de Impulso de las Tecnologías del Lenguaje (Plan TL).
It is a manually annotated collection of clinical case studies derived from the Spanish Clinical Case Corpus (SPACCC), an
open-access electronic library that gathers Spanish medical publications from SciELO (Scientific Electronic Library Online).
The annotation of the entire set of entity mentions was carried out by medicinal chemistry experts
and includes the following four entity types: NORMALIZABLES, NO_NORMALIZABLES, PROTEINAS and UNCLEAR.
The PharmaCoNER corpus contains a total of 396,988 words and 1,000 clinical cases that have been randomly sampled into three subsets.
The training set contains 500 clinical cases, while the development and test sets contain 250 clinical cases each.
This amounts to 8,074, 3,764 and 3,931 annotated sentences in the training, development and test sets, respectively.
The original dataset was distributed in Brat standoff format (https://brat.nlplab.org/standoff.html).
For further information, please visit https://temu.bsc.es/pharmaconer/ or send an email to encargo-pln-life@bsc.es.
"""
_HOMEPAGE = "https://temu.bsc.es/pharmaconer/index.php/datasets/"
_LICENSE = "Creative Commons Attribution 4.0 International"
_VERSION = "1.1.0"
_URL = "https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer/resolve/main/"
_TRAINING_FILE = "train-set_1.1.conll"
_DEV_FILE = "dev-set_1.1.conll"
_TEST_FILE = "test-set_1.1.conll"
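
# Each .conll file is expected to hold one token per line with tab-separated
# columns, where the first column is the token and the last column is its IOB2
# tag; blank lines separate sentences. An illustrative (invented) fragment:
#
#     Se	O
#     administró	O
#     paracetamol	B-NORMALIZABLES
#     .	O
#
# _generate_examples below reads only the first and last columns, so any
# intermediate columns are ignored.
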

class PharmaCoNERConfig(datasets.BuilderConfig):
    """BuilderConfig for the PharmaCoNER dataset."""

    def __init__(self, **kwargs):
        """Forward all keyword arguments to the base BuilderConfig."""
        super(PharmaCoNERConfig, self).__init__(**kwargs)


class PharmaCoNER(datasets.GeneratorBasedBuilder):
    """PharmaCoNER dataset."""

    BUILDER_CONFIGS = [
        PharmaCoNERConfig(
            name="PharmaCoNER",
            version=datasets.Version(_VERSION),
            description="PharmaCoNER dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-NO_NORMALIZABLES",
                                "B-NORMALIZABLES",
                                "B-PROTEINAS",
                                "B-UNCLEAR",
                                "I-NO_NORMALIZABLES",
                                "I-NORMALIZABLES",
                                "I-PROTEINAS",
                                "I-UNCLEAR",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
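
    # Note: ner_tags are encoded as ClassLabel integer ids. The id-to-string
    # mapping can be recovered from the loaded dataset, e.g.:
    #
    #     label_names = dataset["train"].features["ner_tags"].feature.names
    #     label_names[0]  # "O"
    #     label_names[2]  # "B-NORMALIZABLES"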

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line == "\n":
                    # A blank line marks a sentence boundary.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # Tab-separated columns: the token is the first column and
                    # the IOB2 tag is the last one.
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    ner_tags.append(splits[-1].rstrip())
            # Yield the last sentence if the file does not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }
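

# A minimal local smoke test, assuming a `datasets` version that still supports
# loading a builder from a local script path (script-based loading was removed
# in datasets 3.0):
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__)
    print(dataset)
    print(dataset["train"][0])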