Rodrigo1771 committed
Commit
06e4162
1 Parent(s): f43b36d

Upload 2 files

Files changed (2)
  1. drugtemist_en_loading_script.py +107 -0
  2. test.conll +3 -0
drugtemist_en_loading_script.py ADDED
@@ -0,0 +1,107 @@
+ # Loading script for the DrugTEMIST English NER dataset.
+ import datasets
+
+
+ logger = datasets.logging.get_logger(__name__)
+
+
+ _CITATION = """\
+ }"""
+
+ _DESCRIPTION = """\
+ https://temu.bsc.es/multicardioner/
+ """
+
+ _URL = "https://huggingface.co/datasets/Rodrigo1771/drugtemist-en-ner/resolve/main/"
+ _TRAINING_FILE = "train.conll"
+ _DEV_FILE = "dev.conll"
+ _TEST_FILE = "test.conll"
+
+ class DrugTEMISTENNERConfig(datasets.BuilderConfig):
+     """BuilderConfig for the DrugTEMIST English NER dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for DrugTEMIST English NER.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(DrugTEMISTENNERConfig, self).__init__(**kwargs)
+
+
+ class DrugTEMISTENNER(datasets.GeneratorBasedBuilder):
+     """DrugTEMIST English NER dataset."""
+
+     BUILDER_CONFIGS = [
+         DrugTEMISTENNERConfig(
+             name="DrugTEMIST English NER",
+             version=datasets.Version("1.0.0"),
+             description="DrugTEMIST English NER dataset"),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-FARMACO",
+                                 "I-FARMACO",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://temu.bsc.es/multicardioner/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             guid = 0
+             tokens = []
+             ner_tags = []
+             for line in f:
+                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                     if tokens:
+                         yield guid, {
+                             "id": str(guid),
+                             "tokens": tokens,
+                             "ner_tags": ner_tags,
+                         }
+                         guid += 1
+                         tokens = []
+                         ner_tags = []
+                 else:
+                     # DrugTEMIST English tokens are tab separated
+                     splits = line.split("\t")
+                     tokens.append(splits[0])
+                     ner_tags.append(splits[-1].rstrip())
+             # last example
+             yield guid, {
+                 "id": str(guid),
+                 "tokens": tokens,
+                 "ner_tags": ner_tags,
+             }
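For context, a loading script like this is consumed through datasets.load_dataset. Below is a minimal usage sketch (not part of the commit): it assumes the script file from this commit is available locally, and that your datasets version still supports script-based loaders (recent releases require passing trust_remote_code=True).

from datasets import load_dataset

# Minimal usage sketch (illustrative, not part of the commit).
# Point load_dataset at the script uploaded in this commit; recent versions
# of the `datasets` library require trust_remote_code=True for dataset scripts.
ds = load_dataset("drugtemist_en_loading_script.py", trust_remote_code=True)

# Features match _info(): "id", "tokens", and "ner_tags" (O / B-FARMACO / I-FARMACO).
label_names = ds["train"].features["ner_tags"].feature.names
example = ds["train"][0]
print(example["tokens"])
print([label_names[tag] for tag in example["ner_tags"]])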
test.conll ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01286ed15e15d6967a7f3f3be0cdbd35f83e9ef6c14de327de9f80fed9da977d
+ size 4669465
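Since test.conll is tracked with Git LFS, the diff shows only the pointer (oid and size), not the data itself. For orientation, here is a hypothetical sketch of the tab-separated row format that _generate_examples above expects; the rows are invented for illustration and are not taken from the actual file.

# Hypothetical sketch of the format the loader above expects
# (illustrative rows only; test.conll is stored in Git LFS and not shown here).
sample_sentence = (
    "Patients\tO\n"
    "received\tO\n"
    "aspirin\tB-FARMACO\n"
    "\n"  # blank line ends the sentence
)
# _generate_examples would yield for this sentence:
#   tokens   = ["Patients", "received", "aspirin"]
#   ner_tags = ["O", "O", "B-FARMACO"]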