File size: 6,463 Bytes
7059d85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8f719117-0591-4f80-a071-16b8b1a0a829",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-05-10 12:21:50.698143: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
      "2024-05-10 12:21:50.701405: I external/local_tsl/tsl/cuda/cudart_stub.cc:32] Could not find cuda drivers on your machine, GPU will not be used.\n",
      "2024-05-10 12:21:50.745330: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2024-05-10 12:21:51.413922: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n"
     ]
    }
   ],
   "source": [
    "import flair\n",
    "\n",
    "from flair.datasets.sequence_labeling import ColumnCorpus\n",
    "from flair.file_utils import cached_path\n",
    "\n",
    "from pathlib import Path\n",
    "from typing import Optional, Union"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0ddcffce-b914-4250-848a-e8cb5b9cc049",
   "metadata": {},
   "outputs": [],
   "source": [
    "class NER_HISTNERO(ColumnCorpus):\n",
    "    \"\"\"HistNERo corpus (historical Romanian NER), downloaded from the Hugging Face Hub.\n",
    "\n",
    "    The splits are TSV files with the token in column 0 and the NER tag in\n",
    "    column 1; documents are separated by a ``-DOCSTART-`` token.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        base_path: Optional[Union[str, Path]] = None,\n",
    "        in_memory: bool = True,\n",
    "        **corpusargs,\n",
    "    ) -> None:\n",
    "        # Resolve the dataset folder: <base_path>/<dataset_name>, defaulting\n",
    "        # to the flair cache root when no base_path is given.\n",
    "        base_path = flair.cache_root / \"datasets\" if not base_path else Path(base_path)\n",
    "        dataset_name = self.__class__.__name__.lower()\n",
    "        data_folder = base_path / dataset_name\n",
    "\n",
    "        # Token in the first column, NER tag in the second.\n",
    "        column_format = {0: \"text\", 1: \"ner\"}\n",
    "\n",
    "        hf_download_path = \"https://huggingface.co/datasets/stefan-it/histnero/resolve/main\"\n",
    "\n",
    "        # Download all splits into data_folder (not unconditionally into the\n",
    "        # global cache root) so a custom base_path is honored by both the\n",
    "        # download and the corpus parsing below.\n",
    "        for split in [\"train\", \"dev\", \"test\"]:\n",
    "            cached_path(f\"{hf_download_path}/{split}.tsv\", data_folder)\n",
    "\n",
    "        super().__init__(\n",
    "            data_folder,\n",
    "            column_format=column_format,\n",
    "            column_delimiter=\"\\t\",\n",
    "            document_separator_token=\"-DOCSTART-\",\n",
    "            in_memory=in_memory,\n",
    "            comment_symbol=\"# \",\n",
    "            **corpusargs,\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "caf404fb-b4f1-4688-81ef-abbd950beb0a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2024-05-10 12:21:56,117 Reading data from /home/stefan/.flair/datasets/ner_histnero\n",
      "2024-05-10 12:21:56,118 Train: /home/stefan/.flair/datasets/ner_histnero/train.tsv\n",
      "2024-05-10 12:21:56,119 Dev: /home/stefan/.flair/datasets/ner_histnero/dev.tsv\n",
      "2024-05-10 12:21:56,120 Test: /home/stefan/.flair/datasets/ner_histnero/test.tsv\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the corpus; the first run downloads the TSV splits into the cache.\n",
    "corpus = NER_HISTNERO()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c38074b8-36c6-47f8-9a9c-b9685514e4bf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Corpus: 8149 train + 1114 dev + 1117 test sentences\n"
     ]
    }
   ],
   "source": [
    "# Show the train/dev/test sentence counts; print() applies str() itself.\n",
    "print(corpus)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7f1bfac4-d09f-477a-b1d8-0543d2e4368c",
   "metadata": {},
   "source": [
    "# Tests\n",
    "\n",
    "We now check the number of parsed sentences and compare with the reference values from the HistNERo paper."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f1dc9c90-c14c-44c6-9716-5e341c73d370",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reference sentence counts per split, taken from the HistNERo paper.\n",
    "gold_training_split_sentences = {\n",
    "    \"train\": 8_020,\n",
    "    \"dev\": 1_003,\n",
    "    \"test\": 1_003,\n",
    "}\n",
    "\n",
    "# Map each split name to the corresponding Flair dataset object.\n",
    "flair_corpus_mapping = {\n",
    "    \"train\": corpus.train,\n",
    "    \"dev\": corpus.dev,\n",
    "    \"test\": corpus.test,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "957921b4-53f6-4c49-a916-339ff0296eee",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✔️ Number of parsed sentences for train split\n",
      "✔️ Number of parsed sentences for dev split\n",
      "✔️ Number of parsed sentences for test split\n",
      "✔️ Number of parsed sentences for complete dataset\n"
     ]
    }
   ],
   "source": [
    "actual_total_sentences = 0\n",
    "gold_total_sentences = sum(gold_training_split_sentences.values())\n",
    "\n",
    "for dataset_split in [\"train\", \"dev\", \"test\"]:\n",
    "    gold_sentences = gold_training_split_sentences[dataset_split]\n",
    "\n",
    "    # Count parsed sentences; document markers are parsed as one-token\n",
    "    # sentences but must not be counted against the gold statistics.\n",
    "    actual_sentences = sum(\n",
    "        1\n",
    "        for sentence in flair_corpus_mapping[dataset_split]\n",
    "        if not sentence[0].text.startswith(\"-DOCSTART-\")\n",
    "    )\n",
    "\n",
    "    actual_total_sentences += actual_sentences\n",
    "\n",
    "    assert gold_sentences == actual_sentences, f\"Mismatch of parsed sentences for {dataset_split} split!\"\n",
    "\n",
    "    print(f\"✔️ Number of parsed sentences for {dataset_split} split\")\n",
    "\n",
    "# The per-split counts must add up to the paper's total over all splits.\n",
    "assert actual_total_sentences == gold_total_sentences, \"Mismatch in total parsed sentences!\"\n",
    "\n",
    "print(\"✔️ Number of parsed sentences for complete dataset\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}