wjbmattingly committed
Commit b8ac2fc
1 Parent(s): 8f180c1

Upload convert.ipynb

Files changed (1)
  1. convert.ipynb +419 -73
convert.ipynb CHANGED
@@ -2,133 +2,479 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 108,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
 
9
  "import pandas as pd\n",
10
  "import glob\n",
11
- "import spacy\n",
12
- "from spacy.tokens import Span, Doc\n",
13
- "import os\n",
14
- "from spacy.training import biluo_tags_to_offsets, biluo_tags_to_spans, iob_to_biluo\n",
15
- "import srsly"
16
  ]
17
  },
18
  {
19
  "cell_type": "code",
20
- "execution_count": 113,
21
  "metadata": {},
22
  "outputs": [],
23
  "source": [
24
- "def create_spacy_training_data(file_path):\n",
25
- " # Load data from the Excel file\n",
26
- " data = pd.read_excel(file_path)\n",
27
- " # if \"Line_ID\" in data.columns:\n",
28
- " # group_col = \"Line_ID\"\n",
29
- " if \"ACT\" in data.columns:\n",
30
  " group_col = \"ACT\"\n",
31
- " elif \"Original_Act_ID\" in data.columns:\n",
32
  " group_col = \"Original_Act_ID\"\n",
33
  " else:\n",
34
  " \"unknown\"\n",
35
- " # data = data[~data['Word_x'].apply(lambda x: isinstance(x, int))]\n",
36
- " # data = data[~data['Word_x'].apply(lambda x: isinstance(x, float))]\n",
37
- " data['Word_x'] = data['Word_x'].astype(str).str.strip()\n",
38
  " \n",
39
  " # Combine words into sentences, assumed by unique 'Line_ID'\n",
40
- " grouped_data = data.groupby(group_col)\n",
41
  " \n",
42
- " # Prepare training data in spaCy format\n",
43
- " training_data = []\n",
44
- " for _, item in grouped_data:\n",
45
- " bilo_loc = item[\"LOC_x\"].tolist()\n",
46
- " bilo_person = item[\"PERS_x\"].tolist()\n",
47
- " tokens = item[\"Word_x\"].tolist()\n",
48
- " doc = Doc(nlp.vocab, words=tokens, spaces=[True for i in range(len(tokens))])\n",
49
- " # doc = nlp(\" \".join(tokens))\n",
50
  "\n",
51
- " spans = iob_to_biluo(bilo_person)\n",
52
- " spans = biluo_tags_to_spans(doc, spans)\n",
53
  "\n",
54
  "\n",
55
- " loc_spans = iob_to_biluo(bilo_loc)\n",
56
- " loc_spans = biluo_tags_to_spans(doc, loc_spans)\n",
57
  "\n",
58
- " spans = loc_spans + spans\n",
59
  " doc.spans[\"sc\"] = spans\n",
60
- " span_ents = []\n",
61
- " for span in doc.spans[\"sc\"]:\n",
62
- " span_ents.append({\"text\": span.text, \"label\": span.label_, \"start\": span.start, \"end\": span.end})\n",
63
- " training_data.append({\"text\": doc.text, \"spans\": span_ents})\n",
64
- " return training_data"
65
  ]
66
  },
67
  {
68
  "cell_type": "code",
69
- "execution_count": 98,
70
  "metadata": {},
71
  "outputs": [
72
  {
73
- "name": "stdout",
74
  "output_type": "stream",
75
  "text": [
76
- "17\n"
77
  ]
78
  }
79
  ],
80
  "source": [
81
- "files = glob.glob(\"Database/*/CONLL/*.xlsx\")\n",
82
- "print(len(files))"
83
  ]
84
  },
85
  {
86
  "cell_type": "code",
87
- "execution_count": 117,
88
  "metadata": {},
89
  "outputs": [
90
  {
91
- "name": "stdout",
92
- "output_type": "stream",
93
- "text": [
94
- "0 Notre_Dame_Roche_Paris_BnF_10996\n",
95
- "1 Pontoise_Paris_BnF_5657\n",
96
- "2 Saint_Denis_Paris_AN_LL_1157\n",
97
- "3 Pontigny_Paris_BnF_lat_9887_inner_unmerged.xlsx\n",
98
- "4 Navarre_Pau_AD_E513\n",
99
- "5 Clairmarais_Troyes_AD_3H3700_inner_unmerged.xlsx\n",
100
- "6 Port_Royal_2_Paris_BnF_10998\n",
101
- "7 Nesle_Chantilly_GB_Reg12_14F22\n",
102
- "8 Fervaques_Paris_BnF_lat_11071\n",
103
- "9 Molesme_2_Dijon_ADCO_Cart_143_7H7\n",
104
- "10 Saint_Nicaise_Reims_BM_1843_inner_unmerged.xlsx\n",
105
- "11 Sommereux_Paris_Bnf_nal_1934\n",
106
- "12 Chartres_2_Paris_BnF_lat_10095\n",
107
- "13 Chartres_1_Paris_BnF_lat_10094\n",
108
- "14 Vauluisant_Paris_BnF_lat_9901\n",
109
- "15 Port_Royal_1_Paris_BnF_10997\n",
110
- "16 Molesme_1_Dijon_ADCO_Cart_142_7H6\n"
111
- ]
112
  }
113
  ],
114
  "source": [
115
- "training_data = []\n",
116
- "for i, filename in enumerate(files):\n",
117
- " manuscript = os.path.basename(filename).split('_final_version_inner')[0]\n",
118
- " print(i, manuscript)\n",
119
- " res = create_spacy_training_data(filename)\n",
120
- " for r in res:\n",
121
- " r[\"ms\"] = manuscript\n",
122
- " training_data.append(r)"
123
  ]
124
  },
125
  {
126
  "cell_type": "code",
127
- "execution_count": 120,
128
  "metadata": {},
129
  "outputs": [],
130
  "source": [
131
- "srsly.write_jsonl(\"home-alcar-ner.jsonl\", training_data)"
132
  ]
133
  },
134
  {
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "metadata": {},
7
  "outputs": [],
8
  "source": [
9
+ "import spacy\n",
10
  "import pandas as pd\n",
11
  "import glob\n",
12
+ "import tqdm"
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": 2,
18
+ "metadata": {},
19
+ "outputs": [
20
+ {
21
+ "name": "stdout",
22
+ "output_type": "stream",
23
+ "text": [
24
+ "17\n"
25
+ ]
26
+ }
27
+ ],
28
+ "source": [
29
+ "files = glob.glob(\"Database/*/CONLL/*.xlsx\")\n",
30
+ "print(len(files))"
31
  ]
32
  },
33
  {
34
  "cell_type": "code",
35
+ "execution_count": 26,
36
  "metadata": {},
37
  "outputs": [],
38
  "source": [
39
+ "def group_acts(df):\n",
40
+ " if \"ACT\" in df.columns:\n",
41
  " group_col = \"ACT\"\n",
42
+ " elif \"Original_Act_ID\" in df.columns:\n",
43
  " group_col = \"Original_Act_ID\"\n",
44
  " else:\n",
45
  " \"unknown\"\n",
46
+ "\n",
47
+ " if \"LANG\" in df.columns:\n",
48
+ " df = df[df[\"LANG\"] == \"LAT\"]\n",
49
+ "\n",
50
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
51
  " \n",
52
  " # Combine words into sentences, assumed by unique 'Line_ID'\n",
53
+ " grouped_data = df.groupby(group_col)\n",
54
+ "\n",
55
+ " return grouped_data\n",
56
+ "\n",
57
+ "def create_spacy_doc(df, nlp):\n",
58
+ " grouped_df = group_acts(df)\n",
59
+ " docs = []\n",
60
  " \n",
61
+ " for key, group in grouped_df:\n",
62
+ " tokens = []\n",
63
+ " spaces = []\n",
64
+ " entities = []\n",
65
+ " current_entity = None\n",
66
  "\n",
67
+ " # Iterate over the rows in the group\n",
68
+ " for i, row in enumerate(group.itertuples()):\n",
69
+ " word = row.Word_x\n",
70
+ " length_word = len(word)\n",
71
+ " # Check if there is a next word and whether it should be followed by a space\n",
72
+ " space_after = not (i < len(group) - 1 and group.iloc[i + 1].Word_x in [',', '.', ';', ':'])\n",
73
+ " tokens.append(word)\n",
74
+ " spaces.append(space_after)\n",
75
  "\n",
76
+ " # Handle entity recognition\n",
77
+ " if row.PERS_x != 'O':\n",
78
+ " entity_type = 'PERSON'\n",
79
+ " elif row.LOC_x != 'O':\n",
80
+ " entity_type = 'LOC'\n",
81
+ " else:\n",
82
+ " entity_type = None\n",
83
  "\n",
84
+ " if current_entity is None and entity_type is not None:\n",
85
+ " # Start new entity\n",
86
+ " current_entity = [i, i, entity_type]\n",
87
+ " elif current_entity is not None:\n",
88
+ " if entity_type == current_entity[2]:\n",
89
+ " # Extend current entity\n",
90
+ " current_entity[1] = i\n",
91
+ " else:\n",
92
+ " # Finish current entity and add to entities list\n",
93
+ " entities.append(current_entity)\n",
94
+ " current_entity = [i, i, entity_type] if entity_type else None\n",
95
  "\n",
96
+ " # Check if an entity is still open at the end of the group\n",
97
+ " if current_entity is not None:\n",
98
+ " entities.append(current_entity)\n",
99
+ "\n",
100
+ " # Create a spacy Doc object\n",
101
+ " doc = spacy.tokens.Doc(nlp.vocab, words=tokens, spaces=spaces)\n",
102
+ " # Get the sentencizer component from the pipeline\n",
103
+ " sentencizer = nlp.get_pipe(\"sentencizer\")\n",
104
+ "\n",
105
+ " # Apply the sentencizer to the Doc\n",
106
+ " sentencizer(doc)\n",
107
+ " # Create Span objects for the entities\n",
108
+ " spans = [doc.char_span(doc[ent[0]].idx, doc[ent[1]].idx + len(doc[ent[1]].text), label=ent[2])\n",
109
+ " for ent in entities if doc.char_span(doc[ent[0]].idx, doc[ent[1]].idx + len(doc[ent[1]].text), label=ent[2])]\n",
110
  " doc.spans[\"sc\"] = spans\n",
111
+ " \n",
112
+ "\n",
113
+ " docs.append(doc)\n",
114
+ "\n",
115
+ " return docs\n"
116
  ]
117
  },
118
  {
119
  "cell_type": "code",
120
+ "execution_count": 27,
121
  "metadata": {},
122
  "outputs": [
123
  {
124
+ "name": "stderr",
125
  "output_type": "stream",
126
  "text": [
127
+ " 0%| | 0/17 [00:00<?, ?it/s]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
128
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
129
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
130
+ "\n",
131
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
132
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
133
+ " 6%|▌ | 1/17 [00:01<00:23, 1.45s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
134
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
135
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
136
+ "\n",
137
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
138
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
139
+ " 12%|█▏ | 2/17 [00:03<00:26, 1.77s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
140
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
141
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
142
+ "\n",
143
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
144
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
145
+ " 24%|██▎ | 4/17 [00:18<01:08, 5.30s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
146
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
147
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
148
+ "\n",
149
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
150
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
151
+ " 29%|██▉ | 5/17 [00:26<01:15, 6.29s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
152
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
153
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
154
+ "\n",
155
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
156
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
157
+ " 35%|███▌ | 6/17 [00:29<00:57, 5.25s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
158
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
159
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
160
+ "\n",
161
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
162
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
163
+ " 41%|████ | 7/17 [00:30<00:38, 3.85s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
164
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
165
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
166
+ "\n",
167
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
168
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
169
+ " 47%|████▋ | 8/17 [00:32<00:30, 3.43s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
170
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
171
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
172
+ "\n",
173
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
174
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
175
+ " 53%|█████▎ | 9/17 [00:35<00:26, 3.30s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
176
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
177
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
178
+ "\n",
179
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
180
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
181
+ " 76%|███████▋ | 13/17 [00:50<00:14, 3.51s/it]/var/folders/4f/ddlj81h90_n0_h5wwvjbd2b40000gn/T/ipykernel_8164/298127518.py:12: SettingWithCopyWarning: \n",
182
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
183
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
184
+ "\n",
185
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
186
+ " df['Word_x'] = df['Word_x'].astype(str).str.strip()\n",
187
+ "100%|██████████| 17/17 [01:06<00:00, 3.89s/it]\n"
188
  ]
189
  }
190
  ],
191
  "source": [
192
+ "\n",
193
+ "docs = []\n",
194
+ "nlp = spacy.blank(\"en\")\n",
195
+ "nlp.add_pipe(\"sentencizer\")\n",
196
+ "\n",
197
+ "for file in tqdm.tqdm(files):\n",
198
+ " df = pd.read_excel(file)\n",
199
+ " docs = docs+create_spacy_doc(df, nlp)"
200
  ]
201
  },
202
  {
203
  "cell_type": "code",
204
+ "execution_count": 40,
205
  "metadata": {},
206
  "outputs": [
207
  {
208
+ "data": {
209
+ "text/plain": [
210
+ "{'tokenized_text': ['In',\n",
211
+ " 'nomine',\n",
212
+ " 'Domini',\n",
213
+ " ',',\n",
214
+ " 'amen',\n",
215
+ " '.',\n",
216
+ " 'Ego',\n",
217
+ " 'Mauricius',\n",
218
+ " ',',\n",
219
+ " 'Dei',\n",
220
+ " 'gracia',\n",
221
+ " 'Parisiensis',\n",
222
+ " 'episcopus',\n",
223
+ " ',',\n",
224
+ " 'universitati',\n",
225
+ " 'presencium',\n",
226
+ " 'ac',\n",
227
+ " 'futurorum',\n",
228
+ " 'hujus',\n",
229
+ " 'pagine',\n",
230
+ " 'attestatione',\n",
231
+ " 'notificare',\n",
232
+ " 'curamus',\n",
233
+ " 'quod',\n",
234
+ " 'dominus',\n",
235
+ " 'Guido',\n",
236
+ " 'de',\n",
237
+ " 'Levies',\n",
238
+ " ',',\n",
239
+ " 'pia',\n",
240
+ " 'et',\n",
241
+ " 'honesta',\n",
242
+ " 'consideratione',\n",
243
+ " 'ductus',\n",
244
+ " ',',\n",
245
+ " 'ad',\n",
246
+ " 'edificandam',\n",
247
+ " 'quandam',\n",
248
+ " 'novellam',\n",
249
+ " 'plantationem',\n",
250
+ " ',',\n",
251
+ " 'amore',\n",
252
+ " 'Dei',\n",
253
+ " 'et',\n",
254
+ " 'remedio',\n",
255
+ " 'anime',\n",
256
+ " 'sue',\n",
257
+ " 'et',\n",
258
+ " 'animarum',\n",
259
+ " 'parentum',\n",
260
+ " 'predecessorum',\n",
261
+ " 'suorum',\n",
262
+ " ',',\n",
263
+ " 'fratribus',\n",
264
+ " 'ibi',\n",
265
+ " 'Deo',\n",
266
+ " 'servituris',\n",
267
+ " 'in',\n",
268
+ " 'perpetuam',\n",
269
+ " 'elemosinam',\n",
270
+ " 'donavit',\n",
271
+ " 'unam',\n",
272
+ " 'carrucam',\n",
273
+ " 'de',\n",
274
+ " 'terra',\n",
275
+ " 'quam',\n",
276
+ " 'emit',\n",
277
+ " 'des',\n",
278
+ " 'Fers',\n",
279
+ " 'Dasnois',\n",
280
+ " ',',\n",
281
+ " 'et',\n",
282
+ " 'de',\n",
283
+ " 'decima',\n",
284
+ " 'duas',\n",
285
+ " 'partes',\n",
286
+ " 'quas',\n",
287
+ " 'ab',\n",
288
+ " 'hiisdem',\n",
289
+ " 'emit',\n",
290
+ " ',',\n",
291
+ " 'et',\n",
292
+ " 'unam',\n",
293
+ " 'partem',\n",
294
+ " 'nemoris',\n",
295
+ " 'quantum',\n",
296
+ " 'semita',\n",
297
+ " 'dividit',\n",
298
+ " 'versus',\n",
299
+ " 'terram',\n",
300
+ " 'datam',\n",
301
+ " ';',\n",
302
+ " 'hanc',\n",
303
+ " 'elemosinam',\n",
304
+ " 'in',\n",
305
+ " 'manu',\n",
306
+ " 'nostra',\n",
307
+ " 'resignatam',\n",
308
+ " 'benigne',\n",
309
+ " 'tribuit',\n",
310
+ " '.',\n",
311
+ " 'Sciendum',\n",
312
+ " 'autem',\n",
313
+ " 'quod',\n",
314
+ " 'de',\n",
315
+ " 'hac',\n",
316
+ " 'elemosina',\n",
317
+ " 'investivimus',\n",
318
+ " 'Guidonem',\n",
319
+ " ',',\n",
320
+ " 'quondam',\n",
321
+ " 'presbiterum',\n",
322
+ " 'de',\n",
323
+ " 'Meencort',\n",
324
+ " ',',\n",
325
+ " 'pro',\n",
326
+ " 'se',\n",
327
+ " 'et',\n",
328
+ " 'pro',\n",
329
+ " 'aliis',\n",
330
+ " 'ibi',\n",
331
+ " 'Deo',\n",
332
+ " 'se',\n",
333
+ " 'reddituris',\n",
334
+ " '.',\n",
335
+ " 'Actum',\n",
336
+ " 'apud',\n",
337
+ " 'Sanctum',\n",
338
+ " 'Victorem',\n",
339
+ " ',',\n",
340
+ " 'astantibus',\n",
341
+ " 'Petro',\n",
342
+ " ',',\n",
343
+ " 'precentore',\n",
344
+ " 'Parisiensi',\n",
345
+ " ';',\n",
346
+ " 'Nicholao',\n",
347
+ " ',',\n",
348
+ " 'presbitero',\n",
349
+ " ';',\n",
350
+ " 'Philippo',\n",
351
+ " ',',\n",
352
+ " 'canonico',\n",
353
+ " ';',\n",
354
+ " 'Haimerico',\n",
355
+ " ',',\n",
356
+ " 'capellano',\n",
357
+ " 'nostro',\n",
358
+ " ';',\n",
359
+ " 'Enardo',\n",
360
+ " ',',\n",
361
+ " 'presbitero',\n",
362
+ " 'de',\n",
363
+ " 'Balneolis',\n",
364
+ " ';',\n",
365
+ " 'fratre',\n",
366
+ " 'Stephano',\n",
367
+ " 'de',\n",
368
+ " 'Monte-Fermeolo',\n",
369
+ " ';',\n",
370
+ " 'incarnationis',\n",
371
+ " 'dominice',\n",
372
+ " 'anno',\n",
373
+ " 'millesimo',\n",
374
+ " 'centesimo',\n",
375
+ " 'XCº',\n",
376
+ " ',',\n",
377
+ " 'episcopatus',\n",
378
+ " 'nostri',\n",
379
+ " 'tricesimo',\n",
380
+ " 'sexto',\n",
381
+ " '.'],\n",
382
+ " 'spans': [{'text': 'Mauricius', 'label': 'PERSON', 'start': 7, 'end': 8},\n",
383
+ " {'text': 'Parisiensis', 'label': 'LOC', 'start': 11, 'end': 12},\n",
384
+ " {'text': 'Guido de Levies', 'label': 'PERSON', 'start': 25, 'end': 28},\n",
385
+ " {'text': 'Fers Dasnois', 'label': 'LOC', 'start': 68, 'end': 70},\n",
386
+ " {'text': 'Guidonem', 'label': 'PERSON', 'start': 108, 'end': 109},\n",
387
+ " {'text': 'Meencort', 'label': 'LOC', 'start': 113, 'end': 114},\n",
388
+ " {'text': 'Sanctum Victorem', 'label': 'LOC', 'start': 127, 'end': 129},\n",
389
+ " {'text': 'Petro', 'label': 'PERSON', 'start': 131, 'end': 132},\n",
390
+ " {'text': 'Parisiensi', 'label': 'LOC', 'start': 134, 'end': 135},\n",
391
+ " {'text': 'Nicholao', 'label': 'PERSON', 'start': 136, 'end': 137},\n",
392
+ " {'text': 'Philippo', 'label': 'PERSON', 'start': 140, 'end': 141},\n",
393
+ " {'text': 'Haimerico', 'label': 'PERSON', 'start': 144, 'end': 145},\n",
394
+ " {'text': 'Enardo', 'label': 'PERSON', 'start': 149, 'end': 150},\n",
395
+ " {'text': 'Balneolis', 'label': 'LOC', 'start': 153, 'end': 154},\n",
396
+ " {'text': 'Stephano de Monte-Fermeolo',\n",
397
+ " 'label': 'PERSON',\n",
398
+ " 'start': 156,\n",
399
+ " 'end': 159}]}"
400
+ ]
401
+ },
402
+ "execution_count": 40,
403
+ "metadata": {},
404
+ "output_type": "execute_result"
405
+ }
406
+ ],
407
+ "source": [
408
+ "hf_docs = []\n",
409
+ "hf_docs_sents = []\n",
410
+ "for doc in docs:\n",
411
+ " tokenized_text = [token.text for token in doc]\n",
412
+ " spans = [{\"text\": span.text, \"label\": span.label_, \"start\": span.start, \"end\": span.end} for span in doc.spans[\"sc\"]]\n",
413
+ " hf_docs.append({\"tokenized_text\": tokenized_text, \"spans\": spans})\n",
414
+ " for sent in doc.sents:\n",
415
+ " sent_doc = sent.as_doc() # Create a new Doc from the sentence\n",
416
+ " \n",
417
+ " # Create a list of spans that are within the boundaries of this sentence\n",
418
+ " sent_spans = [\n",
419
+ " {\"start\": span.start - sent.start, \"end\": span.end - sent.start, \"label\": span.label_, \"text\": span.text}\n",
420
+ " for span in doc.spans[\"sc\"] if span.start >= sent.start and span.end <= sent.end\n",
421
+ " ]\n",
422
+ " \n",
423
+ " # Check if there are entities in the sentence, if yes, add to hf_docs_sents\n",
424
+ " if sent_spans:\n",
425
+ " hf_docs_sents.append({\n",
426
+ " \"tokenized_text\": [token.text for token in sent],\n",
427
+ " \"spans\": sent_spans,\n",
428
+ " \"ms\": {} # Assuming 'ms' should be some metadata, define or update accordingly\n",
429
+ " })\n",
430
+ "hf_docs[0]"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "code",
435
+ "execution_count": 33,
436
+ "metadata": {},
437
+ "outputs": [
438
+ {
439
+ "data": {
440
+ "text/plain": [
441
+ "15141"
442
+ ]
443
+ },
444
+ "execution_count": 33,
445
+ "metadata": {},
446
+ "output_type": "execute_result"
447
  }
448
  ],
449
  "source": [
450
+ "len(hf_docs_sents)"
451
+ ]
452
+ },
453
+ {
454
+ "cell_type": "code",
455
+ "execution_count": 34,
456
+ "metadata": {},
457
+ "outputs": [],
458
+ "source": [
459
+ "import srsly"
460
+ ]
461
+ },
462
+ {
463
+ "cell_type": "code",
464
+ "execution_count": 35,
465
+ "metadata": {},
466
+ "outputs": [],
467
+ "source": [
468
+ "srsly.write_jsonl(\"home-alcar-ner.jsonl\", hf_docs)"
469
  ]
470
  },
471
  {
472
  "cell_type": "code",
473
+ "execution_count": 39,
474
  "metadata": {},
475
  "outputs": [],
476
  "source": [
477
+ "srsly.write_jsonl(\"home-alcar-ner-sents.jsonl\", hf_docs_sents)"
478
  ]
479
  },
480
  {