normalize_tags: warn on caption decoding error
normalize_tags.py  CHANGED  (+14 -9)
@@ -257,18 +257,23 @@ def process_directory(
     blacklist_instances = 0
     implied_instances = 0
     for file in tqdm(files):
-
-
-
-
-
-
-
-        orig_tags = tags
+        try:
+            with open(file, "rt", encoding="utf-8") as fd:
+                content = fd.read()
+        except ValueError as e:
+            logging.warning('Failed to read "%s": %s', file, e)
+            continue
+
+        orig_tags = tags = []
+        for chunk in RE_SEP.split(content):
+            chunk = chunk.strip()
+            if not chunk:
+                continue
+            tags.append(chunk)
+        original_len = len(tags)
 
         # Convert tags to ids, separate implied tags
         tags = [RE_ESCAPES.sub("", t.lower().replace(" ", "_")) for t in tags]
-        original_len = len(tags)
 
         # Encode to integer ids and strip implied tags
         tags, implied = tagset_normalizer.encode(
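Two details of the new hunk are worth spelling out. The except ValueError clause is what makes this a decoding fix: fd.read() raises UnicodeDecodeError on bytes that are not valid UTF-8, and UnicodeDecodeError is a subclass of ValueError, so the handler logs a warning and skips the caption file instead of aborting the whole run. Also, orig_tags = tags = [] binds both names to the same list while the raw chunks are collected; the later tags = [...] comprehension rebinds tags, leaving orig_tags holding the unnormalized strings. A minimal sketch of the read-and-tokenize pattern follows; the real RE_SEP is defined elsewhere in normalize_tags.py, so the comma/newline pattern below, the read_caption_tags helper, and the sample files are assumptions for illustration only.

import logging
import re

# Assumed separator for this sketch; the real RE_SEP is defined
# elsewhere in normalize_tags.py and may differ.
RE_SEP = re.compile(r"[,\n]")

def read_caption_tags(path):
    """Read a UTF-8 caption file and split it into stripped, non-empty tags.

    Returns None (after logging a warning) when the file cannot be decoded,
    mirroring the warn-and-continue behaviour this commit adds.
    """
    try:
        with open(path, "rt", encoding="utf-8") as fd:
            content = fd.read()
    except ValueError as e:  # UnicodeDecodeError subclasses ValueError
        logging.warning('Failed to read "%s": %s', path, e)
        return None
    return [chunk.strip() for chunk in RE_SEP.split(content) if chunk.strip()]

if __name__ == "__main__":
    with open("ok_caption.txt", "wt", encoding="utf-8") as fd:
        fd.write("1girl, solo, looking at viewer\n")
    print(read_caption_tags("ok_caption.txt"))  # ['1girl', 'solo', 'looking at viewer']

    with open("bad_caption.txt", "wb") as fd:
        fd.write(b"caf\xe9,")  # 0xE9 alone is not valid UTF-8
    print(read_caption_tags("bad_caption.txt"))  # logs a warning, prints None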