Datasets: harem
Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: Portuguese
Size: n<1K
License:
Update files from the datasets library (from 1.6.1)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.1
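For context on the card metadata above: HAREM is a Portuguese named-entity-recognition dataset, and harem.py (diffed below) is its loading script. A minimal usage sketch with the datasets library; the config name "default" and the field names are assumptions here, so check the dataset card for the configurations actually available:

from datasets import load_dataset

# Load the HAREM NER dataset; "default" is an assumed config name --
# consult the dataset card for the configurations actually available.
dataset = load_dataset("harem", "default")

# Each example is assumed to carry whitespace/punctuation tokens and
# per-token NER tags, the usual layout for NER datasets in the library.
example = dataset["train"][0]
print(example["tokens"][:10])
print(example["ner_tags"][:10])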
harem.py CHANGED
@@ -125,7 +125,7 @@ def reconstruct_text_from_tokens(tokens: List[Token], include_last_tail: bool =
 
 
 def tokenize(text: str) -> Tuple[List[Token], List[int]]:
-    """
+    """Perform whitespace and punctuation tokenization keeping track of char alignment"""
     doc_tokens = []
     char_to_word_offset = []
 
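The new one-line docstring names the technique precisely: whitespace-and-punctuation splitting that records, for every character, which token it belongs to. A minimal sketch of that idea (using plain str tokens; the script's own Token type and exact splitting rules may differ):

import string
from typing import List, Tuple


def tokenize(text: str) -> Tuple[List[str], List[int]]:
    """Whitespace/punctuation tokenization that tracks char-to-word alignment."""
    doc_tokens: List[str] = []
    char_to_word_offset: List[int] = []
    prev_is_boundary = True  # True when the next letter must start a new token
    for char in text:
        if char.isspace():
            prev_is_boundary = True
        elif char in string.punctuation:
            doc_tokens.append(char)  # each punctuation mark is its own token
            prev_is_boundary = True
        else:
            if prev_is_boundary:
                doc_tokens.append(char)  # start a new word token
            else:
                doc_tokens[-1] += char  # extend the current word token
            prev_is_boundary = False
        # Every character (whitespace included) maps to the latest token;
        # leading whitespace maps to -1, a quirk callers must tolerate.
        char_to_word_offset.append(len(doc_tokens) - 1)
    return doc_tokens, char_to_word_offset


# E.g. tokenize("Lisboa, Portugal") ->
#   (['Lisboa', ',', 'Portugal'], [0, 0, 0, 0, 0, 0, 1, 1, 2, ...])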
@@ -248,7 +248,7 @@ class HAREM(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath, split):
-        """
+        """Yields examples."""
 
         logger.info("⏳ Generating examples from = %s", filepath)
 
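The second docstring, """Yields examples.""", reflects the standard datasets.GeneratorBasedBuilder contract: _generate_examples is a generator yielding (key, example) pairs that the library materializes into dataset rows. A minimal sketch of that contract; the file layout and parsing below are assumptions for illustration, not the real HAREM format, and _info() plus _split_generators() are elided:

import json
import logging

import datasets

logger = logging.getLogger(__name__)


class HAREM(datasets.GeneratorBasedBuilder):
    # _info() and _split_generators() elided for brevity.

    def _generate_examples(self, filepath, split):
        """Yields examples."""
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            # Assumed layout: a JSON list of documents already carrying
            # "tokens" and "ner_tags"; the real script instead derives
            # them from the raw HAREM annotation (e.g. with a tokenizer
            # like the one sketched above).
            for id_, doc in enumerate(json.load(f)):
                yield id_, {
                    "id": str(id_),
                    "tokens": doc["tokens"],
                    "ner_tags": doc["ner_tags"],
                }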