Datasets: turkish-nlp-suite/turkish-WikiNER
Tasks: Token Classification
Modalities: Text
Formats: json
Sub-tasks: named-entity-recognition
Languages: Turkish
Size: 10K - 100K
License:
BayanDuygu committed on
Commit • 30a1ef7
1 Parent(s): fcfbf7f
Delete turkish-wikiNER.py
turkish-wikiNER.py +0 -85
turkish-wikiNER.py
DELETED
@@ -1,85 +0,0 @@
import json
from itertools import chain
import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
General Purpose Turkish NER dataset. 19 labels and 20.000 instances at total. \
[Turkish Wiki NER dataset](https://github.com/turkish-nlp-suite/Turkish-Wiki-NER-Dataset)
"""
_NAME = "turkish-WikiNER"

_VERSION = "1.0.0"

_CITATION = """\
@inproceedings{altinok-2023-diverse,
    title = "A Diverse Set of Freely Available Linguistic Resources for {T}urkish",
    author = "Altinok, Duygu",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.768",
    pages = "13739--13750",
    abstract = "This study presents a diverse set of freely available linguistic resources for Turkish natural language processing, including corpora, pretrained models and education material. Although Turkish is spoken by a sizeable population of over 80 million people, Turkish linguistic resources for natural language processing remain scarce. In this study, we provide corpora to allow practitioners to build their own applications and pretrained models that would assist industry researchers in creating quick prototypes. The provided corpora include named entity recognition datasets of diverse genres, including Wikipedia articles and supplement products customer reviews. In addition, crawling e-commerce and movie reviews websites, we compiled several sentiment analysis datasets of different genres. Our linguistic resources for Turkish also include pretrained spaCy language models. To the best of our knowledge, our models are the first spaCy models trained for the Turkish language. Finally, we provide various types of education material, such as video tutorials and code examples, that can support the interested audience on practicing Turkish NLP. The advantages of our linguistic resources are three-fold: they are freely available, they are first of their kind, and they are easy to use in a broad range of implementations. Along with a thorough description of the resource creation process, we also explain the position of our resources in the Turkish NLP world.",
}
"""

_HOME_PAGE = "https://github.com/turkish-nlp-suite/Turkish-Wiki-NER-Dataset"
_URL = f'https://huggingface.co/datasets/turkish-nlp-suite/{_NAME}/raw/main/dataset'
_URLS = {
    str(datasets.Split.TEST): [f'{_URL}/test.json'],
    str(datasets.Split.TRAIN): [f'{_URL}/train.json'],
    str(datasets.Split.VALIDATION): [f'{_URL}/valid.json'],
}


class TurkishWikiNERConfig(datasets.BuilderConfig):
    """BuilderConfig"""

    def __init__(self, **kwargs):
        """BuilderConfig.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(TurkishWikiNERConfig, self).__init__(**kwargs)


class TurkishWikiNER(datasets.GeneratorBasedBuilder):
    """Dataset."""

    BUILDER_CONFIGS = [
        TurkishWikiNERConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
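Note on the record format: the deleted _generate_examples treats each split file as JSON Lines, parsing every non-empty line into an object whose "tokens" and "tags" fields match the features declared in _info. Below is a minimal standalone sketch of that per-line parsing step; the sample record and its tag names are placeholders for illustration, not values taken from the dataset's 19-label tag set.

import json

# A made-up JSON Lines payload; real records live in train.json / valid.json / test.json.
sample_lines = '{"tokens": ["Ankara", "guzel", "bir", "sehirdir"], "tags": ["B-LOC", "O", "O", "O"]}\n'

# Same per-line parsing the deleted _generate_examples performed: one JSON object
# per non-empty line, yielded with a running integer key.
for key, line in enumerate(l for l in sample_lines.split("\n") if len(l) > 0):
    record = json.loads(line)
    print(key, record["tokens"], record["tags"])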
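Removing the loading script usually means the repository is expected to rely on the Hub's built-in loaders rather than the TurkishWikiNER builder (the card above already lists json as the format). As a hedged sketch, the same three splits could presumably still be read with the generic json builder, assuming the dataset/train.json, dataset/valid.json, and dataset/test.json files referenced by the deleted _URLS are still present in the repository.

from datasets import load_dataset

# File locations copied from the deleted script's _URLS; whether they still exist after
# this commit is an assumption about the repo layout, not something the diff guarantees.
base = "https://huggingface.co/datasets/turkish-nlp-suite/turkish-WikiNER/raw/main/dataset"
ds = load_dataset(
    "json",
    data_files={
        "train": f"{base}/train.json",
        "validation": f"{base}/valid.json",
        "test": f"{base}/test.json",
    },
)
print(ds["train"][0])  # expected keys: "tokens" and "tags"

Calling load_dataset("turkish-nlp-suite/turkish-WikiNER") with the bare repo id may also work if the Hub auto-detects the JSON files, but that depends on how the repository is organized after this deletion.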