mahdiyehebrahimi committed
Commit 7f781bc
Parent: f4f86da

Update nerutc.py

Files changed (1)
  1. nerutc.py +39 -36
nerutc.py CHANGED
@@ -1,30 +1,30 @@
 import csv
 
 import datasets
 
-
 logger = datasets.logging.get_logger(__name__)
 
-_CITATION = """Citation"""
 
-_DESCRIPTION = """Description"""
 
 _DOWNLOAD_URLS = {
-    "train": "Splitdataset\nerutc_train.csv",
-    "test": "Splitdataset\nerutc_test.csv",
 }
 
 
-class DatasetNameConfig(datasets.BuilderConfig):
     def __init__(self, **kwargs):
-        super(DatasetNameConfig, self).__init__(**kwargs)
 
 
-class DatasetName(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
-        DatasetNameConfig(
-            name="nerutc",
-            version=datasets.Version("1.1.1"),
             description=_DESCRIPTION,
         ),
     ]
@@ -35,21 +35,28 @@ class DatasetName(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "tokens": datasets.Sequence(datasets.Value("string")),
-                    # TODO YOU SHOULD PUT THE EXTRACTED UNIQUE TAGS IN YOUR DATASET HERE. THIS LIST IS JUST AN EXAMPLE
-                    """
-                    To extract unique tags from a pandas dataframe use this code and paste the output list below.
-
-                    ```python
-                    unique_tags = df["TAGS_COLUMN_NAME"].explode().unique()
-                    print(unique_tags)
-                    ```
-                    """
-                    "ner_tags": datasets.Sequence(  # USE `pos_tags`, `ner_tags`, `chunk_tags`, etc.
-                        datasets.features.ClassLabel(names=['O' 'B-UNI' 'I-UNI'])  # TODO
                     ),
                 }
             ),
-            homepage="PUT PATH TO THE ORIGINAL DATASET HOME PAGE HERE (OPTIONAL BUT RECOMMENDED)",
             citation=_CITATION,
         )
 
@@ -62,28 +69,24 @@ class DatasetName(datasets.GeneratorBasedBuilder):
         test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
         ]
 
-    # TODO
     def _generate_examples(self, filepath):
-        """
-        Per each file_path read the csv file and iterate it.
-        For each row yield a tuple of (id, {"tokens": ..., "tags": ..., ...})
-        Each call to this method yields an output like below:
-        ```
-        (124, {"tokens": ["hello", "world"], "pos_tags": ["NOUN", "NOUN"]})
-        ```
-        """
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
 
-            # Uncomment below line to skip the first row if your csv file has a header row
-            # next(csv_reader, None)
 
             for id_, row in enumerate(csv_reader):
                 tokens, ner_tags = row
                 # Optional preprocessing here
                 yield id_, {"tokens": tokens, "ner_tags": ner_tags}
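The removed template docstring above suggests building the `ClassLabel` name list by collecting the unique tags from the raw CSV with pandas. A self-contained version of that snippet could look like the following sketch; the file name and column name are assumptions, and the `literal_eval` step mirrors how the updated script parses the list-valued CSV columns:

```python
from ast import literal_eval

import pandas as pd

# File name and column name are assumptions about the raw CSV layout.
df = pd.read_csv("parstwiner_train.csv")

# Each cell is assumed to hold a stringified Python list, e.g. "['B-PER', 'O']",
# so parse it back into a real list before exploding into one tag per row.
unique_tags = df["ner_tags"].apply(literal_eval).explode().unique()
print(sorted(unique_tags))
```

The updated file, shown below with added lines marked `+`, fills the template placeholders in for ParsTwiNER: real download URLs, the ParsTwiNER tag set, a homepage, and CSV parsing with `literal_eval`.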
 
 import csv
+from ast import literal_eval
 
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
 
+_CITATION = """"""
 
+_DESCRIPTION = """"""
 
 _DOWNLOAD_URLS = {
+    "train": "https://huggingface.co/datasets/hezarai/parstwiner/resolve/main/parstwiner_train.csv",
+    "test": "https://huggingface.co/datasets/hezarai/parstwiner/resolve/main/parstwiner_test.csv",
 }
 
 
+class ParsTwiNERConfig(datasets.BuilderConfig):
     def __init__(self, **kwargs):
+        super(ParsTwiNERConfig, self).__init__(**kwargs)
 
 
+class ParsTwiNER(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
+        ParsTwiNERConfig(
+            name="ParsTwiNER",
+            version=datasets.Version("1.0.0"),
             description=_DESCRIPTION,
         ),
     ]
 
             features=datasets.Features(
                 {
                     "tokens": datasets.Sequence(datasets.Value("string")),
+                    "ner_tags": datasets.Sequence(
+                        datasets.features.ClassLabel(
+                            names=[
+                                "O",
+                                "B-POG",
+                                "I-POG",
+                                "B-PER",
+                                "I-PER",
+                                "B-ORG",
+                                "I-ORG",
+                                "B-NAT",
+                                "I-NAT",
+                                "B-LOC",
+                                "I-LOC",
+                                "B-EVE",
+                                "I-EVE",
+                            ]
+                        )
                     ),
                 }
             ),
+            homepage="https://github.com/overfit-ir/parstwiner",
             citation=_CITATION,
         )
 
         test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
 
         return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}
+            ),
         ]
 
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as csv_file:
             csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)
 
+            next(csv_reader, None)
 
             for id_, row in enumerate(csv_reader):
                 tokens, ner_tags = row
                 # Optional preprocessing here
+                tokens = literal_eval(tokens)
+                ner_tags = literal_eval(ner_tags)
                 yield id_, {"tokens": tokens, "ner_tags": ner_tags}
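As a quick sanity check of the updated loader, the script can be exercised with `datasets.load_dataset`. This is a minimal sketch, not part of the commit: the local script path is an assumption (the dataset's Hub repo id would work as well), and recent versions of `datasets` may additionally require `trust_remote_code=True` for script-based datasets:

```python
from datasets import load_dataset

# Loading from the local script path is an assumption; adjust as needed.
dataset = load_dataset("nerutc.py")
print(dataset)  # DatasetDict with "train" and "test" splits

example = dataset["train"][0]
print(example["tokens"][:10])

# "ner_tags" is a Sequence(ClassLabel); map the integer ids back to tag names.
ner_feature = dataset["train"].features["ner_tags"].feature
print([ner_feature.int2str(tag) for tag in example["ner_tags"][:10]])
```

Because `_generate_examples` yields string tags while the feature is declared as a `ClassLabel`, the `datasets` library encodes the tags to integer ids at build time, which is why the decoding step above uses `int2str`.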