aabidk committed on
Commit
994249b
1 Parent(s): d6dccac

Upload dataset

Files changed (2)
  1. README.md +92 -0
  2. generate_dataset_old.py +134 -0
README.md ADDED
@@ -0,0 +1,92 @@
+ ---
+ annotations_creators:
+ - crowdsourced
+ language_creators:
+ - found
+ language:
+ - en
+ license:
+ - cc-by-4.0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ source_datasets:
+ - original
+ task_categories:
+ - token-classification
+ task_ids:
+ - named-entity-recognition
+ paperswithcode_id: maps-token-classification
+ pretty_name: Maps Token Classification
+ dataset_info:
+   features:
+   - name: id
+     dtype: string
+   - name: tokens
+     sequence: string
+   - name: ner_tags
+     sequence:
+       class_label:
+         names:
+           '0': O
+           '1': B-zoomIn
+           '2': I-zoomIn
+           '3': B-zoomOut
+           '4': I-zoomOut
+           '5': B-panLeft
+           '6': B-panRight
+           '7': B-panUp
+           '8': B-panDown
+           '9': B-goTo
+           '10': B-location
+           '11': I-location
+           '12': B-negation
+           '13': B-layer
+           '14': I-layer
+   config_name: maps-token-classification
+   splits:
+   - name: train
+     num_examples: 146
+   - name: validation
+     num_examples: 20
+   - name: test
+     num_examples: 20
+ ---
+
+ # Dataset Card for "maps-token-classification"
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ### Dataset Summary
+
+ Maps Token Classification Dataset
+
+ This dataset is designed for token classification tasks in the context of maps applications.
+ It includes categories for actions, layers, locations, and modifiers.
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
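
For quick reference, here is a minimal loading sketch with 🤗 `datasets`. The Hub repo id `aabidk/maps-token-classification` is an assumption inferred from the commit author and the config name, not confirmed by this commit:

```python
from datasets import load_dataset

# Assumed repo id (inferred from the commit author and config name).
ds = load_dataset("aabidk/maps-token-classification")

# Each example pairs a token sequence with integer NER tags.
example = ds["train"][0]

# Decode integer tags back to their string names ("O", "B-zoomIn", ...).
tag_names = ds["train"].features["ner_tags"].feature.names
print(list(zip(example["tokens"], [tag_names[t] for t in example["ner_tags"]])))
```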
generate_dataset_old.py ADDED
@@ -0,0 +1,134 @@
+ # coding=utf-8
+ # Lint as: python3
+ """The Maps Token Classification Dataset."""
+
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """\
+ @misc{maps_token_classification,
+   title = {Maps Token Classification Dataset},
+   author = {Your Name},
+   year = {2023},
+   publisher = {Your Institution},
+ }
+ """
+
+ _DESCRIPTION = """\
+ Maps Token Classification Dataset
+
+ This dataset is designed for token classification tasks in the context of maps applications.
+ It includes categories for actions, layers, locations, and modifiers.
+ """
+
+ _URL = "https://raw.githubusercontent.com/aabidk20/mapsVoiceDataset/main/"
+ _TRAINING_FILE = "maps_train.conll"
+ _DEV_FILE = "maps_dev.conll"
+ _TEST_FILE = "maps_test.conll"
+
+
+ class MapsTokenClassificationConfig(datasets.BuilderConfig):
+     """BuilderConfig for the Maps Token Classification Dataset."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for Maps Token Classification.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(MapsTokenClassificationConfig, self).__init__(**kwargs)
+
+
+ class MapsTokenClassification(datasets.GeneratorBasedBuilder):
+     """The Maps Token Classification Dataset."""
+
+     BUILDER_CONFIGS = [
+         MapsTokenClassificationConfig(
+             name="maps_token_classification",
+             version=datasets.Version("1.0.0"),
+             description="The Maps Token Classification Dataset",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-zoomIn",
+                                 "I-zoomIn",
+                                 "B-zoomOut",
+                                 "I-zoomOut",
+                                 "B-panLeft",
+                                 "B-panRight",
+                                 "B-panUp",
+                                 "B-panDown",
+                                 "B-goTo",
+                                 "B-location",
+                                 "I-location",
+                                 "B-negation",
+                                 "B-layer",
+                                 "I-layer",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators for the train, validation, and test splits."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields (id, example) pairs parsed from a tab-separated CoNLL file."""
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             current_tokens = []
+             current_labels = []
+             sentence_counter = 0
+             for row in f:
+                 row = row.rstrip()
+                 if row:
+                     # Non-empty line: one "token<TAB>label" pair.
+                     token, label = row.split("\t")
+                     current_tokens.append(token)
+                     current_labels.append(label)
+                 else:
+                     # An empty line marks a sentence boundary.
+                     if not current_tokens:
+                         # Consecutive empty lines would otherwise yield empty sentences.
+                         continue
+                     assert len(current_tokens) == len(current_labels), "💔 mismatch between len of tokens & labels"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_tokens,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_tokens = []
+                     current_labels = []
+                     yield sentence
+             # Don't forget the last sentence in the dataset 🧐
+             if current_tokens:
+                 yield sentence_counter, {
+                     "id": str(sentence_counter),
+                     "tokens": current_tokens,
+                     "ner_tags": current_labels,
+                 }
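
The loader parses tab-separated CoNLL files in which each non-empty line holds one `token<TAB>label` pair and blank lines separate sentences. A standalone sketch of that parsing logic, run against a hypothetical two-sentence input (the sample commands below are invented for illustration):

```python
import io

# Hypothetical input in the format the script expects: one "token<TAB>label"
# pair per line, sentences separated by blank lines.
conll_text = "zoom\tB-zoomIn\nin\tI-zoomIn\n\ngo\tB-goTo\nto\tO\nParis\tB-location\n"

def read_conll(f):
    """Yield (tokens, labels) pairs, mirroring _generate_examples above."""
    tokens, labels = [], []
    for row in f:
        row = row.rstrip()
        if row:
            token, label = row.split("\t")
            tokens.append(token)
            labels.append(label)
        elif tokens:  # blank line closes a sentence; consecutive blanks are skipped
            yield tokens, labels
            tokens, labels = [], []
    if tokens:  # flush the final sentence if the file lacks a trailing blank line
        yield tokens, labels

for tokens, labels in read_conll(io.StringIO(conll_text)):
    print(list(zip(tokens, labels)))
```

On `datasets` versions that still support script-based loading, the script itself can be exercised with `datasets.load_dataset("generate_dataset_old.py")`, which fetches the three `.conll` files from `_URL`.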