muhammadravi251001 committed
Commit
57333c5
1 Parent(s): e07bf43

Delete idkmrc-nli.py

Files changed (1)
  1. idkmrc-nli.py +0 -109
idkmrc-nli.py DELETED
@@ -1,109 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """IDKMRC-NLI: a synthetic NLI dataset derived from the IDK-MRC question answering dataset."""
-
-
- import csv
-
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find, for instance, the citation on arXiv or on the dataset repo/website.
- _CITATION = """\
- """
-
- _DESCRIPTION = """\
- The IDKMRC-NLI dataset is derived from the IDK-MRC question answering dataset, using named entity recognition (NER), chunking tags, regular expressions, and embedding-similarity techniques to determine its contradiction sets.
- Beyond the premise, hypothesis, and label columns, the dataset includes additional properties aligned with the NER and chunking tags used during collection.
- It is designed for Natural Language Inference (NLI) tasks and draws on diverse sources for comprehensive coverage.
- Each instance contains a premise, a hypothesis, a label, and additional properties relevant to NLI evaluation.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli"
-
- # TODO: Add the license for the dataset here if you can find it.
- _LICENSE = """
- """
-
- _TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/resolve/main/idk-mrc_nli_train_df.csv?download=true"
- _VALID_DOWNLOAD_URL = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_val_df.csv"
- _TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_test_df.csv"
-
-
- class IDKMRCNLIConfig(datasets.BuilderConfig):
-     """BuilderConfig for IDKMRC-NLI."""
-
-     def __init__(self, **kwargs):
-         """BuilderConfig for IDKMRC-NLI.
-
-         Args:
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super().__init__(**kwargs)
-
-
- class IDKMRCNLI(datasets.GeneratorBasedBuilder):
-     """IDKMRC-NLI: a synthetic NLI dataset derived from a QA dataset, using
-     named entity recognition (NER), chunking tags, regular expressions, and
-     embedding-similarity techniques to determine its contradiction sets."""
-
-     BUILDER_CONFIGS = [
-         IDKMRCNLIConfig(
-             name="idkmrc-nli",
-             version=datasets.Version("1.1.0"),
-             description="IDKMRC-NLI: a synthetic NLI dataset derived from a QA dataset, using NER, chunking tags, regular expressions, and embedding-similarity techniques to determine its contradiction sets",
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "premise": datasets.Value("string"),
-                     "hypothesis": datasets.Value("string"),
-                     "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators for the train, validation, and test splits."""
-         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
-         valid_path = dl_manager.download_and_extract(_VALID_DOWNLOAD_URL)
-         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
-             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields (id, example) pairs read from one CSV split."""
-         with open(filepath, encoding="utf-8") as csv_file:
-             csv_reader = csv.DictReader(csv_file)
-             for id_, row in enumerate(csv_reader):
-                 yield id_, {
-                     "premise": row["premise"],
-                     "hypothesis": row["hypothesis"],
-                     "label": row["label"],
-                 }
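
Since this commit removes the loading script, the splits can still be loaded straight from the hosted CSV files with the `datasets` library's built-in csv builder. A minimal sketch, assuming the three CSV files referenced by the deleted script remain hosted at the same URLs; casting a string column back to the ClassLabel feature the script declared is supported in recent `datasets` versions (`class_encode_column` is an alternative for older ones):

import datasets

# Assumption: these URLs, taken from the deleted script, are still live.
data_files = {
    "train": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/resolve/main/idk-mrc_nli_train_df.csv?download=true",
    "validation": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_val_df.csv",
    "test": "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_test_df.csv",
}

# The generic csv builder loads "label" as a plain string; cast it to the
# ClassLabel feature the deleted script declared.
dataset = datasets.load_dataset("csv", data_files=data_files)
dataset = dataset.cast_column(
    "label",
    datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
)
print(dataset["validation"][0])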
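For a dependency-light sanity check of a single split, the deleted `_generate_examples` boils down to a `csv.DictReader` pass over one file. A standard-library sketch along those lines; the validation URL and the three-row preview are illustrative choices, not part of the original script:

import csv
import io
import urllib.request

# Assumption: the validation CSV from the deleted script is still hosted here.
VALID_URL = "https://huggingface.co/datasets/muhammadravi251001/idkmrc-nli/raw/main/idk-mrc_nli_val_df.csv"

def generate_examples(url):
    """Yield (id, example) pairs, mirroring the deleted _generate_examples."""
    with urllib.request.urlopen(url) as response:
        text = response.read().decode("utf-8")
    for id_, row in enumerate(csv.DictReader(io.StringIO(text))):
        yield id_, {
            "premise": row["premise"],
            "hypothesis": row["hypothesis"],
            "label": row["label"],
        }

for id_, example in generate_examples(VALID_URL):
    print(id_, example["label"])
    if id_ == 2:  # preview only the first three rows
        break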