shivam committed on
Commit 75781e0 · 1 Parent(s): ee9c4d9

Initial Commit

Files changed (5)
  1. data/text.en +3 -0
  2. data/text.hi +3 -0
  3. split-test.py +13 -7
  4. text.en +0 -3
  5. text.hi +0 -3
data/text.en ADDED
@@ -0,0 +1,3 @@
+ {"word": "english"}
+ {"word": "english"}
+ {"word": "english"}
data/text.hi ADDED
@@ -0,0 +1,3 @@
+ {"word": "hindi"}
+ {"word": "hindi"}
+ {"word": "hindi"}
split-test.py CHANGED
@@ -48,9 +48,15 @@ _LICENSE = ""
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
-}
+    "english":
+        {
+            "train": "./data/text.en"
+        },
+    "hindi":
+        {
+            "train": "./data/text.hi"
+        }
+}
 
 _LANGS = {
     "english": "en",
@@ -126,15 +132,15 @@ class SplitTest(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-        data_dir = "https://raw.githubusercontent.com/shivamm7/split-test/main/text." + _LANGS[self.config.name]
-        archive_path = dl_manager.download(data_dir)
-        print(archive_path)
+        down = _URLS[self.config.name]
+        file_path = dl_manager.download_and_extract(down)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": archive_path,
+                    "filepath": file_path,
                     "split": "train",
                 },
             )
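In the updated _split_generators, _URLS[self.config.name] is a dict such as {"train": "./data/text.en"}, and dl_manager.download_and_extract returns the same structure with each URL replaced by a local cache path, so the "filepath" passed through gen_kwargs is itself a dict keyed by split name. A sketch of a matching _generate_examples, assuming JSON Lines input and a single "word" feature (this method is not touched by the commit, so its body here is an assumption):

import json

def _generate_examples(self, filepath, split):
    # `filepath` mirrors the shape of _URLS[config_name], e.g.
    # {"train": "/path/to/cache/text.en"} after download_and_extract().
    with open(filepath[split], encoding="utf-8") as f:
        for key, line in enumerate(f):
            if not line.strip():
                continue
            row = json.loads(line)
            # Assumed feature name; _info() is not shown in this diff.
            yield key, {"word": row["word"]}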
text.en DELETED
@@ -1,3 +0,0 @@
- [{"word": "english"},
- {"word": "english"},
- {"word": "english"}]
text.hi DELETED
@@ -1,3 +0,0 @@
- [{"word": "hindi"},
- {"word": "hindi"},
- {"word": "hindi"}]