varora committed
Commit adc7158
1 Parent(s): 78aa55e

update hit.py

Files changed (1)
  1. hit.py +9 -4
hit.py CHANGED
@@ -50,6 +50,7 @@ _LICENSE = "see https://huggingface.co/datasets/varora/HIT/blob/main/README.md"
 # TODO: Add link to the official dataset URLs here
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_BASE_URL = "https://huggingface.co/datasets/varora/HIT/tree/main"
 _PATHS = {
     "male": "/male",
     "female": "/female",
@@ -111,14 +112,17 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        data_urls = _PATHS[self.config.name]
+        rel_path = _PATHS[self.config.name]
+        data_urls = os.path.join(_BASE_URL, rel_path)
+        print(f"data url: {data_urls}")
         archive_paths = dl_manager.download(data_urls)
+        print(archive_paths)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_urls, "train"),
+                    "filepath": os.path.join(rel_path, "train"),
                     "split": "train",
                 },
             ),
@@ -126,7 +130,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_urls, "val"),
+                    "filepath": os.path.join(rel_path, "val"),
                     "split": "validation",
                 },
             ),
@@ -134,7 +138,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_urls, "test"),
+                    "filepath": os.path.join(rel_path, "test"),
                     "split": "test"
                 },
             ),
@@ -145,6 +149,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # List all files in the path .gz
+        print(f"file path {filepath}")
         file_paths = []
         for root, dirs, files in os.walk(filepath):
             for file in files:
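
The last hunk adds a debug print inside _generate_examples, which (per the "# List all files in the path .gz" comment) walks the downloaded directory for .gz files before yielding examples. A minimal, self-contained sketch of that listing step, assuming only what the visible lines show; the helper name list_gz_files and the placement of the .gz filter are illustrative:

    import os

    def list_gz_files(filepath):
        # Walk the extracted dataset directory and collect every .gz file,
        # mirroring the os.walk loop visible in _generate_examples above.
        file_paths = []
        for root, dirs, files in os.walk(filepath):
            for file in files:
                if file.endswith(".gz"):
                    file_paths.append(os.path.join(root, file))
        # Sorting keeps example keys deterministic across runs (illustrative choice).
        return sorted(file_paths)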
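
For end-to-end context, a hedged usage sketch of how a loading script with these configs and splits is typically consumed. The repository id varora/HIT and the config names are assumptions taken from the _LICENSE/_BASE_URL URLs and the _PATHS keys in the diff; on recent datasets releases, script-based datasets may additionally require trust_remote_code=True:

    from datasets import load_dataset

    # "male" / "female" assumed from the _PATHS keys; repo id assumed from
    # the huggingface.co/datasets/varora/HIT URLs above.
    ds = load_dataset("varora/HIT", name="male", trust_remote_code=True)

    # Splits follow the three SplitGenerators in _split_generators.
    print(ds["train"])
    print(ds["validation"])
    print(ds["test"])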