varora committed on
Commit
c186c4c
1 Parent(s): a805815

changes to hit.py

Browse files
Files changed (1) hide show
  1. hit.py +8 -33
hit.py CHANGED
@@ -11,22 +11,14 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
- # TODO: Address all TODOs and remove all explanatory comments
15
- """TODO: Add a description here."""
16
 
17
-
18
- import csv
19
  import json
20
  import os
21
- from glob import glob
22
  import datasets
23
  import pickle
24
- import requests
25
  import gzip
26
 
27
-
28
- # TODO: Add BibTeX citation
29
- # Find for instance the citation on arxiv or on the dataset repo/website
30
  _CITATION = """\
31
  @inproceedings{Keller:CVPR:2024,
32
  title = {{HIT}: Estimating Internal Human Implicit Tissues from the Body Surface},
@@ -38,18 +30,17 @@ _CITATION = """\
38
  month_numeric = {6}}
39
  """
40
 
41
- # TODO: Add description of the dataset here
42
- # You can copy an official description
43
  _DESCRIPTION = """\
44
  The HIT dataset is a structured dataset of paired observations of body's inner tissues and the body surface. More concretely, it is a dataset of paired full-body volumetric segmented (bones, lean, and adipose tissue) MRI scans and SMPL meshes capturing the body surface shape for male (N=157) and female (N=241) subjects respectively. This is relevant for medicine, sports science, biomechanics, and computer graphics as it can ease the creation of personalized anatomic digital twins that model our bones, lean, and adipose tissue."""
45
 
46
- # TODO: Add a link to an official homepage for the dataset here
47
  _HOMEPAGE = "https://hit.is.tue.mpg.de/"
48
 
49
- # TODO: Add the licence for the dataset here if you can find it
50
  _LICENSE = "see https://huggingface.co/datasets/varora/HIT/blob/main/README.md"
51
 
52
- # TODO: Add link to the official dataset URLs here
53
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
54
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
55
  _BASE_URL = "https://huggingface.co/datasets/varora/HIT/tree/main"
@@ -58,10 +49,7 @@ _PATHS = {
58
  "female": "/female",
59
  }
60
 
61
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
62
- class NewDataset(datasets.GeneratorBasedBuilder):
63
- """TODO: Short description of my dataset."""
64
-
65
  VERSION = datasets.Version("1.1.0")
66
 
67
  # This is an example of a dataset with multiple configurations.
@@ -123,7 +111,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
123
  )
124
 
125
  def _split_generators(self, dl_manager):
126
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
127
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
128
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
129
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
@@ -131,7 +119,6 @@ class NewDataset(datasets.GeneratorBasedBuilder):
131
  splits = ["train", "val", "test"]
132
 
133
  gender = self.config.name
134
- #data_urls = _BASE_URL + rel_path
135
  print(f"Config: {gender}")
136
 
137
  file_structure_url = "hit_dataset.json"
@@ -174,23 +161,11 @@ class NewDataset(datasets.GeneratorBasedBuilder):
174
 
175
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
176
  def _generate_examples(self, filepath, split):
177
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
178
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
179
  # List all files in the path .gz
180
  for subject_path in filepath:
181
  with gzip.open(subject_path, 'rb') as f:
182
  data = pickle.load(f)
183
- print(data.keys())
184
- print(f"pc shape: {data['body_cont_pc'].shape}")
185
- print(f"mri_seg shape: {data['mri_seg'].shape}")
186
- print(f"mri_seg_dict shape: {data['body_mask'].shape}")
187
  key = data['subject_ID']
188
- #del data['body_mask'] # nan
189
- #del data['smpl_dict']
190
- #del data['mri_seg'] # reshape
191
- #del data['mri_labels']
192
- #del data['resolution']
193
- #del data['center']
194
- #del data['body_cont_pc'] # nan
195
-
196
  yield key, data
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
 
 
14
 
 
 
15
  import json
16
  import os
 
17
  import datasets
18
  import pickle
 
19
  import gzip
20
 
21
+ # citation
 
 
22
  _CITATION = """\
23
  @inproceedings{Keller:CVPR:2024,
24
  title = {{HIT}: Estimating Internal Human Implicit Tissues from the Body Surface},
 
30
  month_numeric = {6}}
31
  """
32
 
33
+ # description
 
34
  _DESCRIPTION = """\
35
  The HIT dataset is a structured dataset of paired observations of body's inner tissues and the body surface. More concretely, it is a dataset of paired full-body volumetric segmented (bones, lean, and adipose tissue) MRI scans and SMPL meshes capturing the body surface shape for male (N=157) and female (N=241) subjects respectively. This is relevant for medicine, sports science, biomechanics, and computer graphics as it can ease the creation of personalized anatomic digital twins that model our bones, lean, and adipose tissue."""
36
 
37
+ # link to official homepage for the dataset
38
  _HOMEPAGE = "https://hit.is.tue.mpg.de/"
39
 
40
+ # license for the dataset
41
  _LICENSE = "see https://huggingface.co/datasets/varora/HIT/blob/main/README.md"
42
 
43
+ # official dataset URLs
44
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
45
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
46
  _BASE_URL = "https://huggingface.co/datasets/varora/HIT/tree/main"
 
49
  "female": "/female",
50
  }
51
 
52
+ class HIT(datasets.GeneratorBasedBuilder):
 
 
 
53
  VERSION = datasets.Version("1.1.0")
54
 
55
  # This is an example of a dataset with multiple configurations.
 
111
  )
112
 
113
  def _split_generators(self, dl_manager):
114
+ # downloading/extracting the data and defining the splits depending on the configuration
115
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
116
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
117
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
 
119
  splits = ["train", "val", "test"]
120
 
121
  gender = self.config.name
 
122
  print(f"Config: {gender}")
123
 
124
  file_structure_url = "hit_dataset.json"
 
161
 
162
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
163
  def _generate_examples(self, filepath, split):
164
+ # handling input defined in _split_generators to yield (key, example) tuples from the dataset.
165
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
166
  # List all files in the path .gz
167
  for subject_path in filepath:
168
  with gzip.open(subject_path, 'rb') as f:
169
  data = pickle.load(f)
 
 
 
 
170
  key = data['subject_ID']
 
 
 
 
 
 
 
 
171
  yield key, data