nave cohen
committed on
Commit
•
617fccf
1
Parent(s):
896f56b
dataset-card update
Browse files- dataset.py +59 -5
dataset.py
CHANGED
@@ -1,7 +1,61 @@
|
|
1 |
-
|
|
|
|
|
2 |
|
3 |
-
|
4 |
-
|
5 |
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import pandas as pd
|
3 |
+
from datasets import GeneratorBasedBuilder, DatasetInfo, Features, Image, Value, SplitGenerator
|
4 |
|
5 |
+
class UrbanClimateDataset(GeneratorBasedBuilder):
    """Urban Climate Dataset for building detection.

    Hugging Face `datasets` builder that loads image/mask pairs for
    building-segmentation work. Each split directory is expected to contain
    a ``metadata.csv`` listing one file per row with at least the columns
    ``file_name`` and ``type`` (where ``type`` is ``"image"`` or ``"mask"``
    -- assumed from the generator logic; TODO confirm against the data).
    """

    VERSION = "1.0.0"

    def _info(self):
        """Return the dataset metadata (features, homepage, license, citation)."""
        return DatasetInfo(
            description="Dataset for building detection in urban environments using image segmentation.",
            features=Features({
                "file_name": Value("string"),
                "image": Image(),
                "mask": Image(),
                "type": Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/nave1616/urban_climate_dataset",
            license="CC BY-SA 4.0",
            # TODO: replace this placeholder with the real BibTeX entry.
            citation="""@article{your_citation}"""
        )

    def _split_generators(self, dl_manager):
        """Download/extract the configured data dir and declare the three splits.

        Relies on ``self.config.data_dir`` being set by the caller; each split
        lives in a same-named subdirectory of the extracted archive.
        """
        base_path = dl_manager.download_and_extract(self.config.data_dir)

        # One generator per split; _generate_examples receives the split dir.
        return [
            SplitGenerator(
                name=split,
                gen_kwargs={"files_dir": os.path.join(base_path, split)},
            )
            for split in ("train", "validation", "test")
        ]

    def _generate_examples(self, files_dir):
        """Yield ``(key, example)`` pairs driven by the split's metadata CSV.

        Args:
            files_dir: Directory containing ``metadata.csv`` and the files it
                references (paths in ``file_name`` are relative to it).

        Each CSV row describes exactly one file; its ``type`` column decides
        whether the file populates the ``image`` or the ``mask`` feature, and
        the other feature is left as ``None``.
        """
        metadata_path = os.path.join(files_dir, 'metadata.csv')
        df = pd.read_csv(metadata_path)
        for idx, row in df.iterrows():
            path = os.path.join(files_dir, row["file_name"])
            row_type = row["type"]
            # NOTE(review): the original also derived a sibling mask path via
            # an 'image' -> 'mask' substring replacement for image rows, but
            # that value was never emitted (mask is only set when
            # row_type == 'mask', where it equaled the file's own path).
            # Dropped as dead code; behavior is unchanged.
            yield idx, {
                "file_name": row["file_name"],
                "image": path if row_type == 'image' else None,
                "mask": path if row_type == 'mask' else None,
                "type": row_type,
            }
|