import os
import random

import numpy as np
from PIL import Image

import datasets
from datasets.tasks import ImageClassification

# py7zr is imported so that `datasets` picks it up as a dependency; it is
# needed to extract the .7z archives referenced in _URLS.
import py7zr  # noqa: F401

_HOMEPAGE = "https://huggingface.co/datasets/rshrott/renovation"

_CITATION = """\
@ONLINE {renovationquality,
author="Your Name",
title="Renovation Quality Dataset",
month="Your Month",
year="Your Year",
url="https://huggingface.co/datasets/rshrott/renovation"
}
"""

_DESCRIPTION = """\
This dataset contains images of various properties, along with labels indicating the quality of renovation: 'cheap', 'average', or 'expensive'.
"""

_URLS = {
    "cheap": "https://huggingface.co/datasets/rshrott/renovation/raw/main/cheap.7z",
    "average": "https://huggingface.co/datasets/rshrott/renovation/raw/main/average.7z",
    "expensive": "https://huggingface.co/datasets/rshrott/renovation/raw/main/expensive.7z",
}

_NAMES = ["cheap", "average", "expensive"]


class RenovationQualityDataset(datasets.GeneratorBasedBuilder):
    """Renovation Quality Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        # Download and extract the per-label archives, then collect (path, label) pairs.
        image_paths = []
        for label, url in _URLS.items():
            extract_path = dl_manager.download_and_extract(url)
            for root, _, files in os.walk(extract_path):
                for file in files:
                    if file.endswith(".jpeg"):  # Assuming all images are .jpeg
                        image_paths.append((os.path.join(root, file), label))

        # Shuffle with a fixed seed so the train/validation/test split is reproducible.
        random.Random(42).shuffle(image_paths)

        # 80% for training, 10% for validation, 10% for testing.
        train_end = int(0.8 * len(image_paths))
        val_end = int(0.9 * len(image_paths))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"rows": image_paths[:train_end]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"rows": image_paths[train_end:val_end]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"rows": image_paths[val_end:]},
            ),
        ]

    def _generate_examples(self, rows):
        def file_to_image(file_path):
            # Decode the file; the Image feature also accepts numpy arrays.
            img = Image.open(file_path)
            return np.array(img)

        for id_, (image_file_path, label) in enumerate(rows):
            image = file_to_image(image_file_path)
            yield id_, {
                "image_file_path": image_file_path,
                "image": image,
                "labels": label,
            }
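

# Usage sketch (an assumption, not part of the builder itself): if this file is
# the loading script hosted in the rshrott/renovation repository, the dataset
# can be loaded with `datasets.load_dataset`. The guard below only runs when
# the script is executed directly, so it does not affect loading.
if __name__ == "__main__":
    from datasets import load_dataset

    # Loading by repository id resolves to this script and builds all splits.
    ds = load_dataset("rshrott/renovation")
    print(ds)  # expected splits: train / validation / test

    example = ds["train"][0]
    print(example["image_file_path"])
    # "labels" is stored as a class id; int2str maps it back to its name.
    print(ds["train"].features["labels"].int2str(example["labels"]))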