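"""Loading script for the rshrott/renovation dataset: property photos labeled
by renovation quality ('cheap', 'average', 'expensive')."""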
import os
import random

import datasets
# `datasets` inspects a loading script's imports to detect required
# third-party packages; py7zr is needed to extract the .7z archives even
# though it is never called directly here.
import py7zr
from datasets.tasks import ImageClassification

_HOMEPAGE = "https://huggingface.co/datasets/rshrott/renovation"
_CITATION = """\
@ONLINE {renovationquality,
author="Your Name",
title="Renovation Quality Dataset",
month="Your Month",
year="Your Year",
url="https://huggingface.co/datasets/rshrott/renovation"
}
"""
_DESCRIPTION = """\
This dataset contains images of various properties, along with labels indicating the quality of renovation - 'cheap', 'average', 'expensive'.
"""
# NOTE: LFS-tracked binaries must be fetched via the `resolve` endpoint;
# `raw` would return the Git LFS pointer file instead of the archive itself.
_URLS = {
    "cheap": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/cheap.7z",
    "average": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/average.7z",
    "expensive": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/expensive.7z",
}
_NAMES = ["cheap", "average", "expensive"]

class RenovationQualityDataset(datasets.GeneratorBasedBuilder):
    """Renovation Quality Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        # Download and extract the per-label archives, then collect image paths.
        image_paths = []
        for label, url in _URLS.items():
            extract_path = dl_manager.download_and_extract(url)
            print(f"Extracted files for label {label} to path: {extract_path}")
            for root, _, files in os.walk(extract_path):
                for file in files:
                    # The archives are assumed to contain JPEGs; accept both
                    # common extensions, case-insensitively.
                    if file.lower().endswith((".jpeg", ".jpg")):
                        image_paths.append((os.path.join(root, file), label))
        print(f"Collected a total of {len(image_paths)} image paths.")

        # Shuffle with a fixed seed so the 80/10/10 train/validation/test
        # partition is reproducible across loads.
        random.seed(42)
        random.shuffle(image_paths)
        train_end = int(0.8 * len(image_paths))
        val_end = int(0.9 * len(image_paths))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"rows": image_paths[:train_end]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"rows": image_paths[train_end:val_end]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"rows": image_paths[val_end:]},
            ),
        ]

    def _generate_examples(self, rows):
        for id_, (image_file_path, label) in enumerate(rows):
            yield id_, {
                "image_file_path": image_file_path,
                # datasets.Image() accepts a file path and decodes the image
                # lazily, so nothing is loaded into memory at generation time.
                "image": image_file_path,
                "labels": label,
            }
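
# A minimal usage sketch (assumes `datasets` and `py7zr` are installed and the
# hosted .7z archives are reachable):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("rshrott/renovation")
#     example = ds["train"][0]
#     print(example["image_file_path"], example["labels"])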