"""Loading script for the image-classification-yenthienviet dataset."""

import os

import datasets
from datasets.tasks import ImageClassification

_DESCRIPTION = """\ |
|
This dataset contains all THIENVIET products images split in training, |
|
validation and testing |
|
""" |
|
|
|
_URLS = {
    "train": "https://huggingface.co/datasets/chanelcolgate/image-classification-yenthienviet/resolve/main/data/train.zip",
    "val": "https://huggingface.co/datasets/chanelcolgate/image-classification-yenthienviet/resolve/main/data/val.zip",
    "test": "https://huggingface.co/datasets/chanelcolgate/image-classification-yenthienviet/resolve/main/data/test.zip",
}

_CATEGORIES = ["botkhi", "thuytinh", "ocvit", "ban", "contrung", "kimloai", "toc"]

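# Note: the loader assumes each split archive unpacks into one subdirectory per
# category listed in _CATEGORIES; _generate_examples reads each image's label
# from the name of its parent directory.
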
class YenthienvietConfig(datasets.BuilderConfig):
    """BuilderConfig for image-classification-yenthienviet."""

    def __init__(self, name, data_urls, **kwargs):
        """
        BuilderConfig for image-classification-yenthienviet.

        Args:
            name: `string`, name of the configuration.
            data_urls: `dict`, mapping of split name to the URL of the zip file to download.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(name=name, version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls


class YenthienvietClassification(datasets.GeneratorBasedBuilder):
    """Builder for image-classification-yenthienviet."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = YenthienvietConfig
    BUILDER_CONFIGS = [
        YenthienvietConfig(
            name="version-10/10",
            description="Version 10/10 of image-classification-yenthienviet dataset.",
            data_urls=_URLS,
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "image_file_path": datasets.Value("string"),
                "image": datasets.Image(),
                "labels": datasets.features.ClassLabel(names=_CATEGORIES),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("image", "labels"),
            task_templates=[
                ImageClassification(image_column="image", label_column="labels")
            ],
        )

    def _split_generators(self, dl_manager):
        # Download and extract the per-split zip archives, then hand an iterator
        # over the extracted files to _generate_examples for each split.
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["val"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                },
            ),
        ]

    def _generate_examples(self, files):
        # Yield one example per image file; the label is the name of the
        # directory that contains the image.
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            if file_name.endswith((".jpg", ".png", ".jpeg", ".bmp", ".tif", ".tiff")):
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)),
                }
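

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loading script itself. It assumes
    # this file is saved locally, that the zip archives in _URLS are reachable,
    # and that the installed `datasets` version still supports local loading
    # scripts. Adjust the config name if other configurations are defined.
    ds = datasets.load_dataset(__file__, name="version-10/10")
    print(ds)
    print(ds["train"].features["labels"].names)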