import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {outdoor_garbage},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of images of garbage cans of various capacities and types.
It is well suited for training a neural network to monitor the timely removal
of garbage and to organize the logistics of garbage collection vehicles. The
dataset is useful for recommendation systems and for optimizing and automating
the work of community services and smart cities.
"""
_NAME = 'outdoor_garbage'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
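
# Layout of the remote data referenced by _DATA (see the builder below):
# `images.tar.gz` holds the image files and `outdoor_garbage.csv` holds the
# `image_name`, `image_id` and `annotations` columns used during generation.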


class OutdoorGarbage(datasets.GeneratorBasedBuilder):
    """Small sample of image-text pairs"""

    def _info(self):
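        # Each example exposes an integer image id, the image itself, and the
        # raw annotations string taken from the accompanying CSV file.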
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image_id': datasets.Value('int32'),
                'image': datasets.Image(),
                'annotations': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
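        # Download the image archive and the annotations CSV, then stream
        # images straight out of the tar archive instead of extracting it.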
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        annotations_df = pd.read_csv(annotations)

        for idx, (image_path, image) in enumerate(images):
            # Match the archive member path against the 'image_name' column of
            # the CSV to recover the metadata for this image.
            row = annotations_df.loc[
                annotations_df['image_name'] == image_path].iloc[0]
            yield idx, {
                'image_id': row['image_id'],
                'image': {
                    'path': image_path,
                    'bytes': image.read()
                },
                'annotations': row['annotations']
            }
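

# Minimal usage sketch (not part of the builder itself); assumes the dataset is
# published at the homepage above so that `datasets.load_dataset` resolves this
# script automatically:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("TrainingDataPro/outdoor_garbage", split="train")
#   example = ds[0]
#   print(example["image_id"], example["annotations"])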