qgyd2021 committed on
Commit
80a390f
1 Parent(s): ec99bdb

Delete loading script

Browse files
Files changed (1) hide show
  1. cppe-5.py +0 -135
cppe-5.py DELETED
@@ -1,135 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """CPPE-5 dataset."""
16
-
17
-
18
- import collections
19
- import json
20
- import os
21
-
22
- import datasets
23
-
24
-
25
- _CITATION = """\
26
- @misc{dagli2021cppe5,
27
- title={CPPE-5: Medical Personal Protective Equipment Dataset},
28
- author={Rishit Dagli and Ali Mustufa Shaikh},
29
- year={2021},
30
- eprint={2112.09569},
31
- archivePrefix={arXiv},
32
- primaryClass={cs.CV}
33
- }
34
- """
35
-
36
- _DESCRIPTION = """\
37
- CPPE - 5 (Medical Personal Protective Equipment) is a new challenging dataset with the goal
38
- to allow the study of subordinate categorization of medical personal protective equipments,
39
- which is not possible with other popular data sets that focus on broad level categories.
40
- """
41
-
42
- _HOMEPAGE = "https://sites.google.com/view/cppe5"
43
-
44
- _LICENSE = "Unknown"
45
-
46
- # _URL = "https://drive.google.com/uc?id=1MGnaAfbckUmigGUvihz7uiHGC6rBIbvr"
47
- _URL = "data/dataset.tar.gz"
48
-
49
- _CATEGORIES = ["Coverall", "Face_Shield", "Gloves", "Goggles", "Mask"]
50
-
51
-
52
class CPPE5(datasets.GeneratorBasedBuilder):
    """CPPE - 5 dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the feature schema and dataset-card metadata."""
        # Each image carries a variable number of annotated objects, so the
        # per-object fields are wrapped in a Sequence.
        object_schema = {
            "id": datasets.Value("int64"),
            "area": datasets.Value("int64"),
            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
            "category": datasets.ClassLabel(names=_CATEGORIES),
        }
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                "objects": datasets.Sequence(object_schema),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive once and pair each split with its annotation file."""
        archive = dl_manager.download(_URL)
        split_to_annotations = [
            (datasets.Split.TRAIN, "annotations/train.json"),
            (datasets.Split.TEST, "annotations/test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "annotation_file_path": annotation_path,
                    # iter_archive streams members without extracting the tar.
                    "files": dl_manager.iter_archive(archive),
                },
            )
            for split, annotation_path in split_to_annotations
        ]

    def _generate_examples(self, annotation_file_path, files):
        """Yield ``(key, example)`` pairs for one split.

        Relies on the member ordering inside the archive: the JSON annotation
        files come before the images, so the lookup tables built from the
        annotation file exist before any matching image is encountered.
        """
        filename_to_image = {}
        example_idx = 0
        for path, fobj in files:
            basename = os.path.basename(path)
            if path == annotation_file_path:
                annotations = json.load(fobj)
                # COCO-style category ids -> human-readable label names.
                category_id_to_category = {
                    category["id"]: category["name"] for category in annotations["categories"]
                }
                # Group the flat annotation list by the image it belongs to.
                image_id_to_annotations = collections.defaultdict(list)
                for ann in annotations["annotations"]:
                    image_id_to_annotations[ann["image_id"]].append(ann)
                filename_to_image = {img["file_name"]: img for img in annotations["images"]}
            elif basename in filename_to_image:
                image = filename_to_image[basename]
                objects = []
                for ann in image_id_to_annotations[image["id"]]:
                    objects.append(
                        {
                            "id": ann["id"],
                            "area": ann["area"],
                            "bbox": ann["bbox"],
                            "category": category_id_to_category[ann["category_id"]],
                        }
                    )
                yield example_idx, {
                    "image_id": image["id"],
                    "image": {"path": path, "bytes": fobj.read()},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                example_idx += 1