artificialhoney
committed on
Commit
•
ffd62d6
0
Parent(s):
Initial commit
Browse files- .gitattributes +63 -0
- .gitignore +3 -0
- README.md +32 -0
- cli.py +240 -0
- data/graffiti-database.com/images.tar.gz +3 -0
- data/graffiti-database.com/metadata.jsonl +3 -0
- data/graffiti.org/images.tar.gz +3 -0
- data/graffiti.org/metadata.jsonl +3 -0
- graffiti.py +121 -0
- prepare.sh +19 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
26 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
27 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
37 |
+
# Audio files - uncompressed
|
38 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
39 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
40 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
41 |
+
# Audio files - compressed
|
42 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
43 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
44 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
45 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
46 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
47 |
+
# Image files - uncompressed
|
48 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
49 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
50 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
51 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
52 |
+
# Image files - compressed
|
53 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
54 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
55 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
56 |
+
|
57 |
+
*.jsonl filter=lfs diff=lfs merge=lfs -text
|
58 |
+
|
59 |
+
*.py !text !filter !merge !diff
|
60 |
+
*.sh !text !filter !merge !diff
|
61 |
+
*.md !text !filter !merge !diff
|
62 |
+
.gitignore !text !filter !merge !diff
|
63 |
+
*.txt !text !filter !merge !diff
|
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
.DS_Store
|
2 |
+
images/
|
3 |
+
files.list
|
README.md
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
dataset_info:
|
3 |
+
features:
|
4 |
+
- name: image
|
5 |
+
dtype: image
|
6 |
+
- name: text
|
7 |
+
dtype: string
|
8 |
+
splits:
|
9 |
+
- name: train
|
10 |
+
num_bytes: 690468770
|
11 |
+
num_examples: 77487
|
12 |
+
download_size: 11106922968
|
13 |
+
dataset_size: 690468770
|
14 |
+
---
|
15 |
+
# Dataset Card for Graffiti
|
16 |
+
|
17 |
+
## Dataset Description
|
18 |
+
Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/.
|
19 |
+
|
20 |
+
## Data
|
21 |
+
Images and metadata are located in `data/`.
|
22 |
+
|
23 |
+
## Citation Information
|
24 |
+
|
25 |
+
```bibtex
|
26 |
+
@InProceedings{huggingface:dataset,
|
27 |
+
title = {Graffiti},
|
28 |
+
author={UR
|
29 |
+
},
|
30 |
+
year={2023}
|
31 |
+
}
|
32 |
+
```
|
cli.py
ADDED
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import argparse
import codecs
import json
import os
import urllib
import urllib.parse  # explicit: bare `import urllib` does not load the `parse` submodule
from pathlib import Path

import requests
from bs4 import BeautifulSoup
from PIL import UnidentifiedImageError
from PIL import Image, ImageFile
from transformers import pipeline
14 |
+
ImageFile.LOAD_TRUNCATED_IMAGES = True
|
15 |
+
|
16 |
+
|
17 |
+
class Scraper(object):
    """Base scraper: fetches pages, parses HTML, and downloads images.

    Subclasses supply the concrete per-site walking logic; this class only
    stores the list of start URLs and provides the shared HTTP helpers.
    """

    def __init__(self, urls):
        # Start URLs that the concrete scraper will walk.
        self.urls = urls

    def fetch(self, url):
        """Return the HTTP response for *url* (browser-like User-Agent)."""
        return requests.get(url, headers={"User-Agent": "Mozilla/5.0"})

    def parse(self, text):
        """Parse an HTML document into a BeautifulSoup tree."""
        return BeautifulSoup(text, 'html.parser')

    def download_image(self, url, path):
        """Download *url* and write the raw bytes to *path*."""
        image = requests.get(url)
        # Context manager guarantees the handle is closed; the original
        # `open(path, 'wb').write(...)` leaked one file handle per image.
        with open(path, 'wb') as f:
            f.write(image.content)
30 |
+
|
31 |
+
|
32 |
+
class GraffitiDatabaseScraper(Scraper):
    """Scraper for https://graffiti-database.com.

    Walks the paginated image index and stores every image together with a
    JSON sidecar file holding its metadata (image URL, city, artist).
    """

    def __init__(self):
        Scraper.__init__(self, ["https://graffiti-database.com"])

    def get_image_urls(self, parsed):
        """Return the hrefs of all image detail pages on an index page."""
        return [page.get('href') for page in parsed.select('.image-info a')]

    def scrape_image(self, url):
        """Scrape one image detail page and return its metadata dict.

        Raises:
            RuntimeError: if the page does not answer with HTTP 200.
        """
        print("Scraping image page {0}".format(url))

        fetched = self.fetch(url)

        if fetched.status_code != 200:
            # The original raised a plain string, which is itself a
            # TypeError in Python 3 — raise a real exception instead.
            raise RuntimeError("Scraping image page did fail")

        parsed = self.parse(fetched.text)

        image = parsed.select('.img-fluid')[0].get('src')
        tag_links = parsed.select('a.tag')

        # The artist name is the second-to-last path segment of the
        # (double-percent-encoded) page URL.
        url_parts = urllib.parse.unquote(
            urllib.parse.unquote(url)).rsplit("/", 2)
        artist = url_parts[1]
        return {
            "image_url": image,
            "city": tag_links[-1].get_text().strip(),
            "artist": artist if artist not in ("Writer Unknown", "Writers Unknown") else None
        }

    def scrape_page(self, url):
        """Scrape one index page; return False when it holds no images."""
        print("Scraping page {0}".format(url))

        parsed = self.parse(self.fetch(url).text)

        image_urls = self.get_image_urls(parsed)

        if not image_urls:
            return False

        for image_url in image_urls:
            try:
                result = self.scrape_image(
                    "https://graffiti-database.com" + image_url)
            except Exception:
                # Keep the best-effort skip of broken detail pages, but do
                # not swallow KeyboardInterrupt/SystemExit like the
                # original bare `except:` did.
                continue

            file_name = result["image_url"].split('/')[-1]

            self.download_image(result["image_url"],
                                './images/' + file_name)

            result["file"] = file_name

            with open('./images/' + file_name + '.json', 'w', encoding='utf-8') as f:
                f.write(json.dumps(result, indent=2, ensure_ascii=False))

        return True

    def scrape(self):
        """Walk ?page=1, 2, ... until a page comes back without images."""
        url = self.urls[0]
        print("Scraping {0}".format(url))

        count = 1
        has_images = True
        while has_images:
            has_images = self.scrape_page(url + "?page=" + str(count))
            count += 1
107 |
+
|
108 |
+
|
109 |
+
class GraffitiScraper(Scraper):
    """Scraper for https://www.graffiti.org.

    Walks the world/europe/usa index pages, following each city's gallery
    links and saving every image plus a JSON metadata sidecar.
    """

    def __init__(self):
        Scraper.__init__(self, [
            "https://www.graffiti.org/index/world.html",
            "https://www.graffiti.org/index/europe.html",
            "https://www.graffiti.org/index/usa.html",
        ])

    def scrape_page(self, url, city_name):
        """Download every untitled .jpg link on *url*, tagged with *city_name*."""
        print("Scraping page {0}".format(url))

        soup = self.parse(self.fetch(url).text)

        for anchor in soup.select('a[href*=".jpg"]'):
            # Only anchors without visible link text are plain image links.
            if anchor.get_text().strip():
                continue
            # The text node right after the link carries the artist tags.
            sibling = anchor.find_next_sibling(string=True)
            tags = sibling.get_text().strip() if sibling else ""

            image_url = anchor.get("href").replace("/", "_")
            url_parts = url.split('/')
            url_parts[-1] = anchor.get("href")
            absolute = "/".join(url_parts)

            self.download_image(absolute, './images/' + image_url)

            record = {
                "file": image_url,
                "image_url": absolute,
                "artist": tags,
                "city": city_name if city_name != "Various cities" else None
            }
            with open('./images/' + image_url + '.json', 'w', encoding='utf-8') as f:
                f.write(json.dumps(record, indent=2, ensure_ascii=False))

    def scrape_url(self, url):
        """Scrape every city section linked from one index page."""
        print("Scraping url {0}".format(url))

        soup = self.parse(self.fetch(url).text)

        for heading in soup.find_all("h4"):
            city_name = heading.get_text().split("\n")[0].strip()
            for page in heading.find_all("a"):
                # "§" anchors are in-page section markers, not galleries.
                if page.get_text().strip() == "§":
                    continue
                self.scrape_page(
                    "https://www.graffiti.org/index/" + page.get("href"), city_name)

    def scrape(self):
        """Scrape all configured index pages in order."""
        for url in self.urls:
            self.scrape_url(url)
162 |
+
|
163 |
+
|
164 |
+
class CLI():
    """Command-line interface: scrape, cleanup, caption and metadata tools.

    Argument parsing and dispatch happen in the constructor, so ``CLI()``
    both builds the parser and immediately runs the selected command.
    """

    def __init__(self):
        parser = argparse.ArgumentParser(
            prog='graffiti-cli',
            description='Tools for setting up the dataset')
        subparsers = parser.add_subparsers(dest="command", required=True)

        scrape = subparsers.add_parser(
            'scrape', help='Scrapes data sources and downloads images')
        scrape.add_argument('--source',
                            default='graffiti.org',
                            choices=['graffiti.org', 'graffiti-database.com'],
                            help='Choose data source to scrape')
        subparsers.add_parser('cleanup', help='Cleans up downloaded images')
        subparsers.add_parser('caption', help='Captions downloaded images')
        metadata = subparsers.add_parser(
            'metadata', help='Creates single meta files from metadata.jsonl')
        metadata.add_argument('--source',
                              default='graffiti.org',
                              choices=['graffiti.org', 'graffiti-database.com'],
                              help='Choose data source to use')

        args = parser.parse_args()
        if args.command == 'scrape':
            self._scrape(args.source)
        elif args.command == 'cleanup':
            self._cleanup()
        elif args.command == 'caption':
            self._caption()
        elif args.command == 'metadata':
            self._metadata(args.source)

    def _scrape(self, source):
        """Run the scraper matching *source*."""
        if source == 'graffiti.org':
            GraffitiScraper().scrape()
        elif source == 'graffiti-database.com':
            GraffitiDatabaseScraper().scrape()

    def _cleanup(self):
        """Delete images (and their JSON sidecars) that PIL cannot identify."""
        for img_p in Path("./images").rglob("*.jpg"):
            try:
                # Context manager closes the handle; the original bare
                # Image.open() leaked one file handle per image checked.
                with Image.open(img_p):
                    pass
            except UnidentifiedImageError:
                path_name = str(img_p)
                print(path_name + " is broken. Deleting!")
                os.remove(path_name)
                os.remove(path_name + ".json")

    def _caption(self):
        """Caption every image with BLIP and merge the caption into its JSON."""
        captioner = pipeline(
            "image-to-text", model="Salesforce/blip-image-captioning-base")
        # Materialize once instead of scanning the directory tree twice
        # (the original built a second rglob just to count the files).
        paths = list(Path("./images").rglob("*.jpg"))
        count = len(paths)
        for i, img_p in enumerate(paths):
            path_name = str(img_p)
            with open(path_name + ".json", 'r+', encoding='utf-8') as f:
                data = json.load(f)

                f.seek(0)

                caption = captioner(path_name)[0]["generated_text"]

                # BLIP sometimes emits photo-album style captions; replace
                # them with a generic graffiti description.
                if "album" in caption:
                    caption = "a wall with graffiti on it"

                # ensure_ascii=False for consistency with every other JSON
                # writer in this file.
                json.dump({
                    "file": data["file"],
                    "image_url": data["image_url"],
                    "artist": data["artist"],
                    "city": data["city"],
                    "caption": caption
                }, f, indent=2, ensure_ascii=False)
                f.truncate()

            print("{0} / {1}".format(i + 1, count), path_name, caption)

    def _metadata(self, source):
        """Split data/<source>/metadata.jsonl back into per-image JSON files."""
        with open("data/" + source + "/metadata.jsonl", encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                # encoding specified explicitly — the original relied on
                # the platform default for this one write.
                with open('./images/' + data["file"] + '.json', 'w',
                          encoding='utf-8') as j:
                    j.write(json.dumps(data, indent=2, ensure_ascii=False))
234 |
+
|
235 |
+
def main():
    """Entry point: construct the CLI, which parses argv and dispatches."""
    CLI()


if __name__ == "__main__":
    main()
|
data/graffiti-database.com/images.tar.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b34717128b3e16572f8221ffa0533d46172efd097e214a5a54c06b3307a3086f
|
3 |
+
size 7873618410
|
data/graffiti-database.com/metadata.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8aeece507dde6a9ea31c746226a4133ed03cf344cb5513918be1fb7018efb2d8
|
3 |
+
size 13270223
|
data/graffiti.org/images.tar.gz
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:97cb1cd4fc428561c70dd620036fea594e3d38aeef2037b40bd253fa43aebd19
|
3 |
+
size 3213661340
|
data/graffiti.org/metadata.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b7b1c382ba1ea07f6107d8b3a813a15f96fe87a74773dea75ce74796314e8449
|
3 |
+
size 6372995
|
graffiti.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
|
16 |
+
import json
|
17 |
+
|
18 |
+
import datasets
|
19 |
+
|
20 |
+
|
21 |
+
# BibTeX entry surfaced on the dataset card. NOTE(review): the author
# field ("UR") looks truncated — kept as-is to match the published card.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Graffiti},
author={UR
},
year={2023}
}
"""

# Short human-readable description of where the data was scraped from.
_DESCRIPTION = """\
Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/.
"""

# Canonical Hub page for this dataset.
_HOMEPAGE = "https://huggingface.co/datasets/artificialhoney/graffiti"

_LICENSE = "Apache License 2.0"

_VERSION = "0.1.0"

# Sub-directories of data/ — one image archive + metadata JSONL per source.
_SOURCES = [
    "graffiti.org",
    "graffiti-database.com"
]
44 |
+
|
45 |
+
|
46 |
+
class GraffitiConfig(datasets.BuilderConfig):
    """Configuration for the Graffiti dataset builder."""

    def __init__(self, **kwargs):
        """Create a config, forwarding all keyword arguments to
        ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
55 |
+
|
56 |
+
|
57 |
+
class Graffiti(datasets.GeneratorBasedBuilder):
    """Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/."""

    BUILDER_CONFIG_CLASS = GraffitiConfig

    BUILDER_CONFIGS = [
        GraffitiConfig(
            name="default",
        ),
    ]

    def _info(self):
        """Dataset metadata: each example is an image plus a caption string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "text": datasets.Value("string")
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=_VERSION,
            task_templates=[],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators (a single train split over all sources)."""
        images = []
        metadata = []
        for source in _SOURCES:
            # One image tarball + one JSONL metadata file per source site.
            images.append(dl_manager.iter_archive(
                dl_manager.download("./data/{0}/images.tar.gz".format(source))))
            metadata.append(dl_manager.download(
                "./data/{0}/metadata.jsonl".format(source)))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "images": images,
                    "metadata": metadata,
                },
            )
        ]

    def _generate_examples(self, metadata, images):
        """Yield (id, example) pairs, joining each archived image with its
        metadata row via the stored file name.

        ``metadata`` and ``images`` are parallel lists: one JSONL path and
        one archive iterator per source.
        """
        idx = 0
        for index, meta in enumerate(metadata):
            rows = []
            with open(meta, encoding="utf-8") as f:
                for row in f:
                    rows.append(json.loads(row))
            for file_path, file_obj in images[index]:
                # Archive paths end with the stored file name; take the
                # first matching metadata row (raises IndexError if the
                # archive and metadata are out of sync).
                data = [x for x in rows if file_path.endswith(x["file"])][0]
                text = data["caption"]
                # `is not None` — identity check, not `!= None`.
                if data["artist"] is not None:
                    text += ", with text \"" + data["artist"] + "\""
                    text += ", in the art of \"" + data["artist"] + "\""
                if data["city"] is not None:
                    text += ", located in " + data["city"]
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "text": text
                }
                idx += 1
prepare.sh
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env bash
# Rebuild data/<source>/{images.tar.gz,metadata.jsonl} from ./images and
# regenerate the dataset info. Usage: ./prepare.sh <source>

args=("$@")
# Quote every expansion so a source name with unusual characters cannot
# split or glob; the original used ${args[0]} unquoted throughout.
source_dir="./data/${args[0]}"

# -f: do not fail on the first run, when the archives do not exist yet.
rm -f "${source_dir}/images.tar.gz"
rm -f "${source_dir}/metadata.jsonl"

find ./images -name '*.jpg' -print > files.list

# One JSON sidecar per line: strip internal newlines, then end the line.
# IFS= and -r keep leading whitespace and backslashes in paths intact;
# redirecting into tr replaces the useless `cat | tr` pipeline.
while IFS= read -r p; do
  tr -d '\n' < "${p}.json" >> "${source_dir}/metadata.jsonl"
  echo >> "${source_dir}/metadata.jsonl"
done < files.list

tar czf "${source_dir}/images.tar.gz" --files-from files.list

rm files.list

datasets-cli test graffiti.py --save_info --all_config
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
bs4
|
2 |
+
requests
|
3 |
+
transformers
|