Datasets:
Update README.md
Browse files
README.md
CHANGED
@@ -75,6 +75,89 @@ total: 3.93 GiB
|
|
75 |
|
76 |
### Data Splits
|
77 |
|
78 |
-
|
|
79 |
-
|
|
80 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
### Data Splits
|
77 |
|
78 |
+
| | train |
|
79 |
+
| ---------- | -------- |
|
80 |
+
| # of texts | 11376263 |
|
81 |
+
|
82 |
+
|
83 |
+
## Note
|
84 |
+
|
85 |
+
### Height, Width
|
86 |
+
|
87 |
+
모든 데이터를 살펴본 것은 아니지만, 이미지의 가로가 `HEIGHT`로, 세로가 `WIDTH`로 되어있는 것 같습니다.
|
88 |
+
|
89 |
+
```pycon
|
90 |
+
>>> dataset["train"][98]
|
91 |
+
{'SAMPLE_ID': 2937471001780,
|
92 |
+
'URL': 'https://image.ajunews.com/content/image/2019/04/12/20190412175643597949.png',
|
93 |
+
'TEXT': 'μΈμ²μκ΅μ‘μ², μΈμ² μꡰꡬλ°μ νμν μμμ§κ³Όμ κ°λ΄ν κ°μ΅',
|
94 |
+
'HEIGHT': 640,
|
95 |
+
'WIDTH': 321,
|
96 |
+
'LICENSE': '?',
|
97 |
+
'LANGUAGE': 'ko',
|
98 |
+
'NSFW': 'UNLIKELY',
|
99 |
+
'similarity': 0.33347243070602417}
|
100 |
+
```
|
101 |
+
|
102 |
+
[image](https://image.ajunews.com/content/image/2019/04/12/20190412175643597949.png)
|
103 |
+
|
104 |
+
### Code used to generate
|
105 |
+
|
106 |
+
```py
|
107 |
+
import csv
|
108 |
+
import re
|
109 |
+
|
110 |
+
from datasets import load_dataset
|
111 |
+
from tqdm import tqdm
|
112 |
+
|
113 |
+
|
114 |
+
# Intended to match Korean (Hangul) characters; NOTE(review): the range
# literal appears mojibake-damaged in this scrape — verify the encoding
# against the original file before reuse.
pattern = re.compile(r"[κ°-ν£]")


def quote(s: str) -> str:
    """Return *s* with every literal triple-double-quote sequence removed."""
    return s.replace('"""', "")


def filter_func(example) -> bool:
    """Keep examples tagged as Korean, or whose text contains a matching character.

    Returns False for rows where LANGUAGE or TEXT is missing or not a string.
    """
    lang = example.get("LANGUAGE")
    text = example.get("TEXT")
    if not (isinstance(lang, str) and isinstance(text, str)):
        return False
    if lang == "ko":
        return True
    return pattern.search(text) is not None
|
128 |
+
|
129 |
+
|
130 |
+
# Stream the laion2B-multi dataset, keep only (likely-)Korean rows, and dump
# them to a tab-separated CSV.  Streaming avoids materializing the full dataset.
ds = load_dataset("laion/laion2B-multi", split="train", streaming=True)
dsf = ds.filter(filter_func)

header = [
    "SAMPLE_ID",
    "URL",
    "TEXT",
    "HEIGHT",
    "WIDTH",
    "LICENSE",
    "LANGUAGE",
    "NSFW",
    "similarity",
]

# NOTE: the output filename keeps the original (misspelled) name
# "laion2B-mulit_korean_subset.csv" so existing references still match.
# Using a `with` block (instead of the original try/finally + manual close)
# guarantees the handle is closed, and the file is only created after
# load_dataset has succeeded.
with open("./laion2B-mulit_korean_subset.csv", "w", encoding="utf-8", newline="") as file:
    writer = csv.DictWriter(file, fieldnames=header, delimiter="\t")
    writer.writeheader()
    for data in tqdm(dsf):
        # Clean the caption; drop rows whose caption becomes empty.
        data["TEXT"] = quote(data.get("TEXT", ""))
        if data["TEXT"]:
            writer.writerow(data)

print("Done!")
|
157 |
+
```
|
158 |
+
|
159 |
+
이후에 `HEIGHT`나 `WIDTH`가 None인 데이터를 제거하고 업로드하였습니다.
|
160 |
+
|
161 |
+
### img2dataset
|
162 |
+
|
163 |
+
[img2dataset](https://github.com/rom1504/img2dataset)을 사용하여 URL로 된 이미지들을 데이터셋 형태로 만들 수 있습니다.
|