---
license: apache-2.0
task_categories:
- image-classification
- text-to-image
- unconditional-image-generation
size_categories:
- 100K<n<1M
---

# World Heightmaps 256px
This is a dataset of 256x256 Earth heightmaps generated from [SRTM 1 Arc-Second Global](https://huggingface.co/datasets/hayden-donnelly/srtm-1-arc-second-global).
Each heightmap is labelled with its latitude and longitude. There are 573,995 samples. It is the same dataset as
[World Heightmaps 360px](https://huggingface.co/datasets/novaia/world-heightmaps-360px), downsampled from 360x360 to 256x256.
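
The dataset can be loaded with the `datasets` library. A minimal sketch (the repository id here is inferred from the 360px link above, so it is an assumption and may differ):
```python
from datasets import load_dataset

# Repository id is an assumption based on the sibling 360px dataset.
dataset = load_dataset('novaia/world-heightmaps-256px', split='train')

sample = dataset[0]
heightmap = sample['heightmap']  # decoded as a 256x256 greyscale PIL image
print(sample['latitude'], sample['longitude'])
```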

## Method
1. Convert GeoTIFFs into greyscale PNGs with Rasterio and Matplotlib.
```python
import rasterio
import matplotlib.pyplot as plt
import os

input_directory = '...'
output_directory = '...'
file_list = os.listdir(input_directory)

for i in range(len(file_list)):
    # Read the elevation band and write it as a greyscale PNG under the
    # same base name; plt.imsave normalizes each band to its own min/max.
    image = rasterio.open(input_directory + file_list[i])
    plt.imsave(output_directory + file_list[i][0:-4] + '.png', image.read(1), cmap='gray')
```

2. Split each PNG into 100 patches (a 10x10 grid) with Split Image.
```python
from split_image import split_image
import os

input_directory = '...'
output_directory = '...'
file_list = os.listdir(input_directory)

for i in range(len(file_list)):
    # Cut each PNG into a 10x10 grid, squaring the image first if needed.
    split_image(input_directory + file_list[i], 10, 10, should_square=True, should_cleanup=False, output_dir=output_directory)
```

3. Hand-pick a set of corrupted and uncorrupted heightmaps, then train a discriminator on it to automatically filter the whole dataset.
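
The discriminator is not included here, so the following is only a minimal sketch of the filtering pass, assuming a small binary CNN classifier saved with PyTorch (the checkpoint name, directories, and threshold are all placeholders):
```python
import glob
import os
import shutil
import numpy as np
import torch
from PIL import Image

# Hypothetical checkpoint: a CNN with a single output logit, trained on the
# hand-picked corrupted vs. uncorrupted heightmaps.
model = torch.load('heightmap_discriminator.pt')
model.eval()

input_directory = '...'
rejected_directory = '...'
os.makedirs(rejected_directory, exist_ok=True)

with torch.no_grad():
    for path in glob.glob(os.path.join(input_directory, '*.png')):
        heightmap = np.array(Image.open(path).convert('L'), dtype=np.float32) / 255.0
        x = torch.from_numpy(heightmap)[None, None]  # shape (1, 1, H, W)
        p_corrupted = torch.sigmoid(model(x)).item()
        if p_corrupted > 0.5:  # placeholder threshold
            shutil.move(path, os.path.join(rejected_directory, os.path.basename(path)))
```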

4. Downsample from 360x360 to 256x256 with Pillow and the Lanczos resampling method.
```python
import glob
from PIL import Image

paths = glob.glob('world-heightmaps-360px-png/data/*/*')

for file_name in paths:
    image = Image.open(file_name)
    # Skip images that have already been downsampled.
    if image.width == 256:
        continue
    print(file_name)
    image = image.resize((256, 256), resample=Image.LANCZOS)
    image.save(file_name)
```

5. Compile the images into parquet files of 10,000 samples each.
```python
import pyarrow as pa
import pyarrow.parquet as pq
import pandas as pd
from PIL import Image
import os
import io
import json

samples_per_file = 10_000

root_dir = 'data/datasets/world-heightmaps-256px-png'
df = pd.read_csv(os.path.join(root_dir, 'metadata.csv'))
# Shuffle so that each parquet file holds a random mix of locations.
df = df.sample(frac=1).reset_index(drop=True)

print(df.head())

def save_table(image_data, table_number):
    print(f'Entries in table {table_number}: {len(image_data)}')
    # Embedding the Hugging Face features spec in the schema metadata lets
    # the datasets library decode the heightmap column as an Image.
    schema = pa.schema(
        fields=[
            ('heightmap', pa.struct([('bytes', pa.binary()), ('path', pa.string())])),
            ('latitude', pa.string()),
            ('longitude', pa.string())
        ],
        metadata={
            b'huggingface': json.dumps({
                'info': {
                    'features': {
                        'heightmap': {'_type': 'Image'},
                        'latitude': {'_type': 'Value', 'dtype': 'string'},
                        'longitude': {'_type': 'Value', 'dtype': 'string'}
                    }
                }
            }).encode('utf-8')
        }
    )

    table = pa.Table.from_pylist(image_data, schema=schema)
    pq.write_table(table, f'data/world-heightmaps-256px-parquet/{str(table_number).zfill(4)}.parquet')

image_data = []
samples_in_current_file = 0
current_file_number = 0
for i, row in df.iterrows():
    # Flush a full file to disk and start a new one.
    if samples_in_current_file >= samples_per_file:
        save_table(image_data, current_file_number)
        image_data = []
        samples_in_current_file = 0
        current_file_number += 1
    samples_in_current_file += 1
    image_path = row['file_name']
    with Image.open(os.path.join(root_dir, image_path)) as image:
        image_bytes = io.BytesIO()
        image.save(image_bytes, format='PNG')
        image_dict = {
            'heightmap': {
                'bytes': image_bytes.getvalue(),
                'path': image_path
            },
            'latitude': str(row['latitude']),
            'longitude': str(row['longitude'])
        }
        image_data.append(image_dict)

# Write the final, partially filled file.
save_table(image_data, current_file_number)
```
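
To sanity-check one of the resulting files, it can be read back with pyarrow (a short sketch; the path matches the writer above):
```python
import io
import pyarrow.parquet as pq
from PIL import Image

# Read the first parquet file produced by the script above.
table = pq.read_table('data/world-heightmaps-256px-parquet/0000.parquet')
print(table.schema)
print(table.num_rows)

# Decode the first heightmap back into a PIL image.
first = table.slice(0, 1).to_pylist()[0]
image = Image.open(io.BytesIO(first['heightmap']['bytes']))
print(first['latitude'], first['longitude'], image.size)
```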