ezhang7423 committed in 6c72399 (parent: 565258f)

add findings

Files changed:
- .gitignore +1 -0
- .vscode/launch.json +16 -0
- README.md +15 -1
- lichess_2023_janoct_shards.py +18 -2
.gitignore
ADDED
@@ -0,0 +1 @@
+venv
.vscode/launch.json
ADDED
@@ -0,0 +1,16 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python Debugger: Current File",
+            "type": "debugpy",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "justMyCode": false
+        }
+    ]
+}
README.md
CHANGED
@@ -2,15 +2,29 @@
 license: apache-2.0
 ---
 
+## Installation
 ```
+pip install datasets numpy zstandard
+```
+
+## Usage
+
+I've given up on trying to get this to work natively with Hugging Face `datasets`. Native support would be nice because it allows streaming (https://huggingface.co/docs/datasets/en/about_mapstyle_vs_iterable) and has functions like `map` that easily parallelize operations over the dataset. Maybe I'll get this working in the future, but for now it gets stuck: the downloaded and extracted zstd files are not decompressible for some reason, and the entire dataset has to be rewritten into Arrow first.
+
+
+
+<!-- ```
 from datasets import load_dataset
 
 dataset = load_dataset("ezipe/lichess_2023_janoct_shards")
 
-```
+``` -->
 
 or more slowly,
 
 ```
+conda install git-lfs
+git lfs install
 git clone https://huggingface.co/datasets/ezipe/lichess_2023_janoct_shards/
 ```
+
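As context for the streaming and `map` features mentioned in the new README text, here is a minimal sketch of how they would be used if the loader worked natively with `datasets`. It is illustrative only: native loading currently fails for this repo as noted above, and the `moves` column name is taken from the loader script's yield below.

```python
from datasets import load_dataset

# Stream examples instead of materializing the whole dataset as Arrow on disk.
dataset = load_dataset("ezipe/lichess_2023_janoct_shards", streaming=True)

# IterableDataset.map applies a transform lazily as examples are yielded.
def add_length(example):
    example["num_tokens"] = len(example["moves"])
    return example

train = dataset["train"].map(add_length)

for i, game in enumerate(train):
    print(game["num_tokens"])
    if i >= 2:
        break
```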
lichess_2023_janoct_shards.py
CHANGED
@@ -14,6 +14,7 @@
 """Lichess data in 2023 from Jan-Oct."""
 
 
+import multiprocessing
 import re
 import io
 
@@ -69,6 +70,7 @@ class StreamingPGNDataset:
 
     def read_game(self):
        dctx = zstandard.ZstdDecompressor()
+        # with open(self.file_path.get_origin(), "rb") as pgn_file:
         with open(self.file_path, "rb") as pgn_file:
             stream_reader = dctx.stream_reader(pgn_file)
             text_stream = io.TextIOWrapper(stream_reader, encoding="utf-8")
@@ -190,9 +192,13 @@ class Lichess2023JanOct(datasets.GeneratorBasedBuilder):
         filepaths = [
             f"data/lichess_db_standard_rated_2023-{k:02}.pgn.{s:02}.zst" for s in range(32) for k in range(1, 11)
         ]
-        downloaded_files = dl_manager.download_and_extract(filepaths)
+        # filepaths = [
+        #     f"data/lichess_db_standard_rated_2023-{k:02}.pgn.{s:02}.zst" for s in range(2) for k in range(1, 2)
+        # ]
+
+        # downloaded_files = dl_manager.download_and_extract(filepaths)
         generator = datasets.SplitGenerator(
-            name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}
+            name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}
         )
         return [generator]
 
@@ -266,3 +272,13 @@ class Lichess2023JanOct(datasets.GeneratorBasedBuilder):
             _id = i
             i += 1
             yield _id, {'moves': np.array([TOKENIZER['stoi'][c] for c in out], dtype=np.uint8)}
+
+
+if __name__ == '__main__':
+    dataset = datasets.load_dataset("/mnt/data/lichess_2023_janoct_shards", streaming=True)
+    k = iter(dataset['train'])
+    print(next(k))
+    print(next(k))
+    print(next(k))
+    print(next(k))
+
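The commented-out `dl_manager.download_and_extract` call and the change to pass raw `.zst` paths keep the loader's approach of streaming-decompressing each shard in `read_game` rather than extracting it first. As a standalone sketch of that pattern (hypothetical shard path; the `zstandard` and `io` calls are the ones already used in the diff):

```python
import io
import zstandard

# Hypothetical shard path; real shards follow
# data/lichess_db_standard_rated_2023-MM.pgn.SS.zst
shard = "data/lichess_db_standard_rated_2023-01.pgn.00.zst"

dctx = zstandard.ZstdDecompressor()
with open(shard, "rb") as pgn_file:
    # stream_reader decompresses on the fly, so the shard is never
    # fully materialized in memory or on disk.
    stream_reader = dctx.stream_reader(pgn_file)
    text_stream = io.TextIOWrapper(stream_reader, encoding="utf-8")
    for line in text_stream:
        # PGN headers look like: [Event "Rated Blitz game"]
        if line.startswith("[Event "):
            print(line.strip())
            break
```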