ezhang7423 committed on
Commit 6523719
1 Parent(s): 1ae4c44

add dataloader

Files changed (2)
  1. lichess_2023_janoct.py +268 -0
  2. test.pgn.zst +3 -0
lichess_2023_janoct.py ADDED
@@ -0,0 +1,268 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Lichess data in 2023 from Jan-Oct."""
+
+
+ import csv
+ import io
+ import json
+ import os
+ import re
+
+ import zstandard
+ import numpy as np
+ import datasets
+
+ _DESCRIPTION = """\
+ Lichess data in 2023 from Jan-Oct
+ """
+
+
+ class LichessConfig(datasets.BuilderConfig):
+     def __init__(self, features, **kwargs):
+         super(LichessConfig, self).__init__(**kwargs)
+         self.features = features
+
+
+ _HOMEPAGE = "https://huggingface.co/datasets/ezipe/lichess-2023-janoct"
+
+
+ def process_wrapper():
+     vocab = "#+-.0123456789;=BKNOQRabcdefghx "
+     # Build a translation table that deletes every codepoint outside the vocab.
+     del_chars = "".join(c for c in map(chr, range(0x110000)) if c not in vocab)
+     del_map = str.maketrans("", "", del_chars)
+
+     def process(game_str):
+         """Parse one PGN game into its header fields plus a cleaned move transcript."""
+         res = {}
+
+         for g in game_str.split("\n"):
+             if g.startswith("["):
+                 # Header line such as [WhiteElo "1500"] -> res["WhiteElo"] = "1500"
+                 k, v = g[1:-1].split(' "')
+                 res[k] = v[:-1]
+             elif g.startswith("1. "):
+                 # Movetext line: strip {comments}, out-of-vocab characters,
+                 # "N..." continuation markers, the game result, and extra spaces.
+                 no_brackets_string = re.sub(r"\{.*?\}", "", g)  # , flags=re.DOTALL
+                 no_brackets_string = no_brackets_string.translate(del_map)
+                 remove_dots = re.sub(r"\b\d+\.\.\. ", "", no_brackets_string)
+                 remove_game_result = re.sub(r"1-0|0-1|1/2-1/2", "", remove_dots)[:-2]
+                 remove_spaces = re.sub(r"(\d+)\.\s+", r"\1.", remove_game_result)
+                 remove_double_spaces = re.sub(r" {2,}", " ", remove_spaces)
+                 res["transcript"] = remove_double_spaces
+
+         return res
+
+     return process
+
+
+ class StreamingPGNDataset:
+     def __init__(self, file_path, transform=None):
+         self.file_path = file_path
+         self.transform = transform
+         self.process = process_wrapper()
+
+     def read_game(self):
+         # Stream-decompress the .zst archive and yield one parsed game at a time.
+         dctx = zstandard.ZstdDecompressor()
+         with open(self.file_path, "rb") as pgn_file:
+             stream_reader = dctx.stream_reader(pgn_file)
+             text_stream = io.TextIOWrapper(stream_reader, encoding="utf-8")
+
+             fg = ""
+             for line in text_stream:
+                 fg += line
+                 # In Lichess dumps the movetext line (starting "1. ") ends a game.
+                 if line.startswith("1. "):
+                     game = self.process(fg)
+                     fg = ""
+                     yield game
+
+     def __iter__(self):
+         return self.read_game()
+
+
+ TOKENIZER = {
+     "vocab_size": 32,
+     "itos": {
+         0: " ",
+         1: "#",
+         2: "+",
+         3: "-",
+         4: ".",
+         5: "0",
+         6: "1",
+         7: "2",
+         8: "3",
+         9: "4",
+         10: "5",
+         11: "6",
+         12: "7",
+         13: "8",
+         14: "9",
+         15: ";",
+         16: "=",
+         17: "B",
+         18: "K",
+         19: "N",
+         20: "O",
+         21: "Q",
+         22: "R",
+         23: "a",
+         24: "b",
+         25: "c",
+         26: "d",
+         27: "e",
+         28: "f",
+         29: "g",
+         30: "h",
+         31: "x",
+     },
+     "stoi": {
+         " ": 0,
+         "#": 1,
+         "+": 2,
+         "-": 3,
+         ".": 4,
+         "0": 5,
+         "1": 6,
+         "2": 7,
+         "3": 8,
+         "4": 9,
+         "5": 10,
+         "6": 11,
+         "7": 12,
+         "8": 13,
+         "9": 14,
+         ";": 15,
+         "=": 16,
+         "B": 17,
+         "K": 18,
+         "N": 19,
+         "O": 20,
+         "Q": 21,
+         "R": 22,
+         "a": 23,
+         "b": 24,
+         "c": 25,
+         "d": 26,
+         "e": 27,
+         "f": 28,
+         "g": 29,
+         "h": 30,
+         "x": 31,
+     },
+ }
+ BLOCK_SIZE = 1024
+
+
+ class Lichess2023JanOct(datasets.GeneratorBasedBuilder):
+     """Lichess data from Jan-Oct in transformer block format. Similar to
+     https://huggingface.co/datasets/adamkarvonen/chess_games"""
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIG_CLASS = LichessConfig
+     BUILDER_CONFIGS = [LichessConfig(features=["moves"])]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # Each example is a block of token ids (yielded as a uint8 array).
+                 "moves": datasets.Sequence(datasets.Value("uint8")),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Months Jan-Oct 2023 -> lichess_db_standard_rated_2023-01 ... -10
+         filepaths = [
+             f"lichess_db_standard_rated_2023-{str(k).zfill(2)}.pgn.zst"
+             for k in range(1, 11)
+         ]
+         downloaded_files = dl_manager.download_and_extract(filepaths)
+         generator = datasets.SplitGenerator(
+             name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}
+         )
+         return [generator]
+
+     # TODO: figure out how to do splits; add validation/test SplitGenerators
+     # with their own gen_kwargs once a split scheme is decided.
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepaths):
+         """Each worker receives a random set of the .zst files (the raw dataset)
+         and cycles through them: it reads a single game from file 1, then a
+         single game from file 2, etc. The purpose is to create blocks that
+         contain games from a diverse mix of time periods, which reduces
+         distribution shift. #? Is this real? Or just for engineering simplicity?
+         """
+
+         i = 0
+         streamers = [iter(StreamingPGNDataset(file)) for file in filepaths]
+         game = None
+         full_block = ""
+
+         def get_game():
+             # Round-robin over the remaining shards, dropping exhausted ones.
+             nonlocal i
+             if len(streamers) == 0:
+                 return None
+             try:
+                 game = next(streamers[i % len(streamers)])
+             except StopIteration:
+                 del streamers[i % len(streamers)]
+                 return get_game()
+             i += 1
+             return game
+
+         block_idx = 0
+         while len(streamers) > 0:
+             # cycle through the different shards
+             if game is not None:
+                 # re-use the previous game that was cut off in the last block
+                 full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"
+
+             while len(full_block) < BLOCK_SIZE:
+                 game = get_game()
+                 if game is None:
+                     break  # all shards exhausted; don't spin forever
+                 full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"
+
+             out = full_block[:BLOCK_SIZE]
+             full_block = ""
+             yield block_idx, {
+                 "moves": np.array([TOKENIZER["stoi"][c] for c in out], dtype=np.uint8)
+             }
+             block_idx += 1
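
For reference, each yielded example is a BLOCK_SIZE-character slice over a stream of `;{WhiteElo} {BlackElo} {transcript}` records, character-tokenized with the 32-symbol vocabulary above. A minimal round-trip sketch, assuming the script's `TOKENIZER` dict is in scope (the `encode`/`decode` helpers and the sample block string are illustrative, not part of the script):

```python
import numpy as np

# Hypothetical helpers around the script's TOKENIZER dict.
def encode(text: str) -> np.ndarray:
    # Map each character to its token id; block characters are all in-vocab.
    return np.array([TOKENIZER["stoi"][c] for c in text], dtype=np.uint8)

def decode(tokens) -> str:
    # Inverse mapping back to the move string.
    return "".join(TOKENIZER["itos"][int(t)] for t in tokens)

block = ";1693 1713 1.e4 e5 2.Nf3 Nc6"  # illustrative sample, not real data
assert decode(encode(block)) == block
```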
test.pgn.zst ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2c0c3d54cc0d99f18a891d5479702b2d409ca05916df329687d10b7a5a3eb04
+ size 51200
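
To consume the dataset, something like the following should work. This is a sketch, assuming the script is hosted under the `ezipe/lichess-2023-janoct` repo named in `_HOMEPAGE`; `trust_remote_code=True` is required to run script-based datasets in recent `datasets` releases:

```python
from datasets import load_dataset

# Stream training blocks without materializing all ten monthly archives.
ds = load_dataset(
    "ezipe/lichess-2023-janoct",
    split="train",
    streaming=True,
    trust_remote_code=True,
)
for example in ds.take(2):
    print(example["moves"][:32])  # first 32 token ids of a 1024-char block
```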