# lichess_2021_to_2024_shards/lichess_2023_janoct_shards.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lichess data in 2023 from Jan-Oct."""
import io
import re

import datasets
import numpy as np
import zstandard
_DESCRIPTION = """\
Lichess games from January through October 2023
"""
class LichessConfig(datasets.BuilderConfig):
    """BuilderConfig that records which features the dataset exposes."""

    def __init__(self, features, **kwargs):
        super().__init__(**kwargs)
        self.features = features
_HOMEPAGE = "https://huggingface.co/datasets/ezipe/lichess-2023-janoct"
def process_wrapper():
    # Characters allowed in a transcript; every other Unicode character is deleted.
    vocab = "#+-.0123456789;=BKNOQRabcdefghx "
    del_chars = "".join(c for c in map(chr, range(1114111)) if c not in vocab)
    del_map = str.maketrans("", "", del_chars)
    def process(game_str):
        res = {}
        for g in game_str.split("\n"):
            if g.startswith("["):
                # Header line, e.g. [WhiteElo "1500"] -> res["WhiteElo"] = "1500"
                k, v = g[1:-1].split(' "')
                res[k] = v[:-1]
            elif g.startswith("1. "):
                # Movetext line: strip {...} annotations (clock/eval comments).
                no_brackets_string = re.sub(r"\{.*?\}", "", g)  # , flags=re.DOTALL
                no_brackets_string = no_brackets_string.translate(del_map)
                # Drop black's continuation numbers ("1... ") left by comment removal.
                remove_dots = re.sub(r"\b\d+\.\.\. ", "", no_brackets_string)
                # Drop the game result; [:-2] trims the trailing spaces it leaves.
                remove_game_result = re.sub(r"1-0|0-1|1/2-1/2", "", remove_dots)[:-2]
                # "1. e4" -> "1.e4"
                remove_spaces = re.sub(r"(\d+)\.\s+", r"\1.", remove_game_result)
                # Collapse the double spaces left where comments were removed.
                remove_double_spaces = re.sub(r"  ", r" ", remove_spaces)
                res["transcript"] = remove_double_spaces
        return res
return process
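# A minimal sketch of what `process` returns for a comment-annotated Lichess
# game (hypothetical input; values are illustrative):
#
#     process = process_wrapper()
#     process('[WhiteElo "1500"]\n[BlackElo "1520"]\n'
#             '1. e4 { [%clk 0:03:00] } 1... e5 { [%clk 0:03:00] } 1-0')
#     # -> {'WhiteElo': '1500', 'BlackElo': '1520', 'transcript': '1.e4 e5'}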
class StreamingPGNDataset:
    """Streams games one at a time out of a zstd-compressed PGN file."""

    def __init__(self, file_path, transform=None):
        self.file_path = file_path
        self.transform = transform
        self.process = process_wrapper()

    def read_game(self):
        dctx = zstandard.ZstdDecompressor()
        with open(self.file_path, "rb") as pgn_file:
            stream_reader = dctx.stream_reader(pgn_file)
            text_stream = io.TextIOWrapper(stream_reader, encoding="utf-8")
            fg = ""
            for line in text_stream:
                fg += line
                # The movetext line ("1. ...") is the last line of a game, so a
                # complete game has been accumulated once it appears.
                if line.startswith("1. "):
                    game = self.process(fg)
                    fg = ""
                    yield game

    def __iter__(self):
        return self.read_game()
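# Usage sketch (assumes a local shard path; not part of the loader itself):
#
#     stream = StreamingPGNDataset("data/lichess_db_standard_rated_2023-01.pgn.00.zst")
#     first_game = next(iter(stream))
#     print(first_game["WhiteElo"], first_game["transcript"])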
# Character-level tokenizer over the 32-symbol vocabulary used by `process`.
# sorted() reproduces the original id assignment: " " = 0, "#" = 1, ..., "x" = 31.
_VOCAB = sorted("#+-.0123456789;=BKNOQRabcdefghx ")
TOKENIZER = {
    "vocab_size": len(_VOCAB),  # 32
    "itos": dict(enumerate(_VOCAB)),
    "stoi": {c: i for i, c in enumerate(_VOCAB)},
}
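# Round-trip helpers for the tokenizer (illustration only; the loader below
# indexes TOKENIZER["stoi"] directly):
def encode(s):
    """Map a transcript string to a list of token ids."""
    return [TOKENIZER["stoi"][c] for c in s]

def decode(ids):
    """Map token ids back to a transcript string."""
    return "".join(TOKENIZER["itos"][int(i)] for i in ids)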
BLOCK_SIZE = 1024
class Lichess2023JanOct(datasets.GeneratorBasedBuilder):
"""Lichess data from Jan-Oct in transformer block format: Similar to https://huggingface.co/datasets/adamkarvonen/chess_games"""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIG_CLASS = LichessConfig
BUILDER_CONFIGS = [LichessConfig(features=["moves"])]
    def _info(self):
        features = datasets.Features(
            {
                # A BLOCK_SIZE-character block of packed games, tokenized to uint8 ids.
                "moves": datasets.Sequence(datasets.Value("uint8")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
        )
    def _split_generators(self, dl_manager):
        # 10 months (Jan-Oct 2023) x 32 shards per month, e.g.
        # "data/lichess_db_standard_rated_2023-01.pgn.00.zst".
        filepaths = [
            f"data/lichess_db_standard_rated_2023-{k:02}.pgn.{s:02}.zst"
            for s in range(32)
            for k in range(1, 11)
        ]
        # TODO: figure out how to add validation/test splits.
        generator = datasets.SplitGenerator(
            name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}
        )
        return [generator]
    def _generate_examples(self, filepaths):
        """Each worker receives a random subset of the .zst shard files.

        Each worker cycles through its files, reading one game from file 1,
        then one game from file 2, and so on, so that consecutive blocks mix
        games from a diverse range of time periods. (This plausibly reduces
        distribution shift, though it may mostly be engineering convenience.)
        """
        i = 0  # round-robin cursor over shards; doubles as the example id
        streamers = [iter(StreamingPGNDataset(file)) for file in filepaths]
        game = None
        full_block = ""

        def get_game():
            # Next game from the current shard; exhausted shards are dropped,
            # and None is returned once every shard has run dry.
            if len(streamers) == 0:
                return None
            try:
                game = next(streamers[i % len(streamers)])
            except StopIteration:
                del streamers[i % len(streamers)]
                return get_game()
            return game
        while len(streamers) > 0:
            # Cycle through the shards, packing games into fixed-size blocks.
            if game is not None:
                # Re-add the game that was cut off at the end of the last block.
                full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"
            while len(full_block) < BLOCK_SIZE:
                game = get_game()
                if game is None:
                    break  # every shard is exhausted; emit the final partial block
                full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"
            out = full_block[:BLOCK_SIZE]
            full_block = ""
            _id = i
            i += 1
            yield _id, {"moves": np.array([TOKENIZER["stoi"][c] for c in out], dtype=np.uint8)}
if __name__ == "__main__":
    dataset = datasets.load_dataset("/mnt/data/lichess_2023_janoct_shards", streaming=True)
    k = iter(dataset["train"])
    for _ in range(4):
        print(next(k))
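    # Decode one more block back to text to eyeball the format (illustration only):
    sample = next(k)
    print(decode(sample["moves"]))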