"""Lichess data in 2023 from Jan-Oct.""" |
|
|
|
|
|
import io
import re

import datasets
import numpy as np
import zstandard
|
|
|
_DESCRIPTION = """\
Lichess standard rated games from January through October 2023.
"""
|
|
|
|
|
class LichessConfig(datasets.BuilderConfig):
    """BuilderConfig for the Lichess blocks dataset; `features` names the exposed columns."""

    def __init__(self, features, **kwargs):
        super(LichessConfig, self).__init__(**kwargs)
        self.features = features
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/ezipe/lichess-2023-janoct" |
|
|
|
|
|
def process_wrapper():
    # Characters allowed in a cleaned transcript (move text, Elo digits, and the
    # ";" game separator used when packing blocks).
    vocab = "#+-.0123456789;=BKNOQRabcdefghx "
    # Translation table that deletes every other Unicode character.
    del_chars = "".join(c for c in map(chr, range(1114111)) if c not in vocab)
    del_map = str.maketrans("", "", del_chars)

    def process(game_str):
        res = {}

        for g in game_str.split("\n"):
            if g.startswith("["):
                # PGN header tag, e.g. '[WhiteElo "1500"]' -> res["WhiteElo"] = "1500".
                k, v = g[1:-1].split(' "')
                res[k] = v[:-1]
            elif g.startswith("1. "):
                # Move-text line: drop {...} comments (clock/eval annotations), keep
                # only vocab characters, strip "N..." black-move numbers and the game
                # result, then normalise spacing.
                no_brackets_string = re.sub(r"\{.*?\}", "", g)
                no_brackets_string = no_brackets_string.translate(del_map)
                remove_dots = re.sub(r"\b\d+\.\.\. ", "", no_brackets_string)
                # After the vocab filter a draw marker "1/2-1/2" appears as "12-12";
                # [:-2] trims the two spaces left where the result followed a comment.
                remove_game_result = re.sub(r"1-0|0-1|1/2-1/2|12-12", "", remove_dots)[:-2]
                remove_spaces = re.sub(r"(\d+)\.\s+", r"\1.", remove_game_result)
                remove_double_spaces = re.sub(r" +", " ", remove_spaces)
                res["transcript"] = remove_double_spaces

        return res

    return process
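# Illustrative example (hypothetical, abridged input; real Lichess PGNs carry more
# headers and a clock/eval comment on every move) of what the closure returned by
# process_wrapper() produces:
#
#   process = process_wrapper()
#   process('[WhiteElo "1500"]\n'
#           '[BlackElo "1620"]\n'
#           '1. e4 { [%clk 0:03:00] } 1... e5 { [%clk 0:03:00] } 2. Nf3 { [%clk 0:02:58] } 1-0')
#   -> {"WhiteElo": "1500", "BlackElo": "1620", "transcript": "1.e4 e5 2.Nf3"}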
|
|
|
|
|
class StreamingPGNDataset:
    """Stream games out of a single zstandard-compressed PGN shard."""

    def __init__(self, file_path, transform=None):
        self.file_path = file_path
        self.transform = transform
        self.process = process_wrapper()

    def read_game(self):
        dctx = zstandard.ZstdDecompressor()

        with open(self.file_path, "rb") as pgn_file:
            stream_reader = dctx.stream_reader(pgn_file)
            text_stream = io.TextIOWrapper(stream_reader, encoding="utf-8")

            # Accumulate header lines until the move-text line ("1. ...") arrives,
            # which marks the end of one game's PGN record.
            fg = ""

            for line in text_stream:
                fg += line
                if line.startswith("1. "):
                    game = self.process(fg)
                    fg = ""
                    yield game

    def __iter__(self):
        return self.read_game()
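# Minimal standalone usage sketch (not used by the loader; the shard name is just an
# example of the naming scheme from _split_generators below):
#
#   for game in StreamingPGNDataset("data/lichess_db_standard_rated_2023-01.pgn.00.zst"):
#       print(game.get("WhiteElo"), game.get("transcript"))
#       break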
|
|
|
|
|
TOKENIZER = {
    "vocab_size": 32,
    "itos": {
        0: " ", 1: "#", 2: "+", 3: "-", 4: ".", 5: "0", 6: "1", 7: "2",
        8: "3", 9: "4", 10: "5", 11: "6", 12: "7", 13: "8", 14: "9", 15: ";",
        16: "=", 17: "B", 18: "K", 19: "N", 20: "O", 21: "Q", 22: "R", 23: "a",
        24: "b", 25: "c", 26: "d", 27: "e", 28: "f", 29: "g", 30: "h", 31: "x",
    },
    "stoi": {
        " ": 0, "#": 1, "+": 2, "-": 3, ".": 4, "0": 5, "1": 6, "2": 7,
        "3": 8, "4": 9, "5": 10, "6": 11, "7": 12, "8": 13, "9": 14, ";": 15,
        "=": 16, "B": 17, "K": 18, "N": 19, "O": 20, "Q": 21, "R": 22, "a": 23,
        "b": 24, "c": 25, "d": 26, "e": 27, "f": 28, "g": 29, "h": 30, "x": 31,
    },
}
|
BLOCK_SIZE = 1024 |
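# Helper sketches for working with TOKENIZER (assumption: these are illustrative
# additions, not used by the loader itself) -- they map between move strings and
# the uint8 token ids stored in the "moves" column.
def _encode(text):
    # e.g. "1.e4 e5" -> [6, 4, 27, 9, 0, 27, 10]
    return [TOKENIZER["stoi"][c] for c in text]


def _decode(ids):
    # Inverse of _encode; accepts any iterable of integer ids.
    return "".join(TOKENIZER["itos"][int(t)] for t in ids)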
|
|
|
class Lichess2023JanOct(datasets.GeneratorBasedBuilder):
    """Lichess games from Jan-Oct 2023, packed into fixed-length transformer blocks
    (similar to https://huggingface.co/datasets/adamkarvonen/chess_games)."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = LichessConfig
    BUILDER_CONFIGS = [LichessConfig(features=["moves"])]
|
|
|
    def _info(self):
        features = datasets.Features(
            {
                "moves": datasets.Sequence(datasets.Value("uint8")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
        )
|
|
|
    def _split_generators(self, dl_manager):
        # 320 shards: split files .00-.31 for each month 2023-01 through 2023-10.
        filepaths = [
            f"data/lichess_db_standard_rated_2023-{k:02}.pgn.{s:02}.zst"
            for s in range(32)
            for k in range(1, 11)
        ]

        generator = datasets.SplitGenerator(
            name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}
        )
        return [generator]

    def _generate_examples(self, filepaths):
        """Yield (key, example) tuples, each a fixed-length block of token ids.

        Each worker receives a subset of the .zst shards (the raw dataset) and
        cycles through them, drawing games from one shard, then the next, and
        so on. The intent is for every block stream to mix games from different
        shards and months, which should reduce distribution shift across
        batches. #? Is this real? Or just for engineering simplicity?
        """
|
|
|
        i = 0
        streamers = [iter(StreamingPGNDataset(file)) for file in filepaths]
        game = None
        full_block = ""

        def get_game():
            # Pull the next game from the current shard, dropping shards as they
            # run out of games.
            if len(streamers) == 0:
                return None
            try:
                return next(streamers[i % len(streamers)])
            except StopIteration:
                del streamers[i % len(streamers)]
                return get_game()

        while len(streamers) > 0:
            # A game that straddled the previous block boundary was truncated there;
            # it is repeated in full at the start of the next block.
            if game is not None:
                full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"

            # Pack ";WhiteElo BlackElo transcript" records until the block is full.
            # Decoded, a block looks roughly like (illustrative values):
            #   ";1532 1619 1.e4 e5 2.Nf3 Nc6 ...;1874 1902 1.d4 d5 2.c4 ..."
            while len(full_block) < BLOCK_SIZE:
                game = get_game()
                if game is None:
                    # All shards are exhausted; emit whatever has been collected.
                    break
                full_block += f";{game['WhiteElo']} {game['BlackElo']} {game['transcript']}"

            out = full_block[:BLOCK_SIZE]
            full_block = ""
            _id = i
            i += 1
            yield _id, {"moves": np.array([TOKENIZER["stoi"][c] for c in out], dtype=np.uint8)}
|
|
|
|
|
if __name__ == '__main__': |
|
dataset = datasets.load_dataset("/mnt/data/lichess_2023_janoct_shards", streaming=True) |
|
k = iter(dataset['train']) |
|
print(next(k)) |
|
print(next(k)) |
|
print(next(k)) |
|
print(next(k)) |
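    # Illustrative extra check (not part of the original script): decode one block's
    # token ids back to text via TOKENIZER["itos"] to inspect the
    # ";WhiteElo BlackElo moves" layout.
    block = next(k)["moves"]
    print("".join(TOKENIZER["itos"][int(t)] for t in block[:80]))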
|
|
|
|