File size: 3,329 Bytes
2855028 92dd194 fbb5670 2855028 92dd194 2855028 92dd194 2855028 92dd194 2855028 fbb5670 87ccba2 2855028 92dd194 2855028 92dd194 2855028 fbb5670 2855028 87ccba2 2855028 fbb5670 87ccba2 fbb5670 2855028 87ccba2 fbb5670 87ccba2 2855028 92dd194 2855028 92dd194 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 |
import datasets
import zstandard as zstd
import io
class LichessConfig(datasets.BuilderConfig):
    """BuilderConfig for the Lichess dataset.

    Parameters:
        features: list of feature/column names the builder exposes
            (e.g. "WhiteElo", "BlackElo", "fens", "moves", "scores").
        **kwargs: forwarded unchanged to ``datasets.BuilderConfig``.
    """
    def __init__(self, features, **kwargs):
        # Python 3 zero-argument super() instead of the legacy two-argument form.
        super().__init__(**kwargs)
        self.features = features
class Lichess(datasets.GeneratorBasedBuilder):
    """Lichess chess-games dataset builder.

    Each example carries the two players' Elo ratings plus lists of FEN
    strings, moves and engine scores, parsed from two-line records inside
    zstd-compressed shard files (12 months of 2023 x 4 shards).
    """
    BUILDER_CONFIG_CLASS = LichessConfig
    BUILDER_CONFIGS = [LichessConfig(features=["WhiteElo",
                                               "BlackElo",
                                               "fens",
                                               "moves",
                                               "scores"])]

    def _info(self):
        """Describe the dataset schema for the `datasets` library."""
        # Elo ratings are small non-negative integers -> uint16 scalars.
        features_dict = {feature: datasets.Value("uint16")
                         for feature in self.config.features}
        # NOTE(review): _generate_examples yields *lists of strings* for these
        # three columns, yet they are declared with dtype "null" here —
        # confirm whether datasets.Sequence(datasets.Value("string")) was
        # intended. Left unchanged to preserve the published schema.
        features_dict["fens"] = datasets.Value("null")
        features_dict["moves"] = datasets.Value("null")
        features_dict["scores"] = datasets.Value("null")
        return datasets.DatasetInfo(datasets.Features(features_dict))

    def _get_filepaths(self):
        """Return the relative paths of every shard file.

        Shard-major order: all 12 months of 2023 for shard 0, then all 12
        for shard 1, etc. — matching the original enumeration order.
        """
        months = ["01", "02", "03", "04", "05", "06",
                  "07", "08", "09", "10", "11", "12"]
        shards = ["0", "1", "2", "3"]
        return ["fen/2023/" + month + "/" + shard + "_fen.zst"
                for shard in shards
                for month in months]

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download every shard and expose them as a single TRAIN split."""
        downloaded_files = dl_manager.download(self._get_filepaths())
        generator = datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                            gen_kwargs={'filepaths': downloaded_files})
        return [generator]

    def _generate_examples(self, filepaths):
        """ Each worker receives a random set of the .zst files (the raw dataset).
        Each worker will cycle through its set of files. They read a single game
        from file 1, then a single game from file 2, etc. ...
        The purpose is to create batches that contain games from a diverse mix
        of time periods. -> Reduces distribution shift.
        """
        files = [open(filepath, "rb") for filepath in filepaths]
        try:
            dctxs = [zstd.ZstdDecompressor() for _ in files]
            stream_readers = [dctx.stream_reader(file)
                              for dctx, file in zip(dctxs, files)]
            pgns = [io.TextIOWrapper(sr) for sr in stream_readers]
            n = 0
            n_files = len(files)
            # approximate number of positions per .zst file — caps total yield
            n_positions = 2 * 10**6
            while n <= n_files * n_positions:
                # cycle through the different shards
                pgn = pgns[n % n_files]
                # each record is two lines: "<white_elo> <black_elo>" then
                # "<fens>;<moves>;<scores>" with comma-separated items
                elos = pgn.readline()
                game = pgn.readline()
                if not game:
                    # one stream exhausted -> stop, as the original did
                    break
                white_elo, black_elo = elos.split(" ")
                fens, moves, scores = game.split(";")
                _id = n
                n += 1
                yield _id, {"WhiteElo": int(white_elo),
                            "BlackElo": int(black_elo),
                            "fens": fens.split(","),
                            "moves": moves.split(","),
                            "scores": scores.rstrip("\n").split(",")}
        finally:
            # fix: the original leaked every file handle it opened; closing
            # the raw files also releases the decompression streams above.
            for file in files:
                file.close()