# original dataset: https://huggingface.co/datasets/ezipe/lichess_2023_janoct
# you'll need around 6TB or so for this. it'll take around 8 hours or so
import os
import shlex

NUM_SPLITS = 32
# Only .zst archives bigger than this (~15 GB) are treated as the monthly
# Lichess dumps this script targets; smaller .zst files in cwd are skipped.
MIN_SIZE_BYTES = 15072192300


def SPLIT_CMD(fname, num=NUM_SPLITS):
    """Return a shell command splitting *fname* into *num* line-aligned
    pieces named fname.00, fname.01, ... (GNU `split -d -n l/N`).

    Filenames are shell-quoted so names containing spaces or shell
    metacharacters are passed through safely.
    """
    # default now tracks NUM_SPLITS instead of a duplicated literal 32
    return f'split -d -n l/{num} {shlex.quote(fname)} {shlex.quote(fname + ".")}'


def main():
    """Recompress the split chunks of each large Lichess .zst dump in cwd."""
    # for each file in dir that ends in zstd: uncompress, save fnames
    fnames = []
    for fname in os.listdir():
        if fname.endswith('.zst') and os.path.getsize(fname) > MIN_SIZE_BYTES:
            # one-time decompress step, intentionally left disabled:
            #os.system(f'unzstd -T0 {fname}')
            fnames.append(fname[:-4])  # strip the '.zst' suffix

    # split each (one-time step, intentionally left disabled):
    #for fname in fnames:
    #    os.system(SPLIT_CMD(fname, NUM_SPLITS))

    # recompress each chunk; remove any stale .zst from a previous run first
    for fname in fnames:
        for i in range(NUM_SPLITS):
            chunk = f'{fname}.{i:02d}'
            os.system(f'rm {shlex.quote(chunk + ".zst")}')
            cm = f'zstd -T0 {shlex.quote(chunk)}'
            print(cm)
            os.system(cm)


if __name__ == '__main__':
    main()