# This is adapted from the GPT-NeoX library
import os
import struct
from functools import lru_cache
from itertools import accumulate
from pathlib import Path

import numpy as np
from torch.utils.data import Dataset

dtypes = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float32, 7: np.float64, 8: np.uint16}


def code(dtype):
    for k in dtypes:
        if dtypes[k] == dtype:
            return k
    raise ValueError(dtype)


def index_file_path(prefix_path):
    return prefix_path + ".idx"


def data_file_path(prefix_path):
    return prefix_path + ".bin"


def _warmup_mmap_file(path):
    # Read the file sequentially in 100 MB chunks so the OS page cache is warm
    # before the memory map is used.
    with open(path, "rb") as stream:
        while stream.read(100 * 1024 * 1024):
            pass


class MMapIndexedDataset(Dataset):
    class Index:
        _HDR_MAGIC = b"MMIDIDX\x00\x00"

        @classmethod
        def writer(cls, path, dtype):
            class _Writer:
                def __enter__(self):
                    self._file = open(path, "wb")  # noqa: SIM115

                    # Write magic string so we can check the file format when opening it again.
                    self._file.write(cls._HDR_MAGIC)
                    # Write version number
                    # Little endian unsigned 64 bit integer
                    self._file.write(struct.pack("<Q", 1))
                    # Write dtype code
                    # Little endian unsigned 8 bit integer
                    self._file.write(struct.pack("<B", code(dtype)))

                    return self

                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each sequence inside the .bin file.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers

                def write(self, sizes, doc_idx):
                    pointers = self._get_pointers(sizes)

                    # Number of sequences and number of documents.
                    self._file.write(struct.pack("<Q", len(sizes)))
                    self._file.write(struct.pack("<Q", len(doc_idx)))

                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order="C"))
                    del sizes

                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order="C"))
                    del pointers

                    doc_idx = np.array(doc_idx, dtype=np.int64)
                    self._file.write(doc_idx.tobytes(order="C"))

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()

            return _Writer()

        def __init__(self, path, skip_warmup=False):
            # Reader for the GPT-NeoX "MMIDIDX" layout written above: magic,
            # version (<Q), dtype code (<B), sequence count (<Q), document
            # count (<Q), then the sizes, pointers, and document-index arrays.
            with open(path, "rb") as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, "Index file doesn't match expected format."
                version = struct.unpack("<Q", stream.read(8))
                assert version == (1,)

                (dtype_code,) = struct.unpack("<B", stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize

                self._len = struct.unpack("<Q", stream.read(8))[0]
                self._doc_count = struct.unpack("<Q", stream.read(8))[0]
                offset = stream.tell()

            if not skip_warmup:
                _warmup_mmap_file(path)

            self._buffer_mmap = np.memmap(path, mode="r", order="C")
            self._buffer = memoryview(self._buffer_mmap)
            self._sizes = np.frombuffer(self._buffer, dtype=np.int32, count=self._len, offset=offset)
            self._pointers = np.frombuffer(
                self._buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes
            )
            self._doc_idx = np.frombuffer(
                self._buffer,
                dtype=np.int64,
                count=self._doc_count,
                offset=offset + self._sizes.nbytes + self._pointers.nbytes,
            )

        def __del__(self):
            self._buffer_mmap._mmap.close()
            del self._buffer_mmap

        @property
        def dtype(self):
            return self._dtype

        @property
        def sizes(self):
            return self._sizes

        @property
        def doc_idx(self):
            return self._doc_idx

        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            return self._pointers[i], self._sizes[i]

        def __len__(self):
            return self._len

    def __init__(self, path, skip_warmup=False):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path, skip_warmup)

    def _do_init(self, path, skip_warmup):
        self._path = path
        self._index = self.Index(index_file_path(self._path), skip_warmup)

        if not skip_warmup:
            _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode="r", order="C")
        self._bin_buffer = memoryview(self._bin_buffer_mmap)

    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index

    def __len__(self):
        return len(self._index)

    def __getitem__(self, idx):
        if isinstance(idx, int):
            ptr, size = self._index[idx]
            return np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
        elif isinstance(idx, slice):
            start, stop, step = idx.indices(len(self))
            if step != 1:
                raise ValueError("Slices into indexed_dataset must be contiguous")
            ptr = self._index._pointers[start]
            sizes = self._index._sizes[idx]
            offsets = list(accumulate(sizes))
            total_size = sum(sizes)
            np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
            return np.split(np_array, offsets[:-1])
        raise TypeError(f"Unsupported index type: {type(idx)}")

    def get(self, idx, offset=0, length=None):
        """Retrieve a single item, optionally only a portion of it."""
        ptr, size = self._index[idx]
        if length is None:
            length = size - offset
        ptr += offset * np.dtype(self._index.dtype).itemsize
        return np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)

    @property
    def sizes(self):
        return self._index.sizes

    @property
    def doc_idx(self):
        return self._index.doc_idx

    @staticmethod
    def exists(path):
        return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))


class GPT2Dataset(Dataset):
    def __init__(self, indexed_dataset, doc_idx, sample_idx, shuffle_idx):
        self.indexed_dataset = indexed_dataset
        self.doc_idx = doc_idx
        self.sample_idx = sample_idx
        self.shuffle_idx = shuffle_idx

    def __len__(self):
        # sample_idx has one more entry than there are samples: consecutive
        # entries bound each sample in __getitem__.
        return self.sample_idx.shape[0] - 1

    def __getitem__(self, idx) -> dict[str, np.ndarray]:
        # Get the shuffled index.
        idx = self.shuffle_idx[idx]
        # Start and end documents and offsets.
        doc_index_f = self.sample_idx[idx][0]
        doc_index_l = self.sample_idx[idx + 1][0]
        offset_f = self.sample_idx[idx][1]
        offset_l = self.sample_idx[idx + 1][1]
        # If we are within the same document, just extract the chunk.
        if doc_index_f == doc_index_l:
            sample = self.indexed_dataset.get(
                self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
            )
        else:
            # Otherwise, get the rest of the initial document.
            sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
            # Loop over all in-between documents and add each entire document.
            for i in range(doc_index_f + 1, doc_index_l):
                sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
            # And finally add the relevant portion of the last document.
            sample_list.append(self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1))
            sample = np.concatenate(sample_list)
        return {"text": np.array(sample, dtype=np.int64)}


def read_dataset(file_path: str | Path, prefix: str, document_path: str | Path = ".") -> GPT2Dataset:
    # e.g., pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s_doc_idx.npy
    # prefix: pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s
    file_path = Path(file_path)
    document_path = Path(document_path)
    doc_idx = np.load(file_path / f"{prefix}_doc_idx.npy", allow_pickle=True, mmap_mode="r")
    sample_idx = np.load(file_path / f"{prefix}_sample_idx.npy", allow_pickle=True, mmap_mode="r")
    shuffle_idx = np.load(file_path / f"{prefix}_shuffle_idx.npy", allow_pickle=True, mmap_mode="r")

    indexed_dataset = MMapIndexedDataset(str(document_path / "pile_20B_tokenizer_text_document"), skip_warmup=True)
    ds = GPT2Dataset(indexed_dataset=indexed_dataset, doc_idx=doc_idx, sample_idx=sample_idx, shuffle_idx=shuffle_idx)

    # Check that the sequence length is correct.
    print("Seq length ==", len(ds[0]["text"]))
    print("Num batches ==", len(ds) / 1024, "(should be 143k)")
    return ds
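

# Example usage: a minimal sketch of how read_dataset() might be called. The
# directory layout below ("idx_maps" for the *_idx.npy files, "." for the
# .bin/.idx token files) is an assumption for illustration; the prefix is the
# one given in the comment inside read_dataset().
if __name__ == "__main__":
    dataset = read_dataset(
        file_path="idx_maps",  # assumed location of the *_doc_idx / *_sample_idx / *_shuffle_idx .npy files
        prefix="pile_20B_tokenizer_text_document_train_indexmap_120ns_2048sl_1234s",
        document_path=".",  # assumed location of pile_20B_tokenizer_text_document.bin/.idx
    )
    first = dataset[0]["text"]
    print("First sample:", first[:10], "... length:", len(first))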