"""Module for testing streaming dataset sequence packing"""
import unittest
from functools import partial

import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

from axolotl.utils.collators import PretrainingBatchSamplerDataCollatorForSeq2Seq
from axolotl.utils.data import encode_packed_pretraining


class TestPretrainingPacking(unittest.TestCase):
    """
    Test class for packing streaming dataset sequences
    """

    def setUp(self) -> None:
        # pylint: disable=duplicate-code
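        # huggyllama/llama-7b ships without a pad token, so reuse its </s>
        # end-of-sequence token for padding.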
        self.tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
        self.tokenizer.pad_token = "</s>"
        self.max_seq_length = 2048
        self.batch_size = 2

    def test_packing_stream_dataset(self):
        # pylint: disable=duplicate-code
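        # Stream the C4 English split so examples are fetched lazily instead
        # of downloading the full corpus up front.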
        dataset = load_dataset(
            "c4",
            "en",
            streaming=True,
        )["train"]

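        # The collator pads each packed sequence up to a multiple of
        # max_seq_length, so every packed example has a fixed length.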
        collate_fn = PretrainingBatchSamplerDataCollatorForSeq2Seq(
            self.tokenizer,
            return_tensors="pt",
            padding=True,
            pad_to_multiple_of=self.max_seq_length,
        )

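        # Bind the tokenizer, collator, and packing parameters so that
        # encode_packed_pretraining can be used as a batched map transform.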
        encode = partial(
            encode_packed_pretraining,
            self.tokenizer,
            collate_fn,
            max_seq_length=self.max_seq_length,
            batch_size=self.batch_size,
        )

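        # Tokenize and pack the raw text on the fly, dropping the original
        # dataset columns so only the packed model inputs remain.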
        dataset = dataset.map(
            encode,
            batched=True,
            input_columns="text",
            remove_columns=dataset.features.keys(),
        )

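        # Each mapped example is already a packed batch of
        # batch_size * max_seq_length tokens, so the DataLoader wraps one
        # example at a time with the default collate function.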
        trainer_loader = DataLoader(
            dataset,
            batch_size=1,
            collate_fn=None,
            drop_last=True,
        )
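        # Spot-check the first packed batches: every tensor should be a
        # single row of batch_size * max_seq_length tokens.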
        for idx, data in enumerate(trainer_loader):
            if idx > 10:
                break
            assert data["input_ids"].shape == torch.Size(
                [1, self.batch_size * self.max_seq_length]
            )
            assert data["position_ids"].shape == torch.Size(
                [1, self.batch_size * self.max_seq_length]
            )
            assert data["labels"].shape == torch.Size(
                [1, self.batch_size * self.max_seq_length]
            )
            assert data["attention_mask"].shape == torch.Size(
                [1, self.batch_size * self.max_seq_length]
            )


if __name__ == "__main__":
    unittest.main()