# src/data_loader.py
from datasets import load_dataset
from torch.utils.data import DataLoader

def get_dataloader(config, tokenizer, split='train'):
    # Load the Python subset of CodeSearchNet from the Hugging Face Hub.
    dataset = load_dataset("code_search_net", "python", split=split)

    def tokenize_function(examples):
        # Tokenize each function's full source, padded/truncated to a fixed length.
        return tokenizer(examples['whole_func_string'], truncation=True,
                         padding='max_length', max_length=config['model']['max_length'])

    # Drop all original columns (the hardcoded list previously used here named
    # 'repo' and 'path', which do not match the dataset's actual column names);
    # using dataset.column_names keeps only the tokenizer's tensor-ready fields.
    tokenized_dataset = dataset.map(tokenize_function, batched=True,
                                    remove_columns=dataset.column_names)
    tokenized_dataset.set_format("torch")

    # Shuffle only while training; keep evaluation order deterministic.
    return DataLoader(tokenized_dataset, batch_size=config['training']['batch_size'],
                      shuffle=(split == 'train'))
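

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original file: it assumes a config
    # dict exposing the keys read above and any Hugging Face tokenizer with a
    # pad token. The "gpt2" checkpoint and the values below are placeholders;
    # substitute whatever your project actually uses.
    from transformers import AutoTokenizer

    config = {
        'model': {'max_length': 256},
        'training': {'batch_size': 8},
    }
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token  # GPT-2 defines no pad token

    loader = get_dataloader(config, tokenizer, split='validation')
    batch = next(iter(loader))
    print(batch['input_ids'].shape)  # expected: (batch_size, max_length)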