# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import pickle
import random
import re
import gc
import shutil
import json

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler

from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
                          BertConfig, BertForMaskedLM, BertTokenizer,
                          GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
                          OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
                          RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
                          DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)


class TextDataset(Dataset):
    """Pre-training dataset: tokenizes functions per language and cuts the token stream into fixed-size blocks."""

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir,
                                   file_type + "_langs_%s" % (args.langs)
                                   + "_blocksize_%d" % (block_size)
                                   + "_wordsize_%d" % (world_size)
                                   + "_rank_%d" % (local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            if args.langs == 'all':
                langs = os.listdir(args.data_dir)
            else:
                langs = [args.langs]

            data = []
            for lang in langs:
                datafile = os.path.join(args.data_dir, lang, file_type + '.pkl')
                if file_type == 'train':
                    logger.warning("Creating features from dataset file at %s", datafile)
                # with open(datafile) as f:
                #     data.extend([json.loads(x)['code'] for idx, x in enumerate(f.readlines()) if idx % world_size == local_rank])
                dataset = pickle.load(open(datafile, 'rb'))
                # Wrap each function with <s> ... </s> markers; each rank keeps only its own shard.
                data.extend(['<s> ' + ' '.join(x['function'].split()) + ' </s>'
                             for idx, x in enumerate(dataset) if idx % world_size == local_rank])

            # random.shuffle(data)
            length = len(data)
            logger.warning("Data size: %d" % (length))
            input_ids = []
            for idx, x in enumerate(data):
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length // 10) == 0:
                    percent = idx / (length // 10) * 10
                    logger.warning("Rank %d, load %d" % (local_rank, percent))
            del data
            gc.collect()

            length = len(input_ids)
            for i in range(0, length - block_size, block_size):
                self.inputs.append(input_ids[i: i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples" % (local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
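
# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how these Dataset
# classes are typically wrapped for training or evaluation. The helper name
# `build_dataloader`, its default batch size, and the `evaluate` flag are
# assumptions for illustration; the real training loop lives in the
# accompanying run script.
# -----------------------------------------------------------------------------
def build_dataloader(dataset, batch_size=8, local_rank=-1, evaluate=False):
    """Wrap a dataset of fixed-size blocks in a DataLoader, sharding across ranks when distributed."""
    if evaluate:
        sampler = SequentialSampler(dataset)
    elif local_rank == -1:
        sampler = RandomSampler(dataset)
    else:
        sampler = DistributedSampler(dataset)
    # Drop the last incomplete batch only during training.
    return DataLoader(dataset, sampler=sampler, batch_size=batch_size, drop_last=not evaluate)
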
class finetuneDataset(Dataset):
    """Fine-tuning dataset: reads <file_type>.txt line by line and cuts the token stream into blocks."""

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir,
                                   file_type + "_blocksize_%d" % (block_size)
                                   + "_wordsize_%d" % (world_size)
                                   + "_rank_%d" % (local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            if file_type == 'train':
                logger.warning("Creating features from dataset file at %s", datafile)
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d" % (length))
            input_ids = []
            for idx, x in enumerate(data):
                x = x.strip()
                # Add <s> ... </s> markers unless the line already carries them.
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length // 10) == 0:
                    percent = idx / (length // 10) * 10
                    logger.warning("Rank %d, load %d" % (local_rank, percent))
            del data
            gc.collect()

            length = len(input_ids) // world_size
            logger.info(f"tokens: {length * world_size}")
            input_ids = input_ids[local_rank * length: (local_rank + 1) * length]

            for i in range(0, length - block_size, block_size):
                self.inputs.append(input_ids[i: i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples" % (local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])


class EvalDataset(Dataset):
    """Evaluation dataset: splits the token stream only at token boundaries and pads each block."""

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type + "_blocksize_%d" % (block_size))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)
        else:
            self.inputs = []
            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d" % (length))
            input_ids = []
            for idx, x in enumerate(data):
                x = x.strip()
                if x.startswith("<s>") and x.endswith("</s>"):
                    pass
                else:
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    pass
                if idx % (length // 10) == 0:
                    percent = idx / (length // 10) * 10
                    logger.warning("load %d" % (percent))
            del data
            gc.collect()

            logger.info(f"tokens: {len(input_ids)}")
            self.split(input_ids, tokenizer, logger, block_size=block_size)
            del input_ids
            gc.collect()

            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def split(self, input_ids, tokenizer, logger, block_size=1024):
        sample = []
        i = 0
        while i < len(input_ids):
            sample = input_ids[i: i + block_size]
            if len(sample) == block_size:
                for j in range(block_size):
                    # Walk backwards until the block can be cut at a word boundary
                    # ('\u0120' is the GPT-2 BPE space prefix) or at a normalized
                    # literal token such as <NUM_LIT...>.
                    if tokenizer.convert_ids_to_tokens(sample[block_size - 1 - j])[0] == '\u0120' or \
                            tokenizer.convert_ids_to_tokens(sample[block_size - 1 - j]).startswith("<NUM_LIT"):
                        break
                    # Special tokens are also safe cut points.
                    if sample[block_size - 1 - j] in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id]:
                        if sample[block_size - 1 - j] != tokenizer.bos_token_id:
                            j -= 1
                        break
                if j == block_size - 1:
                    print(tokenizer.decode(sample))
                    exit()
                sample = sample[: block_size - 1 - j]
            i += len(sample)
            # Pad the block back up to block_size before storing it.
            pad_len = block_size - len(sample)
            sample += [tokenizer.pad_token_id] * pad_len
            self.inputs.append(sample)

            if len(self.inputs) % 10000 == 0:
                logger.warning(f"{len(self.inputs)} samples")

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
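
# -----------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the original file, which is
# normally driven by a separate run script). It wires an EvalDataset and the
# build_dataloader helper above together; the data path, cache directory, and
# model name below are placeholders.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    from argparse import Namespace

    logging.basicConfig(level=logging.INFO)
    demo_logger = logging.getLogger(__name__)

    # Only the fields these classes actually read are provided here.
    demo_args = Namespace(data_dir="token_completion", output_dir="cache",
                          local_rank=-1, overwrite_cache=False)
    demo_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    # GPT-2 ships without a pad token; reuse EOS for padding in this sketch.
    demo_tokenizer.pad_token = demo_tokenizer.eos_token

    demo_dataset = EvalDataset(demo_tokenizer, demo_args, demo_logger,
                               file_type="dev", block_size=1024)
    demo_loader = build_dataloader(demo_dataset, batch_size=4, evaluate=True)
    for batch in demo_loader:
        print(batch.shape)  # (batch_size, block_size)
        break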