diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..28df5f900b358436f0267334b3e3e9af33f917ba --- /dev/null +++ b/.gitattributes @@ -0,0 +1,55 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.lz4 filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text +# Audio files - uncompressed +*.pcm filter=lfs diff=lfs merge=lfs -text +*.sam filter=lfs diff=lfs merge=lfs -text +*.raw filter=lfs diff=lfs merge=lfs -text +# Audio files - compressed +*.aac filter=lfs diff=lfs merge=lfs -text +*.flac filter=lfs diff=lfs merge=lfs -text +*.mp3 filter=lfs diff=lfs merge=lfs -text +*.ogg filter=lfs diff=lfs merge=lfs -text +*.wav filter=lfs diff=lfs merge=lfs -text +# Image files - uncompressed +*.bmp filter=lfs diff=lfs merge=lfs -text +*.gif filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.tiff filter=lfs diff=lfs merge=lfs -text +# Image files - compressed +*.jpg filter=lfs diff=lfs merge=lfs -text +*.jpeg filter=lfs diff=lfs merge=lfs -text +*.webp filter=lfs diff=lfs merge=lfs -text diff --git a/Code-Code/Clone-detection-BigCloneBench/code/eval.sh b/Code-Code/Clone-detection-BigCloneBench/code/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..2527ebf2480c652ff550e728c1fd60f77360cdb6 --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/eval.sh @@ -0,0 +1,19 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ + --output_dir=../model \ + --model_type=roberta \ + --config_name=microsoft/codebert-base \ + --model_name_or_path=microsoft/codebert-base \ + --tokenizer_name=roberta-base \ + --do_eval \ + --do_test \ + --train_data_file=../dataset/train.txt \ + --eval_data_file=../dataset/valid.txt \ + --test_data_file=../dataset/valid.txt \ + --epoch 2 \ + --block_size 400 \ + --train_batch_size 16 \ + --eval_batch_size 32 \ + --learning_rate 5e-5 \ + --max_grad_norm 1.0 \ + --evaluate_during_training \ + --seed 123456 \ No newline at end of file diff --git a/Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh 
b/Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..397b8875cc487f399cfd6ed0eaab928f2e54b0ed --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh @@ -0,0 +1,3 @@ +python evaluator.py \ + -a ../dataset/valid.txt \ + -p ../model/predictions.txt \ No newline at end of file diff --git a/Code-Code/Clone-detection-BigCloneBench/code/evaluator.py b/Code-Code/Clone-detection-BigCloneBench/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..041db72784671ac496f1d8d6d9e7b1d136ec6870 --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/evaluator.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +import logging +import sys +from sklearn.metrics import recall_score,precision_score,f1_score + +def read_answers(filename): + answers={} + with open(filename) as f: + for line in f: + line=line.strip() + idx1,idx2,label=line.split() + answers[(idx1,idx2)]=int(label) + return answers + +def read_predictions(filename): + predictions={} + with open(filename) as f: + for line in f: + line=line.strip() + idx1,idx2,label=line.split() + predictions[(idx1,idx2)]=int(label) + return predictions + +def calculate_scores(answers,predictions): + y_trues,y_preds=[],[] + for key in answers: + if key not in predictions: + logging.error("Missing prediction for ({},{}) pair.".format(key[0],key[1])) + sys.exit() + y_trues.append(answers[key]) + y_preds.append(predictions[key]) + scores={} + scores['Recall']=recall_score(y_trues, y_preds) + scores['Precision']=precision_score(y_trues, y_preds) + scores['F1']=f1_score(y_trues, y_preds) + return scores + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.') + parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.") + parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.") + + + args = parser.parse_args() + answers=read_answers(args.answers) + predictions=read_predictions(args.predictions) + scores=calculate_scores(answers,predictions) + print(scores) + +if __name__ == '__main__': + main() diff --git a/Code-Code/Clone-detection-BigCloneBench/code/model.py b/Code-Code/Clone-detection-BigCloneBench/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e48296c60a941d39954c3dfe1066887315b9a1b2 --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/model.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, MSELoss + +class RobertaClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size*2, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.out_proj = nn.Linear(config.hidden_size, 2) + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. 
to [CLS]) + x = x.reshape(-1,x.size(-1)*2) + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + +class Model(nn.Module): + def __init__(self, encoder,config,tokenizer,args): + super(Model, self).__init__() + self.encoder = encoder + self.config=config + self.tokenizer=tokenizer + self.classifier=RobertaClassificationHead(config) + self.args=args + + + def forward(self, input_ids=None,labels=None, return_vec=None): + input_ids=input_ids.view(-1,self.args.block_size) + outputs = self.encoder(input_ids= input_ids,attention_mask=input_ids.ne(1)) + + if return_vec: + return outputs.pooler_output + + outputs = outputs[0] + logits=self.classifier(outputs) + prob=F.softmax(logits) + + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits, labels) + return loss,prob + else: + return prob + + + + + + diff --git a/Code-Code/Clone-detection-BigCloneBench/code/run.py b/Code-Code/Clone-detection-BigCloneBench/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..202b40e8fb0854b2e26f814c6dd2c5ab66993eed --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/run.py @@ -0,0 +1,649 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. 
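+In this directory the script is adapted to fine-tune a RoBERTa/CodeBERT encoder as a binary clone detector on BigCloneBench: each example concatenates two tokenized functions, and the classification head in model.py predicts clone vs. non-clone.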
+""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil +import json +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange +import multiprocessing +from model import Model + +cpu_cont = 16 +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaModel, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), + 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +} + +def get_example(item): + url1,url2,label,tokenizer,args,cache,url_to_code=item + if url1 in cache: + code1=cache[url1].copy() + else: + try: + code=' '.join(url_to_code[url1].split()) + except: + code="" + code1=tokenizer.tokenize(code) + if url2 in cache: + code2=cache[url2].copy() + else: + try: + code=' '.join(url_to_code[url2].split()) + except: + code="" + code2=tokenizer.tokenize(code) + + return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache) + + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, + input_tokens, + input_ids, + label, + url1, + url2 + + ): + self.input_tokens = input_tokens + self.input_ids = input_ids + self.label=label + self.url1=url1 + self.url2=url2 + +def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache): + #source + code1_tokens=code1_tokens[:args.block_size-2] + code1_tokens =[tokenizer.cls_token]+code1_tokens+[tokenizer.sep_token] + code2_tokens=code2_tokens[:args.block_size-2] + code2_tokens =[tokenizer.cls_token]+code2_tokens+[tokenizer.sep_token] + + code1_ids=tokenizer.convert_tokens_to_ids(code1_tokens) + padding_length = args.block_size - len(code1_ids) + code1_ids+=[tokenizer.pad_token_id]*padding_length + + code2_ids=tokenizer.convert_tokens_to_ids(code2_tokens) + padding_length = args.block_size - len(code2_ids) + code2_ids+=[tokenizer.pad_token_id]*padding_length + + source_tokens=code1_tokens+code2_tokens + source_ids=code1_ids+code2_ids + return InputFeatures(source_tokens,source_ids,label,url1,url2) + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, file_path='train', block_size=512,pool=None): + postfix=file_path.split('/')[-1].split('.txt')[0] + self.examples = [] + index_filename=file_path + logger.info("Creating features from index file at %s ", index_filename) + url_to_code={} + with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f: + for line in f: + line=line.strip() + js=json.loads(line) + url_to_code[js['idx']]=js['func'] + + data=[] + cache={} + f=open(index_filename) + with open(index_filename) as f: + for line in f: + line=line.strip() + 
url1,url2,label=line.split('\t') + if url1 not in url_to_code or url2 not in url_to_code: + continue + if label=='0': + label=0 + else: + label=1 + data.append((url1,url2,label,tokenizer, args,cache,url_to_code)) + if 'test' not in postfix: + data=random.sample(data,int(len(data)*0.1)) + + self.examples=pool.map(get_example,tqdm(data,total=len(data))) + if 'train' in postfix: + for idx, example in enumerate(self.examples[:3]): + logger.info("*** Example ***") + logger.info("idx: {}".format(idx)) + logger.info("label: {}".format(example.label)) + logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens])) + logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids)))) + + + + def __len__(self): + return len(self.examples) + + def __getitem__(self, item): + + return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label) + + +def load_and_cache_examples(args, tokenizer, evaluate=False,test=False,pool=None): + dataset = TextDataset(tokenizer, args, file_path=args.test_data_file if test else (args.eval_data_file if evaluate else args.train_data_file),block_size=args.block_size,pool=pool) + return dataset + +def set_seed(seed=42): + random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + +def train(args, train_dataset, model, tokenizer,pool): + """ Train the model """ + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) + args.max_steps=args.epoch*len( train_dataloader) + args.save_steps=len( train_dataloader) + args.warmup_steps=len( train_dataloader) + args.logging_steps=len( train_dataloader) + args.num_train_epochs=args.epoch + model.to(args.device) + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, + num_training_steps=args.max_steps) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + if os.path.exists(scheduler_last): + 
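+ # Resume support: if a previous run left state under <output_dir>/checkpoint-last, the scheduler state is reloaded here and the optimizer state in the block just below, so training can continue from where it stopped.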
scheduler.load_state_dict(torch.load(scheduler_last)) + if os.path.exists(optimizer_last): + optimizer.load_state_dict(torch.load(optimizer_last)) + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", args.max_steps) + + global_step = args.start_step + tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0 + best_mrr=0.0 + best_f1=0 + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + set_seed(args.seed) # Added here for reproducibility (even between python 2 and 3) + + for idx in range(args.start_epoch, int(args.num_train_epochs)): + bar = tqdm(train_dataloader,total=len(train_dataloader)) + tr_num=0 + train_loss=0 + for step, batch in enumerate(bar): + inputs = batch[0].to(args.device) + labels=batch[1].to(args.device) + model.train() + loss,logits = model(inputs,labels) + + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + tr_num+=1 + train_loss+=loss.item() + if avg_loss==0: + avg_loss=tr_loss + avg_loss=round(train_loss/tr_num,5) + bar.set_description("epoch {} loss {}".format(idx,avg_loss)) + + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag=True + avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + logging_loss = tr_loss + tr_nb=global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer,pool=pool,eval_when_training=True) + # Save model checkpoint + + if results['eval_f1']>best_f1: + best_f1=results['eval_f1'] + logger.info(" "+"*"*20) + logger.info(" Best f1:%s",round(best_f1,4)) + logger.info(" "+"*"*20) + + checkpoint_prefix = 'checkpoint-best-f1' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model,'module') else model + output_dir = os.path.join(output_dir, '{}'.format('model.bin')) + torch.save(model_to_save.state_dict(), output_dir) + logger.info("Saving model checkpoint to %s", output_dir) + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = 
model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + + if args.max_steps > 0 and global_step > args.max_steps: + train_iterator.close() + break + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix="",pool=None,eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True,pool=pool) + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running evaluation {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + logits=[] + y_trues=[] + for batch in eval_dataloader: + inputs = batch[0].to(args.device) + labels=batch[1].to(args.device) + with torch.no_grad(): + lm_loss,logit = model(inputs,labels) + eval_loss += lm_loss.mean().item() + logits.append(logit.cpu().numpy()) + y_trues.append(labels.cpu().numpy()) + nb_eval_steps += 1 + logits=np.concatenate(logits,0) + y_trues=np.concatenate(y_trues,0) + best_threshold=0 + best_f1=0 + for i in range(1,100): + threshold=i/100 + y_preds=logits[:,1]>threshold + from sklearn.metrics import recall_score + recall=recall_score(y_trues, y_preds) + from sklearn.metrics import precision_score + precision=precision_score(y_trues, y_preds) + from sklearn.metrics import f1_score + f1=f1_score(y_trues, y_preds) + if f1>best_f1: + best_f1=f1 + best_threshold=threshold + + y_preds=logits[:,1]>best_threshold + from sklearn.metrics import recall_score + recall=recall_score(y_trues, y_preds) + from sklearn.metrics import precision_score + precision=precision_score(y_trues, y_preds) + from sklearn.metrics import f1_score + f1=f1_score(y_trues, y_preds) + result = { + "eval_recall": float(recall), + "eval_precision": float(precision), + "eval_f1": float(f1), + "eval_threshold":best_threshold, + + } + + logger.info("***** Eval results {} *****".format(prefix)) + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(round(result[key],4))) + + return result + +def test(args, model, tokenizer, prefix="",pool=None,best_threshold=0): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_dataset = load_and_cache_examples(args, tokenizer, test=True,pool=pool) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1: + model = 
torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running Test {} *****".format(prefix)) + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + logits=[] + y_trues=[] + for batch in eval_dataloader: + inputs = batch[0].to(args.device) + labels=batch[1].to(args.device) + with torch.no_grad(): + lm_loss,logit = model(inputs,labels) + eval_loss += lm_loss.mean().item() + logits.append(logit.cpu().numpy()) + y_trues.append(labels.cpu().numpy()) + nb_eval_steps += 1 + logits=np.concatenate(logits,0) + y_preds=logits[:,1]>best_threshold + with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f: + for example,pred in zip(eval_dataset.examples,y_preds): + if pred: + f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n') + else: + f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n') + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--train_data_file", default=None, type=str, required=True, + help="The input training data file (a text file).") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--eval_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + parser.add_argument("--test_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + + parser.add_argument("--model_type", default="bert", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--model_name_or_path", default=None, type=str, + help="The model checkpoint for weights initialization.") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--config_name", default="", type=str, + help="Optional pretrained config name or path if not the same as model_name_or_path") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=-1, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." 
+ "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--train_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--epoch', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
+ "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + + pool = multiprocessing.Pool(cpu_cont) + args = parser.parse_args() + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu + args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args.seed) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin') + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch)) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + config.num_labels=2 + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + if args.block_size <= 0: + args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model + args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) + if args.model_name_or_path: + model = 
model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + else: + model = model_class(config) + + model=Model(model,config,tokenizer,args) + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache + + train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False,pool=pool) + + if args.local_rank == 0: + torch.distributed.barrier() + + global_step, tr_loss = train(args, train_dataset, model, tokenizer,pool) + + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_2/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + result=evaluate(args, model, tokenizer,pool=pool) + + if args.do_test and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_2/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + test(args, model, tokenizer,pool=pool,best_threshold=0.5) + + return results + + +if __name__ == "__main__": + main() + diff --git a/Code-Code/Clone-detection-BigCloneBench/code/train.log b/Code-Code/Clone-detection-BigCloneBench/code/train.log new file mode 100644 index 0000000000000000000000000000000000000000..b574a5519cf2294bc0809ddff500c32caac600de --- /dev/null +++ b/Code-Code/Clone-detection-BigCloneBench/code/train.log @@ -0,0 +1,157 @@ +04/23/2024 15:43:47 - WARNING - __main__ - Process rank: -1, device: cuda, n_gpu: 4, distributed training: False, 16-bits training: False +04/23/2024 15:43:49 - INFO - __main__ - Training/evaluation parameters Namespace(train_data_file='../dataset/train.txt', output_dir='../model', eval_data_file='../dataset/valid.txt', test_data_file='../dataset/test.txt', model_type='roberta', model_name_or_path='microsoft/codebert-base', mlm=False, mlm_probability=0.15, config_name='microsoft/codebert-base', tokenizer_name='roberta-base', cache_dir='', block_size=400, do_train=True, do_eval=False, do_test=False, evaluate_during_training=True, do_lower_case=False, train_batch_size=16, eval_batch_size=32, gradient_accumulation_steps=1, learning_rate=5e-05, weight_decay=0.0, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=1.0, max_steps=-1, warmup_steps=0, logging_steps=50, save_steps=50, save_total_limit=None, eval_all_checkpoints=False, no_cuda=False, overwrite_output_dir=False, overwrite_cache=False, seed=123456, epoch=2, fp16=False, fp16_opt_level='O1', local_rank=-1, server_ip='', server_port='', n_gpu=4, device=device(type='cuda'), per_gpu_train_batch_size=4, per_gpu_eval_batch_size=8, start_epoch=0, start_step=0) +04/23/2024 15:43:49 - INFO - __main__ - Creating features from index file at ../dataset/train.txt + 0%| | 0/90102 [00:00', 'public', '_static', '_boolean', '_copy', '(', 'File', '_source', ',', '_File', '_target', ')', '_{', '_try', '_{', '_if', '_(!', 'source', '.', 'ex', 'ists', '())', '_return', '_false', ';', '_target', '.', 'get', 'Parent', 'File', 
'().', 'mk', 'dir', 's', '();', '_Input', 'Stream', '_input', '_=', '_new', '_File', 'Input', 'Stream', '(', 'source', ');', '_Output', 'Stream', '_output', '_=', '_new', '_File', 'Output', 'Stream', '(', 'target', ');', '_byte', '[]', '_buf', '_=', '_new', '_byte', '[', '1024', '];', '_int', '_len', ';', '_while', '_((', 'len', '_=', '_input', '.', 'read', '(', 'buf', '))', '_>', '_0', ')', '_output', '.', 'write', '(', 'buf', ',', '_0', ',', '_len', ');', '_input', '.', 'close', '();', '_output', '.', 'close', '();', '_return', '_true', ';', '_}', '_catch', '_(', 'Exception', '_exc', ')', '_{', '_exc', '.', 'print', 'Stack', 'Tr', 'ace', '();', '_return', '_false', ';', '_}', '_}', '', '', 'private', '_String', '_fetch', 'Compare', 'Content', '()', '_throws', '_IO', 'Exception', '_{', '_URL', '_url', '_=', '_new', '_URL', '(', 'comp', 'are', 'To', ');', '_String', 'Writer', '_sw', '_=', '_new', '_String', 'Writer', '();', '_I', 'OU', 't', 'ils', '.', 'copy', '(', 'url', '.', 'open', 'Stream', '(),', '_sw', ');', '_return', '_sw', '.', 'get', 'Buffer', '().', 'to', 'String', '();', '_}', ''] +04/23/2024 15:44:15 - INFO - __main__ - input_ids: 0 15110 25156 49378 5375 1640 9966 1300 6 8655 1002 43 25522 860 25522 114 48209 17747 4 3463 1952 49338 671 3950 131 1002 4 6460 46102 9966 49123 43503 41292 29 47006 41327 36757 8135 5457 92 8655 48214 36757 1640 17747 4397 38252 36757 4195 5457 92 8655 48293 36757 1640 23976 4397 47893 48992 49125 5457 92 47893 10975 47477 44082 6979 25528 131 150 41006 8476 5457 8135 4 12745 1640 48939 35122 8061 321 43 4195 4 29631 1640 48939 6 321 6 25528 4397 8135 4 22641 47006 4195 4 22641 47006 671 1528 131 35524 2916 36 48847 12562 43 25522 12562 4 17265 43274 12667 4450 47006 671 3950 131 35524 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 22891 26602 23366 45448 45463 43048 6989 38266 48847 25522 33000 46471 5457 92 33000 1640 11828 1322 3972 4397 26602 45489 3514 5457 92 26602 45489 47006 38 5061 90 5290 4 44273 1640 6423 4 12592 36757 49196 3514 4397 671 3514 4 6460 49334 49123 560 34222 47006 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +04/23/2024 15:44:15 - INFO - __main__ - *** Example *** +04/23/2024 15:44:15 - INFO - __main__ - idx: 1 +04/23/2024 15:44:15 - INFO - __main__ - label: 0 +04/23/2024 15:44:15 - INFO - __main__ - input_tokens: ['', 'private', '_void', '_copy', 'File', '(', 'String', 
'_path', ')', '_{', '_try', '_{', '_File', '_src', 'file', '_=', '_new', '_File', '(', 'src', 'dir', ',', '_path', ');', '_File', '_dest', 'file', '_=', '_new', '_File', '(', 'dest', 'dir', ',', '_path', ');', '_File', '_parent', '_=', '_dest', 'file', '.', 'get', 'Parent', 'File', '();', '_if', '_(!', 'parent', '.', 'ex', 'ists', '())', '_{', '_parent', '.', 'mk', 'dir', 's', '();', '_}', '_File', 'Input', 'Stream', '_f', 'is', '_=', '_new', '_File', 'Input', 'Stream', '(', 'src', 'file', ');', '_File', 'Output', 'Stream', '_f', 'os', '_=', '_new', '_File', 'Output', 'Stream', '(', 'dest', 'file', ');', '_int', '_bytes', '_', 'read', '_=', '_0', ';', '_byte', '_buffer', '[]', '_=', '_new', '_byte', '[', '512', '];', '_while', '_((', 'bytes', '_', 'read', '_=', '_f', 'is', '.', 'read', '(', 'buffer', '))', '_!=', '_-', '1', ')', '_{', '_f', 'os', '.', 'write', '(', 'buffer', ',', '_0', ',', '_bytes', '_', 'read', ');', '_}', '_f', 'is', '.', 'close', '();', '_f', 'os', '.', 'close', '();', '_}', '_catch', '_(', 'IO', 'Exception', '_e', ')', '_{', '_throw', '_new', '_Build', 'Exception', '("', 'Error', '_while', '_copying', '_file', '_"', '_+', '_path', ');', '_}', '_}', '', '', 'public', '_static', '_void', '_main', '(', 'String', '[]', '_args', ')', '_throws', '_IO', 'Exception', '_{', '_Post', 'Parameter', '_a', '1', '_=', '_new', '_Post', 'Parameter', '("', 'v', '",', '_Ut', 'ils', '.', 'en', 'code', '("', '1', '.', '0', '")', ');', '_Post', 'Parameter', '_a', '2', '_=', '_new', '_Post', 'Parameter', '("', 'api', '_', 'key', '",', '_Ut', 'ils', '.', 'en', 'code', '(', 'Ren', 'Ren', 'Con', 'stant', '.', 'api', 'Key', '));', '_Post', 'Parameter', '_a', '3', '_=', '_new', '_Post', 'Parameter', '("', 'method', '",', '_Ut', 'ils', '.', 'en', 'code', '("', 'not', 'ifications', '.', 'send', '")', ');', '_Post', 'Parameter', '_a', '4', '_=', '_new', '_Post', 'Parameter', '("', 'call', '_', 'id', '",', '_System', '.', 'n', 'ano', 'Time', '());', '_Post', 'Parameter', '_a', '5', '_=', '_new', '_Post', 'Parameter', '("', 'session', '_', 'key', '",', '_Ut', 'ils', '.', 'en', 'code', '("', '5', '.', '22', 'af', '9', 'ee', '9', 'ad', '8', '42', 'c', '7', 'eb', '5', '2004', 'e', 'ce', '6', 'e', '96', 'b', '10', '.', '864', '00', '.', '12', '98', '646', '000', '-', '350', '7', '279', '14', '")', ');', '_Post', 'Parameter', '_a', '6', '_=', '_new', '_Post', 'Parameter', '("', 'to', '_', 'ids', '",', '_Ut', 'ils', '.', 'en', 'code', '("', '350', '7', '279', '14', '")', ');', '_Post', 'Parameter', '_a', '7', '_=', '_new', '_Post', 'Parameter', '("', 'not', 'ification', '",', '_"', 'åı', 'Ī', 'åĪ', '°', 'äº', 'Ĩ', 'è¦', 'ģ', 'ç', 'Ŀ', '¡', 'è', '§', 'ī', 'çļĦ', 'æĹ', '¶', 'éĹ', '´', 'äº', 'Ĩ', 'ãĢĤ', '");', '_Post', 'Parameter', '_a', '8', '_=', '_new', '_Post', 'Parameter', '("', 'format', '",', '_Ut', 'ils', '.', 'en', 'code', '("', 'JSON', '")', ');', '_Ren', 'Ren', 'Post', 'Parameters', '_ps', '_=', '_new', '_Ren', 'Ren', 'Post', 'Parameters', '(', 'Ut', 'ils', '.', 'en', 'code', '(', 'Ren', 'Ren', 'Con', 'stant', '.', 'secret', '));', '_ps', '.', 'add', 'Parameter', '(', 'a', '1', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '2', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '3', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '4', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '5', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '6', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '7', ');', '_ps', '.', 'add', 'Parameter', '(', 'a', '8', ');', '_System', '.', 'out', '.', 'println', '(', 'Ren', 
'Ren', 'Con', 'stant', '.', 'api', 'Url', '_+', '_"', '?"', '_+', '_ps', '.', 'gener', 'ate', 'Url', '());', '_URL', '_url', '_=', '_new', '_URL', '(', 'Ren', 'Ren', 'Con', 'stant', '.', 'api', 'Url', '_+', '_"', '?"', '_+', '_ps', '.', 'gener', 'ate', 'Url', '());', '_H', 'ttp', 'URL', 'Connection', '_request', '_=', '_(', 'Http', 'URL', 'Connection', ')', '_url', '.', 'open', 'Connection', '();', '_request', '.', 'set', 'Do', 'Output', '(', ''] +04/23/2024 15:44:15 - INFO - __main__ - input_ids: 0 22891 13842 5375 9966 1640 34222 2718 43 25522 860 25522 8655 47215 21710 5457 92 8655 1640 45692 41292 6 2718 4397 8655 15357 21710 5457 92 8655 1640 31549 41292 6 2718 4397 8655 4095 5457 15357 21710 4 6460 46102 9966 47006 114 48209 20985 4 3463 1952 49338 25522 4095 4 43503 41292 29 47006 35524 8655 48214 36757 856 354 5457 92 8655 48214 36757 1640 45692 21710 4397 8655 48293 36757 856 366 5457 92 8655 48293 36757 1640 31549 21710 4397 6979 46487 1215 12745 5457 321 131 47893 21944 48992 5457 92 47893 10975 35033 44082 150 41006 46823 1215 12745 5457 856 354 4 12745 1640 47438 35122 49333 111 134 43 25522 856 366 4 29631 1640 47438 6 321 6 46487 1215 12745 4397 35524 856 354 4 22641 47006 856 366 4 22641 47006 35524 2916 36 6454 48847 364 43 25522 3211 92 15195 48847 46469 30192 150 34236 2870 22 2055 2718 4397 35524 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 15110 25156 13842 1049 1640 34222 48992 49503 43 6989 38266 48847 25522 1869 49752 10 134 5457 92 1869 49752 46469 705 1297 11183 5290 4 225 20414 46469 134 4 288 8070 4397 1869 49752 10 176 5457 92 1869 49752 46469 30602 1215 5282 1297 11183 5290 4 225 20414 1640 34837 34837 9157 20034 4 30602 28152 48749 1869 49752 10 246 5457 92 1869 49752 46469 45416 1297 11183 5290 4 225 20414 46469 3654 14868 4 37785 8070 4397 1869 49752 10 306 5457 92 1869 49752 46469 16395 1215 808 1297 5149 4 282 2601 14699 49291 1869 49752 10 245 5457 92 1869 49752 46469 39035 1215 5282 1297 11183 5290 4 225 20414 46469 245 4 2036 2001 466 1942 466 625 398 3714 438 406 3209 245 34972 242 1755 401 242 5607 428 698 4 40093 612 4 1092 5208 33075 151 12 10056 406 28854 1570 8070 4397 1869 49752 10 401 5457 92 1869 49752 46469 560 1215 7823 1297 11183 5290 4 225 20414 46469 10056 406 28854 1570 8070 4397 1869 49752 10 406 5457 92 1869 49752 46469 3654 5000 1297 22 45262 23133 47166 7487 46499 27819 48334 10172 36714 46 5543 36484 6248 23171 44574 47954 19002 49117 20024 46499 27819 45682 45751 1869 49752 10 398 5457 92 1869 49752 46469 34609 1297 11183 5290 4 225 20414 46469 49437 8070 4397 6340 34837 21585 49725 27778 5457 92 6340 34837 21585 49725 1640 41967 5290 4 225 20414 1640 34837 34837 9157 20034 4 19301 48749 27778 4 4917 49752 1640 102 134 4397 27778 4 4917 49752 1640 102 176 4397 27778 4 4917 49752 1640 102 246 4397 27778 4 4917 49752 1640 102 306 4397 27778 4 4917 49752 1640 102 245 4397 27778 4 4917 49752 1640 102 401 4397 27778 4 4917 49752 1640 102 406 4397 27778 4 4917 49752 1640 102 398 4397 5149 4 995 4 49396 1640 34837 34837 9157 20034 4 30602 49009 2055 22 1917 2055 27778 4 20557 877 49009 49291 33000 46471 5457 92 33000 1640 
34837 34837 9157 20034 4 30602 49009 2055 22 1917 2055 27778 4 20557 877 49009 49291 289 48741 42703 48467 2069 5457 36 49549 42703 48467 43 46471 4 12592 48467 47006 2069 4 8738 8275 48293 1640 2 +04/23/2024 15:44:15 - INFO - __main__ - *** Example *** +04/23/2024 15:44:15 - INFO - __main__ - idx: 2 +04/23/2024 15:44:15 - INFO - __main__ - label: 0 +04/23/2024 15:44:15 - INFO - __main__ - input_tokens: ['', 'public', '_GG', 'Photo', 'Info', '_get', 'Photo', 'Info', '(', 'String', '_photo', 'Id', ',', '_String', '_language', ')', '_throws', '_Illegal', 'State', 'Exception', ',', '_GG', 'Exception', ',', '_Exception', '_{', '_List', '<', 'Name', 'Value', 'P', 'air', '>', '_q', 'params', '_=', '_new', '_Array', 'List', '<', 'Name', 'Value', 'P', 'air', '>', '();', '_q', 'params', '.', 'add', '(', 'new', '_Basic', 'Name', 'Value', 'P', 'air', '("', 'method', '",', '_"', 'gg', '.', 'photos', '.', 'get', 'Info', '")', ');', '_q', 'params', '.', 'add', '(', 'new', '_Basic', 'Name', 'Value', 'P', 'air', '("', 'key', '",', '_this', '.', 'key', '));', '_q', 'params', '.', 'add', '(', 'new', '_Basic', 'Name', 'Value', 'P', 'air', '("', 'photo', '_', 'id', '",', '_photo', 'Id', '));', '_if', '_(', 'null', '_!=', '_language', ')', '_{', '_q', 'params', '.', 'add', '(', 'new', '_Basic', 'Name', 'Value', 'P', 'air', '("', 'language', '",', '_language', '));', '_}', '_String', '_url', '_=', '_REST', '_', 'URL', '_+', '_"', '?"', '_+', '_UR', 'LE', 'nc', 'oded', 'Ut', 'ils', '.', 'format', '(', 'q', 'params', ',', '_"', 'UTF', '-', '8', '");', '_URI', '_ur', 'i', '_=', '_new', '_URI', '(', 'url', ');', '_H', 'ttp', 'Get', '_http', 'get', '_=', '_new', '_H', 'ttp', 'Get', '(', 'uri', ');', '_H', 'ttp', 'Response', '_response', '_=', '_http', 'Client', '.', 'execute', '(', 'http', 'get', ');', '_int', '_status', '_=', '_response', '.', 'get', 'Status', 'Line', '().', 'get', 'Status', 'Code', '();', '_error', 'Check', '(', 'response', ',', '_status', ');', '_Input', 'Stream', '_content', '_=', '_response', '.', 'get', 'Entity', '().', 'get', 'Content', '();', '_GG', 'Photo', 'Info', '_photo', '_=', '_J', 'AX', 'B', '.', 'un', 'm', 'arsh', 'al', '(', 'content', ',', '_GG', 'Photo', 'Info', '.', 'class', ');', '_return', '_photo', ';', '_}', '', '', 'public', '_static', '_Document', '_get', 'Document', '(', 'URL', '_url', ',', '_Entity', 'Res', 'olver', '_res', 'olver', ',', '_boolean', '_valid', 'ating', ')', '_throws', '_Illegal', 'Arg', 'ument', 'Exception', ',', '_IO', 'Exception', '_{', '_if', '_(', 'url', '_==', '_null', ')', '_throw', '_new', '_Illegal', 'Arg', 'ument', 'Exception', '("', 'URL', '_is', '_null', '");', '_Input', 'Stream', '_is', '_=', '_null', ';', '_try', '_{', '_is', '_=', '_url', '.', 'open', 'Stream', '();', '_Input', 'Source', '_source', '_=', '_new', '_Input', 'Source', '(', 'is', ');', '_source', '.', 'set', 'System', 'Id', '(', 'url', '.', 'to', 'String', '());', '_return', '_get', 'Document', '(', 'source', ',', '_res', 'olver', ',', '_valid', 'ating', ');', '_}', '_finally', '_{', '_try', '_{', '_if', '_(', 'is', '_!=', '_null', ')', '_is', '.', 'close', '();', '_}', '_catch', '_(', 'IO', 'Exception', '_io', 'e', ')', '_{', '_}', '_}', '_}', ''] +04/23/2024 15:44:15 - INFO - __main__ - input_ids: 0 15110 43934 2411 39863 120 2411 39863 1640 34222 1345 28081 6 26602 2777 43 6989 36993 13360 48847 6 43934 48847 6 47617 25522 9527 41552 31723 33977 510 2456 15698 2231 49237 5457 92 42719 36583 41552 31723 33977 510 2456 15698 47006 2231 49237 4 4917 1640 4651 17255 31723 33977 510 
2456 46469 45416 1297 22 6149 4 40259 4 6460 39863 8070 4397 2231 49237 4 4917 1640 4651 17255 31723 33977 510 2456 46469 5282 1297 42 4 5282 48749 2231 49237 4 4917 1640 4651 17255 31723 33977 510 2456 46469 17827 1215 808 1297 1345 28081 48749 114 36 15755 49333 2777 43 25522 2231 49237 4 4917 1640 4651 17255 31723 33977 510 2456 46469 19527 1297 2777 48749 35524 26602 46471 5457 40746 1215 42703 2055 22 1917 2055 38343 3850 11326 31819 41967 5290 4 34609 1640 1343 49237 6 22 44987 12 398 45751 45802 11540 118 5457 92 45802 1640 6423 4397 289 48741 14181 2054 6460 5457 92 289 48741 14181 1640 6151 4397 289 48741 47806 1263 5457 2054 47952 4 48592 1640 8166 6460 4397 6979 2194 5457 1263 4 6460 47731 18997 49123 6460 47731 41555 47006 5849 26615 1640 41510 6 2194 4397 41327 36757 1383 5457 1263 4 6460 49448 49123 6460 45463 47006 43934 2411 39863 1345 5457 344 26624 387 4 879 119 14980 337 1640 10166 6 43934 2411 39863 4 4684 4397 671 1345 131 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 15110 25156 27246 120 47088 1640 42703 46471 6 46718 20028 35934 5032 35934 6 49378 8218 1295 43 6989 36993 45621 18816 48847 6 38266 48847 25522 114 36 6423 45994 23796 43 3211 92 36993 45621 18816 48847 46469 42703 16 23796 45751 41327 36757 16 5457 23796 131 860 25522 16 5457 46471 4 12592 36757 47006 41327 7061 1300 5457 92 41327 7061 1640 354 4397 1300 4 8738 36383 28081 1640 6423 4 560 34222 49291 671 120 47088 1640 17747 6 5032 35934 6 8218 1295 4397 35524 1747 25522 860 25522 114 36 354 49333 23796 43 16 4 22641 47006 35524 2916 36 6454 48847 46155 242 43 25522 35524 35524 35524 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 +/home/yifei/anaconda3/envs/nlp/lib/python3.10/site-packages/transformers/optimization.py:411: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning + warnings.warn( +04/23/2024 15:44:16 - INFO - __main__ - ***** Running training ***** +04/23/2024 15:44:16 - INFO - __main__ - Num examples = 90102 +04/23/2024 15:44:16 - INFO - __main__ - Num Epochs = 2 +04/23/2024 15:44:16 - INFO - __main__ - Instantaneous batch size per GPU = 4 +04/23/2024 15:44:16 - INFO - __main__ - Total train batch size (w. 
parallel, distributed & accumulation) = 16 +04/23/2024 15:44:16 - INFO - __main__ - Gradient Accumulation steps = 1 +04/23/2024 15:44:16 - INFO - __main__ - Total optimization steps = 11264 + 0%| | 0/5632 [00:00 1: + outputs = outputs[1] + else: + outputs = outputs[0][:, 0, :] + outputs=outputs.split(bs,0) + + prob_1=(outputs[0]*outputs[1]).sum(-1) + prob_2=(outputs[0]*outputs[2]).sum(-1) + temp=torch.cat((outputs[0],outputs[1]),0) + temp_labels=torch.cat((labels,labels),0) + prob_3= torch.mm(outputs[0],temp.t()) + mask=labels[:,None]==temp_labels[None,:] + prob_3=prob_3*(1-mask.float())-1e9*mask.float() + + prob=torch.softmax(torch.cat((prob_1[:,None],prob_2[:,None],prob_3),-1),-1) + loss=torch.log(prob[:,0]+1e-10) + loss=-loss.mean() + return loss,outputs[0] + + + + diff --git a/Code-Code/Clone-detection-POJ-104/code/run.py b/Code-Code/Clone-detection-POJ-104/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..696b78dc87c7703f8d17b7adca5690a85bf6e6e6 --- /dev/null +++ b/Code-Code/Clone-detection-POJ-104/code/run.py @@ -0,0 +1,632 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. 
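+For Clone-detection-POJ-104 the same skeleton is reused to learn code embeddings: Model in model.py scores an anchor program against a positive example, a negative example and in-batch negatives, and validation tracks a MAP score ('eval_map') for retrieval.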
+""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +import json +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange +import multiprocessing +from model import Model +cpu_cont = multiprocessing.cpu_count() +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertModel, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaModel, RobertaTokenizer, + DistilBertConfig, DistilBertModel, DistilBertTokenizer) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertModel, BertTokenizer), + 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertModel, DistilBertTokenizer) +} + + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, + input_tokens, + input_ids, + index, + label, + + ): + self.input_tokens = input_tokens + self.input_ids = input_ids + self.index=index + self.label=label + + +def convert_examples_to_features(js,tokenizer,args): + #source + code=' '.join(js['code'].split()) + code_tokens=tokenizer.tokenize(code)[:args.block_size-2] + source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token] + source_ids = tokenizer.convert_tokens_to_ids(source_tokens) + padding_length = args.block_size - len(source_ids) + source_ids+=[tokenizer.pad_token_id]*padding_length + return InputFeatures(source_tokens,source_ids,js['index'],int(js['label'])) + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, file_path=None): + self.examples = [] + data=[] + with open(file_path) as f: + for line in f: + line=line.strip() + js=json.loads(line) + data.append(js) + for js in data: + self.examples.append(convert_examples_to_features(js,tokenizer,args)) + if 'train' in file_path: + for idx, example in enumerate(self.examples[:3]): + logger.info("*** Example ***") + logger.info("idx: {}".format(idx)) + logger.info("label: {}".format(example.label)) + logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens])) + logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids)))) + self.label_examples={} + for e in self.examples: + if e.label not in self.label_examples: + self.label_examples[e.label]=[] + self.label_examples[e.label].append(e) + + def __len__(self): + return len(self.examples) + + def __getitem__(self, i): + label=self.examples[i].label + index=self.examples[i].index + labels=list(self.label_examples) + labels.remove(label) + while True: + shuffle_example=random.sample(self.label_examples[label],1)[0] + if shuffle_example.index!=index: + p_example=shuffle_example + break + n_example=random.sample(self.label_examples[random.sample(labels,1)[0]],1)[0] + + return (torch.tensor(self.examples[i].input_ids),torch.tensor(p_example.input_ids), + torch.tensor(n_example.input_ids),torch.tensor(label)) + + +def set_seed(seed=42): + 
random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, + batch_size=args.train_batch_size,num_workers=4,pin_memory=True) + args.max_steps=args.epoch*len( train_dataloader) + args.save_steps=len( train_dataloader) + args.warmup_steps=len( train_dataloader) + args.logging_steps=len( train_dataloader) + args.num_train_epochs=args.epoch + model.to(args.device) + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1, + num_training_steps=args.max_steps) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + if os.path.exists(scheduler_last): + scheduler.load_state_dict(torch.load(scheduler_last)) + if os.path.exists(optimizer_last): + optimizer.load_state_dict(torch.load(optimizer_last)) + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", args.max_steps) + + global_step = args.start_step + tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0 + best_acc=0.0 + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + for idx in range(args.start_epoch, int(args.num_train_epochs)): + bar = train_dataloader + tr_num=0 + train_loss=0 + for step, batch in enumerate(bar): + inputs = batch[0].to(args.device) + p_inputs = batch[1].to(args.device) + n_inputs = batch[2].to(args.device) + labels = batch[3].to(args.device) + model.train() + loss,vec = model(inputs,p_inputs,n_inputs,labels) + + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + tr_num+=1 + train_loss+=loss.item() + if avg_loss==0: + avg_loss=tr_loss + avg_loss=round(train_loss/tr_num,5) + if (step+1)% 100==0: + logger.info("epoch {} step {} loss {}".format(idx,step+1,avg_loss)) + #bar.set_description("epoch {} loss {}".format(idx,avg_loss)) + + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag=True + avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + logging_loss = tr_loss + tr_nb=global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer,eval_when_training=True) + for key, value in results.items(): + logger.info(" %s = %s", key, round(value,4)) + # Save model checkpoint + tr_num=0 + train_loss=0 + + if results['eval_map']>best_acc: + best_acc=results['eval_map'] + logger.info(" "+"*"*20) + logger.info(" Best map:%s",round(best_acc,4)) + logger.info(" "+"*"*20) + + checkpoint_prefix = 'checkpoint-best-map' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model,'module') else model + output_dir = os.path.join(output_dir, '{}'.format('model.bin')) + torch.save(model_to_save.state_dict(), output_dir) + logger.info("Saving model checkpoint to %s", output_dir) + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + + +eval_dataset=None +def evaluate(args, model, 
tokenizer,eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + global eval_dataset + if eval_dataset is None: + eval_dataset = TextDataset(tokenizer, args,args.eval_data_file) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + vecs=[] + labels=[] + for batch in eval_dataloader: + inputs = batch[0].to(args.device) + p_inputs = batch[1].to(args.device) + n_inputs = batch[2].to(args.device) + label = batch[3].to(args.device) + with torch.no_grad(): + lm_loss,vec = model(inputs,p_inputs,n_inputs,label) + eval_loss += lm_loss.mean().item() + vecs.append(vec.cpu().numpy()) + labels.append(label.cpu().numpy()) + nb_eval_steps += 1 + vecs=np.concatenate(vecs,0) + labels=np.concatenate(labels,0) + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.tensor(eval_loss) + + scores=np.matmul(vecs,vecs.T) + dic={} + for i in range(scores.shape[0]): + scores[i,i]=-1000000 + if int(labels[i]) not in dic: + dic[int(labels[i])]=-1 + dic[int(labels[i])]+=1 + sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1] + MAP=[] + for i in range(scores.shape[0]): + cont=0 + label=int(labels[i]) + Avep = [] + for j in range(dic[label]): + index=sort_ids[i,j] + if int(labels[index])==label: + Avep.append((len(Avep)+1)/(j+1)) + MAP.append(sum(Avep)/dic[label]) + + result = { + "eval_loss": float(perplexity), + "eval_map":float(np.mean(MAP)) + } + + + return result + +def test(args, model, tokenizer): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_dataset = TextDataset(tokenizer, args,args.test_data_file) + + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Eval! 
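+ # test(): embed every example with the trained encoder, build the full pairwise dot-product similarity matrix, + # mask the diagonal, and write each query's index plus the indices of its 499 most similar examples to predictions.jsonl.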
+ logger.info("***** Running Test *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + vecs=[] + labels=[] + for batch in eval_dataloader: + inputs = batch[0].to(args.device) + p_inputs = batch[1].to(args.device) + n_inputs = batch[2].to(args.device) + label = batch[3].to(args.device) + with torch.no_grad(): + lm_loss,vec = model(inputs,p_inputs,n_inputs,label) + eval_loss += lm_loss.mean().item() + vecs.append(vec.cpu().numpy()) + labels.append(label.cpu().numpy()) + nb_eval_steps += 1 + vecs=np.concatenate(vecs,0) + labels=np.concatenate(labels,0) + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.tensor(eval_loss) + + scores=np.matmul(vecs,vecs.T) + for i in range(scores.shape[0]): + scores[i,i]=-1000000 + sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1] + indexs=[] + for example in eval_dataset.examples: + indexs.append(example.index) + with open(os.path.join(args.output_dir,"predictions.jsonl"),'w') as f: + for index,sort_id in zip(indexs,sort_ids): + js={} + js['index']=index + js['answers']=[] + for idx in sort_id[:499]: + js['answers'].append(indexs[int(idx)]) + f.write(json.dumps(js)+'\n') + + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--train_data_file", default=None, type=str, required=True, + help="The input training data file (a text file).") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--eval_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + parser.add_argument("--test_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + + parser.add_argument("--model_type", default="bert", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--model_name_or_path", default=None, type=str, + help="The model checkpoint for weights initialization.") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--config_name", default="", type=str, + help="Optional pretrained config name or path if not the same as model_name_or_path") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=-1, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." 
+ "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--train_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--epoch', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
+ "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + + args = parser.parse_args() + + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu + args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + + # Set seed + set_seed(args.seed) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin') + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch)) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + config.num_labels=1 + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + if args.block_size <= 0: + args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model + args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) + if args.model_name_or_path: + model = model_class.from_pretrained(args.model_name_or_path, + 
from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + else: + model = model_class(config) + + model=Model(model,config,tokenizer,args) + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache + + train_dataset = TextDataset(tokenizer, args,args.train_data_file) + if args.local_rank == 0: + torch.distributed.barrier() + + train(args, train_dataset, model, tokenizer) + + + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_1/subject_model.pth' #'checkpoint-best-map/model.bin' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir),strict=False) + model.to(args.device) + result=evaluate(args, model, tokenizer) + logger.info("***** Eval results *****") + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(round(result[key],4))) + + if args.do_test and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_1/subject_model.pth' #'checkpoint-best-map/model.bin' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir),strict=False) + model.to(args.device) + test(args, model, tokenizer) + + return results + + +if __name__ == "__main__": + main() + + + diff --git a/Code-Code/Clone-detection-POJ-104/code/test.sh b/Code-Code/Clone-detection-POJ-104/code/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..f215aa0018b85553e857cc47bd866f730ebdde8d --- /dev/null +++ b/Code-Code/Clone-detection-POJ-104/code/test.sh @@ -0,0 +1,17 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ + --output_dir=../model \ + --model_type=roberta \ + --config_name=microsoft/codebert-base \ + --model_name_or_path=microsoft/codebert-base \ + --tokenizer_name=roberta-base \ + --do_test \ + --train_data_file=../dataset/train.jsonl \ + --test_data_file=../dataset/valid.jsonl \ + --epoch 2 \ + --block_size 400 \ + --train_batch_size 8 \ + --eval_batch_size 16 \ + --learning_rate 2e-5 \ + --max_grad_norm 1.0 \ + --evaluate_during_training \ + --seed 123456 \ No newline at end of file diff --git a/Code-Code/Clone-detection-POJ-104/code/train.sh b/Code-Code/Clone-detection-POJ-104/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..6a8593e4eec3b363dea8329fb8b8a0e136c90ddf --- /dev/null +++ b/Code-Code/Clone-detection-POJ-104/code/train.sh @@ -0,0 +1,18 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ + --output_dir=../model \ + --model_type=roberta \ + --config_name=microsoft/codebert-base \ + --model_name_or_path=microsoft/codebert-base \ + --tokenizer_name=roberta-base \ + --do_train \ + --train_data_file=../dataset/train.jsonl \ + --eval_data_file=../dataset/valid.jsonl \ + --test_data_file=../dataset/test.jsonl \ + --epoch 2 \ + --block_size 400 \ + --train_batch_size 8 \ + --eval_batch_size 16 \ + --learning_rate 2e-5 \ + --max_grad_norm 1.0 \ + --evaluate_during_training \ + --seed 123456 \ No newline at end of file diff --git a/Code-Code/Clone-detection-POJ-104/dataset.zip b/Code-Code/Clone-detection-POJ-104/dataset.zip new 
file mode 100644 index 0000000000000000000000000000000000000000..fb7ef009f04435c45f9a9d478cc22075047b55a4 --- /dev/null +++ b/Code-Code/Clone-detection-POJ-104/dataset.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c13009574c8c3c85c4ec26f6e33e53765479f41fa20239578b473fd11df4d01 +size 7269797 diff --git a/Code-Code/CodeCompletion-token/code/beam.py b/Code-Code/CodeCompletion-token/code/beam.py new file mode 100644 index 0000000000000000000000000000000000000000..45bf7f9f51722ad031d4eec0c5030c83ed9a3213 --- /dev/null +++ b/Code-Code/CodeCompletion-token/code/beam.py @@ -0,0 +1,118 @@ +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy + +class Beam(object): + def __init__(self, size, sos, eos): + self.size = size + self.tt = torch.cuda + # The score for each translation on the beam. + self.scores = self.tt.FloatTensor(size).zero_() + # The backpointers at each time-step. + self.prevKs = [] + # The outputs at each time-step. + self.nextYs = [self.tt.LongTensor(size) + .fill_(0)] + self.nextYs[0][:] = sos + # Has EOS topped the beam yet. + self._eos = eos + self.eosTop = False + # Time and k pair for finished. + self.finished = [] + + def getCurrentState(self): + "Get the outputs for the current timestep." + batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1) + return batch + + def getCurrentOrigin(self): + "Get the backpointers for the current timestep." + return self.prevKs[-1] + + def advance(self, wordLk): + """ + Given prob over words for every last beam `wordLk` and attention + `attnOut`: Compute and update the beam search. + + Parameters: + + * `wordLk`- probs of advancing from the last step (K x words) + * `attnOut`- attention at the last step + + Returns: True if beam search is complete. + """ + numWords = wordLk.size(1) + + # Sum the previous scores. + if len(self.prevKs) > 0: + beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk) + + # Don't let EOS have children. + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] in self._eos: + beamLk[i] = -1e20 + else: + beamLk = wordLk[0] + flatBeamLk = beamLk.view(-1) + bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True) + + self.scores = bestScores + + # bestScoresId is flattened beam x word array, so calculate which + # word and beam each score came from + prevK = bestScoresId // numWords + self.prevKs.append(prevK) + self.nextYs.append((bestScoresId - prevK * numWords)) + + + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] in self._eos: + s = self.scores[i] + self.finished.append((s, len(self.nextYs) - 1, i)) + + # End condition is when top-of-beam is EOS and no global score. + if self.nextYs[-1][0] in self._eos: + self.eosTop = True + + def done(self): + return self.eosTop and len(self.finished) >=self.size + + def getFinal(self): + if len(self.finished) == 0: + self.finished.append((self.scores[0], len(self.nextYs) - 1, 0)) + self.finished.sort(key=lambda a: -a[0]) + if len(self.finished) != self.size: + unfinished=[] + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] not in self._eos: + s = self.scores[i] + unfinished.append((s, len(self.nextYs) - 1, i)) + unfinished.sort(key=lambda a: -a[0]) + self.finished+=unfinished[:self.size-len(self.finished)] + return self.finished[:self.size] + + def getHyp(self, beam_res): + """ + Walk back to construct the full hypothesis. 
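+ Starting from each finished (score, timestep, k) entry, follow the prevKs backpointers from that timestep back to step 0, collect the corresponding nextYs tokens, and return each hypothesis reversed into left-to-right order.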
+ """ + hyps=[] + for _,timestep, k in beam_res: + hyp = [] + for j in range(len(self.prevKs[:timestep]) - 1, -1, -1): + hyp.append(self.nextYs[j+1][k]) + k = self.prevKs[j][k] + hyps.append(hyp[::-1]) + return hyps + + def buildTargetTokens(self, preds): + sentence=[] + for pred in preds: + tokens = [] + for tok in pred: + tokens.append(tok) + if tok in self._eos: + break + sentence.append(tokens) + return sentence diff --git a/Code-Code/CodeCompletion-token/code/dataset.py b/Code-Code/CodeCompletion-token/code/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..d8189dc3db412f76b0786257a4d3e40d27619de8 --- /dev/null +++ b/Code-Code/CodeCompletion-token/code/dataset.py @@ -0,0 +1,261 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import gc +import shutil +import json + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler + +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024): + if args.local_rank==-1: + local_rank=0 + world_size=1 + else: + local_rank=args.local_rank + world_size=torch.distributed.get_world_size() + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + cached_file = os.path.join(args.output_dir, file_type+"_langs_%s"%(args.langs)+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank)) + if os.path.exists(cached_file) and not args.overwrite_cache: + if file_type == 'train': + logger.warning("Loading features from cached file %s", cached_file) + with open(cached_file, 'rb') as handle: + self.inputs = pickle.load(handle) + + else: + self.inputs = [] + if args.langs == 'all': + langs = os.listdir(args.data_dir) + else: + langs = [args.langs] + + data=[] + for lang in langs: + datafile = os.path.join(args.data_dir, lang, file_type+'.pkl') + if file_type == 'train': + logger.warning("Creating features from dataset file at %s", datafile) + # with open(datafile) as f: + # data.extend([json.loads(x)['code'] for idx,x in enumerate(f.readlines()) if idx%world_size==local_rank]) + dataset = pickle.load(open(datafile, 'rb')) + data.extend([' '+' '.join(x['function'].split())+' ' for idx,x in enumerate(dataset) if idx%world_size==local_rank]) + + # random.shuffle(data) + data = data + length = len(data) + logger.warning("Data size: %d"%(length)) + input_ids = [] + for idx,x in enumerate(data): + try: + input_ids.extend(tokenizer.encode(x)) + except Exception: + pass + if idx % (length//10) == 0: + percent = idx / (length//10) * 10 + logger.warning("Rank %d, load %d"%(local_rank, percent)) + del data + gc.collect() + + length = len(input_ids) + for i in range(0, length-block_size, block_size): + self.inputs.append(input_ids[i : i + block_size]) + del input_ids + gc.collect() + + if file_type == 'train': + logger.warning("Rank %d Training %d token, %d samples"%(local_rank, 
length, len(self.inputs))) + logger.warning("Saving features into cached file %s", cached_file) + with open(cached_file, 'wb') as handle: + pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL) + + def __len__(self): + return len(self.inputs) + + def __getitem__(self, item): + return torch.tensor(self.inputs[item]) + +class finetuneDataset(Dataset): + def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024): + if args.local_rank==-1: + local_rank=0 + world_size=1 + else: + local_rank=args.local_rank + world_size=torch.distributed.get_world_size() + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank)) + if os.path.exists(cached_file) and not args.overwrite_cache: + if file_type == 'train': + logger.warning("Loading features from cached file %s", cached_file) + with open(cached_file, 'rb') as handle: + self.inputs = pickle.load(handle) + + else: + self.inputs = [] + + datafile = os.path.join(args.data_dir, f"{file_type}.txt") + if file_type == 'train': + logger.warning("Creating features from dataset file at %s", datafile) + with open(datafile) as f: + data = f.readlines() + + length = len(data) + logger.info("Data size: %d"%(length)) + input_ids = [] + for idx,x in enumerate(data): + x = x.strip() + if x.startswith("") and x.endswith(""): + pass + else: + x = " " + x + " " + try: + input_ids.extend(tokenizer.encode(x)) + except Exception: + pass + if idx % (length//10) == 0: + percent = idx / (length//10) * 10 + logger.warning("Rank %d, load %d"%(local_rank, percent)) + del data + gc.collect() + + length = len(input_ids) // world_size + logger.info(f"tokens: {length*world_size}") + input_ids = input_ids[local_rank*length: (local_rank+1)*length] + + for i in range(0, length-block_size, block_size): + self.inputs.append(input_ids[i : i + block_size]) + del input_ids + gc.collect() + + if file_type == 'train': + logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs))) + logger.warning("Saving features into cached file %s", cached_file) + with open(cached_file, 'wb') as handle: + pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL) + + def __len__(self): + return len(self.inputs) + + def __getitem__(self, item): + return torch.tensor(self.inputs[item]) + +class EvalDataset(Dataset): + def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024): + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)) + if os.path.exists(cached_file) and not args.overwrite_cache: + with open(cached_file, 'rb') as handle: + self.inputs = pickle.load(handle) + + else: + self.inputs = [] + + datafile = os.path.join(args.data_dir, f"{file_type}.txt") + with open(datafile) as f: + data = f.readlines() + + length = len(data) + logger.info("Data size: %d"%(length)) + input_ids = [] + for idx,x in enumerate(data): + x = x.strip() + if x.startswith("") and x.endswith(""): + pass + else: + x = " " + x + " " + try: + input_ids.extend(tokenizer.encode(x)) + except Exception: + pass + if idx % (length//10) == 0: + percent = idx / (length//10) * 10 + logger.warning("load %d"%(percent)) + del data + gc.collect() + + logger.info(f"tokens: {len(input_ids)}") + self.split(input_ids, tokenizer, logger, block_size=block_size) + del input_ids + gc.collect() + + with 
open(cached_file, 'wb') as handle: + pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL) + + def split(self, input_ids, tokenizer, logger, block_size=1024): + sample = [] + i = 0 + while i < len(input_ids): + sample = input_ids[i: i+block_size] + if len(sample) == block_size: + for j in range(block_size): + if tokenizer.convert_ids_to_tokens(sample[block_size-1-j])[0] == '\u0120' or tokenizer.convert_ids_to_tokens(sample[block_size-1-j]).startswith("", "", "", ""]: + total += 1 + if x == y: + correct += 1 + + logger.info(f"Total {total} tokens, accuracy: {round(correct/total*100, 2)}") + +if __name__ == "__main__": + main() diff --git a/Code-Code/CodeCompletion-token/code/model.py b/Code-Code/CodeCompletion-token/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..278c7f1024fd7ae3f432fe6aab6cc9114d35f86e --- /dev/null +++ b/Code-Code/CodeCompletion-token/code/model.py @@ -0,0 +1,68 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +import math +import torch +import torch.nn as nn +import torch.nn.functional as F + +class RNNModel(nn.Module): + """Container module with an encoder, a recurrent module, and a decoder.""" + + def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False): + super(RNNModel, self).__init__() + self.ntoken = ntoken + self.drop = nn.Dropout(dropout) + self.encoder = nn.Embedding(ntoken, ninp) + self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True) + self.decoder = nn.Linear(nhid, ntoken) + self.criterion = nn.CrossEntropyLoss() + + # Optionally tie weights as in: + # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016) + # https://arxiv.org/abs/1608.05859 + # and + # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 
2016) + # https://arxiv.org/abs/1611.01462 + if tie_weights: + if nhid != ninp: + raise ValueError('When using the tied flag, nhid must be equal to emsize') + self.decoder.weight = self.encoder.weight + + self.init_weights() + + self.nhid = nhid + self.nlayers = nlayers + + def init_weights(self): + initrange = 0.1 + nn.init.uniform_(self.encoder.weight, -initrange, initrange) + nn.init.zeros_(self.decoder.weight) + nn.init.uniform_(self.decoder.weight, -initrange, initrange) + + def forward(self, input, hidden=None, labels=None): + emb = self.encoder(input) + if hidden is not None: + output, hidden = self.rnn(emb, hidden) + else: + output, hidden = self.rnn(emb) + output = self.drop(output) + output = self.decoder(output) + # decoded = decoded.view(-1, self.ntoken) + # output = F.log_softmax(decoded, dim=1) + if labels is not None: + shift_logits = output[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + return loss, output, hidden + else: + return output, hidden + + def init_hidden(self, bsz): + weight = next(self.parameters()) + if self.rnn_type == 'LSTM': + return (weight.new_zeros(self.nlayers, bsz, self.nhid), + weight.new_zeros(self.nlayers, bsz, self.nhid)) + else: + return weight.new_zeros(self.nlayers, bsz, self.nhid) + + \ No newline at end of file diff --git a/Code-Code/CodeCompletion-token/code/run_lm.py b/Code-Code/CodeCompletion-token/code/run_lm.py new file mode 100644 index 0000000000000000000000000000000000000000..152b746885ddcd31cad91649b6d36bfc14b88fd2 --- /dev/null +++ b/Code-Code/CodeCompletion-token/code/run_lm.py @@ -0,0 +1,728 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
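+# In this repo the script is used with GPT-2-style checkpoints (e.g. microsoft/CodeGPT-small-java in train.sh) or the +# LSTM baseline (RNNModel in model.py); it covers training, perplexity evaluation, and token-level accuracy (eval_acc).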
+""" +Code completion (both token level and line level) pipeline in CodeXGLUE +""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil +import json + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +from dataset import TextDataset, finetuneDataset, EvalDataset, lineDataset +from beam import Beam + +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +from model import RNNModel + +# logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', +# datefmt='%m/%d/%Y %H:%M:%S', +# level=logging.INFO) +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'rnn': (GPT2Config, RNNModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), + 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +} + + + +def load_and_cache_examples(args, tokenizer, evaluate=False): + if args.not_pretrain: + dataset = finetuneDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train', + block_size=args.block_size) + else: + dataset = TextDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train', + block_size=args.block_size) + return dataset + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + +def update_config(args, config): + # config.n_positions = config.n_ctx = args.block_size + config.vocab_size = args.vocab_size + +def get_special_tokens(path): + lits = json.load(open(path)) + tokens = ["", "", ""] + for lit in lits["str"]: + tokens.append(f"") + for lit in lits["num"]: + tokens.append(f"") + for lit in lits["char"]: + tokens.append(f"") + return tokens + + + +def train(args, train_dataset, model, tokenizer, fh, pool): + """ Train the model """ + if args.local_rank in [-1, 0]: + args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard') + if not os.path.exists(args.tensorboard_dir): + os.makedirs(args.tensorboard_dir) + + args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True) + total_examples = len(train_dataset) * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1) + batch_size = args.batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1) + # if args.max_steps > 0: + # t_total = args.max_steps + # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + if args.num_train_epochs > 0: + t_total = total_examples // batch_size * args.num_train_epochs + args.max_steps = t_total + model.to(args.device) + if args.local_rank 
not in [-1, 0]: + torch.distributed.barrier() + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, + num_training_steps=t_total) + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + # scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + # if os.path.exists(scheduler_last): + # scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu")) + if os.path.exists(optimizer_last): + logger.warning(f"Loading optimizer from {optimizer_last}") + optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu")) + if args.local_rank == 0: + torch.distributed.barrier() + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node], + output_device=args.local_rank%args.gpu_per_node) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", total_examples ) + logger.info(" Num epoch = %d", t_total*batch_size//total_examples) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", batch_size) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = args.start_step + tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0, 0.0, global_step + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + set_seed(args) # Added here for reproducibility (even between python 2 and 3) + + for idx in range(args.start_epoch, int(args.num_train_epochs)): + for step, batch in enumerate(train_dataloader): + inputs, labels = (batch, batch) + inputs = inputs.to(args.device) + labels = labels.to(args.device) + model.train() + outputs = model(inputs, labels=labels) + loss = outputs[0] + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag=True + avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4) + if global_step % args.logging_steps == 0: + logger.info(" steps: %s ppl: %s lr: %s", global_step, round(avg_loss,5), scheduler.get_last_lr()[0]) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + # Log metrics + logging_loss = tr_loss + tr_nb=global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + checkpoint_prefix = "checkpoint" + # Save model checkpoint + if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer, eval_when_training=True) + for key, value in results.items(): + logger.info(" %s = %s", key, round(value,4)) + output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4))) + else: + output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = ( + model.module if hasattr(model, "module") else model + ) # Take care of distributed/parallel training + if args.model_type == "rnn": + torch.save(model_to_save.state_dict(), os.path.join(output_dir, "model.pt")) + else: + model_to_save.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + + torch.save(args, os.path.join(output_dir, "training_args.bin")) + logger.info("Saving model checkpoint to %s", output_dir) + + # _rotate_checkpoints(args, checkpoint_prefix) + last_output_dir = os.path.join(args.output_dir, 'checkpoint-last') + if not os.path.exists(last_output_dir): + os.makedirs(last_output_dir) + if args.model_type == "rnn": + torch.save(model_to_save.state_dict(), os.path.join(last_output_dir, "model.pt")) + else: + model_to_save.save_pretrained(last_output_dir) + tokenizer.save_pretrained(last_output_dir) + idx_file = os.path.join(last_output_dir, 'idx_file.txt') + with open(idx_file, 'w', encoding='utf-8') as idxf: + idxf.write(str(0) + '\n') + + torch.save(optimizer.state_dict(), 
os.path.join(last_output_dir, "optimizer.pt")) + # torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt")) + logger.info("Saving optimizer and scheduler states to %s", last_output_dir) + + step_file = os.path.join(last_output_dir, 'step_file.txt') + with open(step_file, 'w', encoding='utf-8') as stepf: + stepf.write(str(global_step) + '\n') + + + if args.max_steps > 0 and global_step > args.max_steps: + break + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + + if args.max_steps > 0 and global_step > args.max_steps: + break + + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix="", eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + + eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! + #logger.info("***** Running evaluation {} *****".format(prefix)) + #logger.info(" Num examples = %d", len(eval_dataset)) + #logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + + for batch in eval_dataloader: + inputs, labels = (batch, batch) + inputs = inputs.to(args.device) + labels = labels.to(args.device) + + with torch.no_grad(): + outputs = model(inputs, labels=labels) + lm_loss = outputs[0] + eval_loss += lm_loss.mean().item() + nb_eval_steps += 1 + + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.exp(torch.tensor(eval_loss)) + + result = { + "perplexity": float(perplexity) + } + + output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") + with open(output_eval_file, "w") as writer: + #logger.info("***** Eval results {} *****".format(prefix)) + for key in sorted(result.keys()): + #logger.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + + return result + +def eval_acc(args, model, tokenizer, file_type='test'): + """ + Evaluate token level code completion on accuracy. + + This function can only used to evaluate accuracy, but not inference, because the inputs are previous sub-tokens but not tokens. + But it can be guaranteed that the accuracy in this function is the same as the real token level completion. + The reason is: + Assuming the inputs are "context_len = 100 masks = np . zeros (", and the ground truth is "context_len". + Due to our bpe encoding, the model have to outputs "context", "_" and "len" in 3 time step, i.e. gt0="context", gt1="_", gt2="len". + In a real inference scenario: + time step 0, inputs "context_len = 100 masks = np . 
zeros ( ", model outputs: out0; + time step 1, inputs: in1=out0, outputs: out1 + ... until the model outputs a complete token + But in this function, no matter out0 is, in1=gt0="context". + That is to say, in this function, we feed ground truth but not output sub-token when we predict the next token which is split by bpe. + So obviouly we would get different predictions from the real token completion scenario. + However, if we calculate token leval accuracy, + if and only if the model predicts every sub-token correctly, the complete token can be seen correct. + In this situation, out0==gt0, out1==gt1, so it doesn't matter we feed gt or output to model. + In summary, this function can make models oupout the same complete token if this token equals to ground truth, + if not, the model might predict a different token from the real completion scenario, but all wrong. + So it would not affect the token level accuracy. + + I use this trick to speed up evaluation due to the large test set. + """ + eval_dataset = EvalDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size) + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + model.to(args.device) + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node], + output_device=args.local_rank%args.gpu_per_node) + + def DecodeIds(idxs): + codes = "" + for idx in idxs: + to_add = tokenizer.convert_ids_to_tokens(idx) + if tokenizer.convert_ids_to_tokens(idx)[0] == '\u0120': + if not codes.endswith(" "): + codes += " " + to_add[1:] + else: + codes += to_add[1:] + elif ( + idx in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or + tokenizer.convert_ids_to_tokens(idx).startswith(" 0: + try: + all_pred.append(DecodeIds(now_pred).strip().split()[0]) + except IndexError: + all_pred.append("") + all_gt.append(DecodeIds(now_gt).strip()) + now_gt = [] + now_pred = [] + if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or tokenizer.convert_ids_to_tokens(y).startswith(" 0: + try: + all_pred.append(DecodeIds(now_pred).strip().split()[0]) + except IndexError: + all_pred.append("") + all_gt.append(DecodeIds(now_gt).strip()) + now_gt = [y] + now_pred = [pred[i-1]] + try: + all_pred.append(DecodeIds(now_pred).strip().split()[0]) + except IndexError: + all_pred.append("") + all_gt.append(DecodeIds(now_gt).strip()) + now_gt = [] + now_pred = [] + continue + now_gt.append(y) + now_pred.append(pred[i-1]) + assert len(all_pred) == len(all_gt) + + total_pred.extend(all_pred) + total_gt.extend(all_gt) + + + for x, y in zip(all_pred, all_gt): + if y not in ["", "", "", ""]: + total += 1 + if x == y: + correct += 1 + + if step % args.logging_steps == 0: + logger.info(f"{step} are done!") + logger.info(f"{total}, {correct/total}") + + # pickle.dump(total_pred, open(os.path.join(args.output_dir, "preds.pkl"), "wb")) + # pickle.dump(total_gt, open(os.path.join(args.output_dir, "gts.pkl"), "wb")) + + saved_file = os.path.join(args.output_dir, 
"predictions.txt") + total_samples = post_process(args, total_pred, total_gt, open(os.path.join(args.data_dir, f"{file_type}.txt")).readlines(), saved_file) + logger.info(f"Eval on {total_samples}, saved at {saved_file}") + + return total, correct + +def post_process(args, preds, gts, true_gts, saved_file): + wf = open(saved_file, "w") + + cnt = 0 + new_gt = [] + new_pred = [] + for i, (pred,gt) in enumerate(zip(preds,gts)): + if gt in ["", ""]: + continue + new_gt.append(gt) + new_pred.append(pred.replace(" ", "")) + if gt == "": + gt_str = " ".join(new_gt) + pred_str = " ".join(new_pred) + assert gt_str == true_gts[cnt].strip(), f"{cnt} sample gt_str != true_gt" + wf.write(pred_str+"\n") + cnt += 1 + new_gt = [] + new_pred = [] + + return cnt + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data path.") + parser.add_argument("--langs", default=None, type=str, required=True, + help="Languages to train, if all, train all languages in data_dir") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--model_type", default="gpt2", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--pretrain_dir", default="", type=str, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--config_dir", type=str, + help="config name. Required when training from scratch") + parser.add_argument("--tokenizer_dir", type=str, + help="Pre-trained tokenizer dir. Required when training from scratch") + parser.add_argument("--lit_file", type=str, + help="literals json file") + parser.add_argument("--load_name", type=str, default="pretrained", + help="Load pretrained model name") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=1024, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." 
+ "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--per_gpu_train_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=12, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=1000, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=5000, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--not_pretrain', action='store_true', + help="use different dataset") + + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
+ "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument("--node_index", type=int, default=-1, + help="node index if multi-node running") + parser.add_argument("--gpu_per_node", type=int, default=-1, + help="num of gpus per node") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + parser.add_argument('--log_file', type=str, default='') + parser.add_argument('--tensorboard_dir', type=str) + + pool = None + args = parser.parse_args() + + # args.output_dir = os.path.join(args.output_dir, args.dataset) + + if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm: + raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm " + "flag (masked language modeling).") + + if os.path.exists(args.output_dir) and os.listdir( + args.output_dir) and args.do_train and not args.overwrite_output_dir: + raise ValueError( + "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format( + args.output_dir)) + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + logger.info("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node)) + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.local_rank += args.node_index * args.gpu_per_node + args.n_gpu = 1 + args.device = device + # args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, + torch.distributed.get_world_size() if args.local_rank != -1 else 1) + + # 使用FileHandler输出到文件 + fh = logging.FileHandler(args.log_file) + logger.addHandler(fh) + + # Set seed + set_seed(args) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + args.pretrain_dir = os.path.join(checkpoint_last) + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 
'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} steps".format(checkpoint_last, args.start_step)) + + # get special tokens + special_tokens = get_special_tokens(args.lit_file) + + # Load pre-trained model + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + pretrained = checkpoint_last #args.pretrain_dir + if pretrained: + tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, sep_token='', bos_token='', eos_token='', pad_token='', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens) + if args.model_type == "rnn": + model = model_class(len(tokenizer), 768, 768, 1) + model_last = os.path.join(pretrained, 'model.pt') + if os.path.exists(model_last): + logger.warning(f"Loading model from {model_last}") + model.load_state_dict(torch.load(model_last, map_location="cpu")) + else: + model = model_class.from_pretrained(pretrained) + model.resize_token_embeddings(len(tokenizer)) + else: + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, sep_token='', bos_token='', eos_token='', pad_token='', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens) + args.vocab_size = len(tokenizer) + if args.model_type == "rnn": + model = model_class(len(tokenizer), 768, 768, 1) + else: + config = config_class.from_pretrained(args.config_dir) + model = model_class(config) + model.resize_token_embeddings(len(tokenizer)) + + + model_parameters = model.parameters() + num_params = sum([np.prod(p.size()) for p in model_parameters]) + logger.info(f"Model has a total of {num_params} trainable parameters") + + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False) + + global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool) + logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) + + # Only works on single GPU + if args.do_eval: + checkpoint_prefix = 'epoch_5/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + # 不要用dev文件,否则会在EvalDataset的__init__中检测不通过,被exit + # dev_total, dev_cr = eval_acc(args, model, tokenizer, 'dev') + # logger.info(f"Dev total tokens: {dev_total}, accuracy: {dev_cr/dev_total}") + test_total, test_cr = eval_acc(args, model, tokenizer, 'test') + logger.info(f"Test total tokens: {test_total}, accuracy: {test_cr/test_total}") + + +if __name__ == "__main__": + main() diff --git a/Code-Code/CodeCompletion-token/code/train.sh b/Code-Code/CodeCompletion-token/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ac8122197f0b9da18651055851b56e4a0398d35 --- /dev/null +++ b/Code-Code/CodeCompletion-token/code/train.sh @@ -0,0 +1,31 @@ +LANG=java # set python for py150 +DATADIR=../dataset/javaCorpus/token_completion +LITFILE=../dataset/javaCorpus/literals.json +OUTPUTDIR=../model/javaCorpus +PRETRAINDIR=microsoft/CodeGPT-small-java # 
microsoft/CodeGPT-small-py for py150 +LOGFILE=train_javaCorpus.log +PER_NODE_GPU=4 # modify YOUR_GPU_NUM + +CUDA_VISIBLE_DEVICES=0,1,2,3 python run_lm.py \ + --data_dir=$DATADIR \ + --lit_file=$LITFILE \ + --langs=$LANG \ + --output_dir=$OUTPUTDIR \ + --pretrain_dir=$PRETRAINDIR \ + --log_file=$LOGFILE \ + --model_type=gpt2 \ + --block_size=512 \ + --do_train \ + --gpu_per_node $PER_NODE_GPU \ + --learning_rate=8e-5 \ + --weight_decay=0.01 \ + --evaluate_during_training \ + --per_gpu_train_batch_size=1 \ + --per_gpu_eval_batch_size=4 \ + --gradient_accumulation_steps=4 \ + --num_train_epochs=5 \ + --logging_steps=100 \ + --save_steps=1000 \ + --seed=42 \ + --overwrite_output_dir \ + --not_pretrain \ No newline at end of file diff --git a/Code-Code/CodeCompletion-token/data.zip b/Code-Code/CodeCompletion-token/data.zip new file mode 100644 index 0000000000000000000000000000000000000000..9ccbe06b7f4cce95fd235de2952ff0df26511cc6 --- /dev/null +++ b/Code-Code/CodeCompletion-token/data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3fe81ae13261569dcb0147143f6be01900bdea8fc19394b931a2f6be720dac03 +size 16149700 diff --git a/Code-Code/Defect-detection/code/eval.sh b/Code-Code/Defect-detection/code/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..6f647ecdf2dc1e95e6689608c28b3eb1b27d2678 --- /dev/null +++ b/Code-Code/Defect-detection/code/eval.sh @@ -0,0 +1,18 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ + --output_dir=../model \ + --model_type=roberta \ + --tokenizer_name=microsoft/codebert-base \ + --model_name_or_path=microsoft/codebert-base \ + --do_eval \ + --do_test \ + --train_data_file=../dataset/train.jsonl \ + --eval_data_file=../dataset/valid.jsonl \ + --test_data_file=../dataset/valid.jsonl \ + --epoch 5 \ + --block_size 400 \ + --train_batch_size 32 \ + --eval_batch_size 64 \ + --learning_rate 2e-5 \ + --max_grad_norm 1.0 \ + --evaluate_during_training \ + --seed 123456 \ No newline at end of file diff --git a/Code-Code/Defect-detection/code/evaluate.sh b/Code-Code/Defect-detection/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..6c0c31b51b1c8fff52bd7f6f502844b5f3e7f173 --- /dev/null +++ b/Code-Code/Defect-detection/code/evaluate.sh @@ -0,0 +1 @@ +python evaluator.py -a ../dataset/valid.jsonl -p ../model/predictions.txt \ No newline at end of file diff --git a/Code-Code/Defect-detection/code/evaluator.py b/Code-Code/Defect-detection/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..27e717f1f1c348e27afce754dc5f515112357bab --- /dev/null +++ b/Code-Code/Defect-detection/code/evaluator.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
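
The evaluate.sh just above feeds the Defect-detection evaluator (defined next) a jsonl ground-truth file and a predictions.txt file. A minimal sketch of the two input formats, using made-up file names (toy_valid.jsonl, toy_predictions.txt) and made-up indices/labels; the expected accuracy is what the evaluator's calculate_scores would report for these toys:

import json

# toy ground truth in the jsonl layout read_answers() expects (only idx and target are used)
with open("toy_valid.jsonl", "w") as f:
    for idx, target in [(0, 1), (1, 0), (2, 1)]:
        f.write(json.dumps({"idx": idx, "target": target, "func": "int main() { return 0; }"}) + "\n")

# toy predictions in the whitespace-separated "idx label" layout written later by run.py's test()
with open("toy_predictions.txt", "w") as f:
    f.write("0\t1\n1\t0\n2\t0\n")

# python evaluator.py -a toy_valid.jsonl -p toy_predictions.txt   ->   {'Acc': 0.666...}
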
+import logging +import sys +import json +import numpy as np + +def read_answers(filename): + answers={} + with open(filename) as f: + for line in f: + line=line.strip() + js=json.loads(line) + answers[js['idx']]=js['target'] + return answers + +def read_predictions(filename): + predictions={} + with open(filename) as f: + for line in f: + line=line.strip() + idx,label=line.split() + predictions[int(idx)]=int(label) + return predictions + +def calculate_scores(answers,predictions): + Acc=[] + for key in answers: + if key not in predictions: + logging.error("Missing prediction for index {}.".format(key)) + sys.exit() + Acc.append(answers[key]==predictions[key]) + + scores={} + scores['Acc']=np.mean(Acc) + return scores + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for Defect Detection dataset.') + parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.") + parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.") + + + args = parser.parse_args() + answers=read_answers(args.answers) + predictions=read_predictions(args.predictions) + scores=calculate_scores(answers,predictions) + print(scores) + +if __name__ == '__main__': + main() diff --git a/Code-Code/Defect-detection/code/model.py b/Code-Code/Defect-detection/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..8cd139829a4331a0fa72064f0ea18a778c22d41b --- /dev/null +++ b/Code-Code/Defect-detection/code/model.py @@ -0,0 +1,45 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +from torch.nn import CrossEntropyLoss, MSELoss + + + +class Model(nn.Module): + def __init__(self, encoder,config,tokenizer,args): + super(Model, self).__init__() + self.encoder = encoder + self.config=config + self.tokenizer=tokenizer + self.args=args + + # Define dropout layer, dropout_probability is taken from args. + self.dropout = nn.Dropout(args.dropout_probability) + + + def forward(self, input_ids=None,labels=None, return_vec=None): + outputs=self.encoder(input_ids,attention_mask=input_ids.ne(1)) + + if return_vec: + return outputs.pooler_output + outputs = outputs[0] + + # Apply dropout + outputs = self.dropout(outputs) + + logits=outputs + prob=torch.sigmoid(logits) + if labels is not None: + labels=labels.float() + loss=torch.log(prob[:,0]+1e-10)*labels+torch.log((1-prob)[:,0]+1e-10)*(1-labels) + loss=-loss.mean() + return loss,prob + else: + return prob + + + diff --git a/Code-Code/Defect-detection/code/run.py b/Code-Code/Defect-detection/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..35d0eecdc36132b091d4ee9dd0d04a918ce4ff90 --- /dev/null +++ b/Code-Code/Defect-detection/code/run.py @@ -0,0 +1,598 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
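
The Model class above produces a single logit per function and assembles its loss by hand from torch.sigmoid and torch.log. A minimal check with toy tensors (the logit and label values below are arbitrary) showing that, apart from the 1e-10 smoothing term, this is ordinary binary cross-entropy on the first column of the sigmoid output:

import torch
import torch.nn.functional as F

logits = torch.tensor([[2.0], [-1.0], [0.5]])   # toy [batch, 1] logits
labels = torch.tensor([1.0, 0.0, 1.0])

prob = torch.sigmoid(logits)
manual = -(torch.log(prob[:, 0] + 1e-10) * labels
           + torch.log((1 - prob)[:, 0] + 1e-10) * (1 - labels)).mean()
reference = F.binary_cross_entropy(prob[:, 0], labels)
print(manual.item(), reference.item())          # agree up to the 1e-10 smoothing
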
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. +""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +import json +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange +import multiprocessing +from model import Model +cpu_cont = multiprocessing.cpu_count() +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, BertForSequenceClassification, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertForSequenceClassification, DistilBertTokenizer) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer), + 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer) +} + + + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, + input_tokens, + input_ids, + idx, + label, + + ): + self.input_tokens = input_tokens + self.input_ids = input_ids + self.idx=str(idx) + self.label=label + + +def convert_examples_to_features(js,tokenizer,args): + #source + code=' '.join(js['func'].split()) + code_tokens=tokenizer.tokenize(code)[:args.block_size-2] + source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token] + source_ids = tokenizer.convert_tokens_to_ids(source_tokens) + padding_length = args.block_size - len(source_ids) + source_ids+=[tokenizer.pad_token_id]*padding_length + return InputFeatures(source_tokens,source_ids,js['idx'],js['target']) + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, file_path=None): + self.examples = [] + with open(file_path) as f: + for line in f: + js=json.loads(line.strip()) + self.examples.append(convert_examples_to_features(js,tokenizer,args)) + if 'train' in file_path: + for idx, example in enumerate(self.examples[:3]): + logger.info("*** Example ***") + logger.info("idx: {}".format(idx)) + logger.info("label: {}".format(example.label)) + logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens])) + logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids)))) + + def __len__(self): + return len(self.examples) + + def __getitem__(self, i): + return torch.tensor(self.examples[i].input_ids),torch.tensor(self.examples[i].label) + + +def set_seed(seed=42): + random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + 
torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, + batch_size=args.train_batch_size,num_workers=4,pin_memory=True) + args.max_steps=args.epoch*len( train_dataloader) + args.save_steps=len( train_dataloader) + args.warmup_steps=len( train_dataloader) + args.logging_steps=len( train_dataloader) + args.num_train_epochs=args.epoch + model.to(args.device) + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1, + num_training_steps=args.max_steps) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + if os.path.exists(scheduler_last): + scheduler.load_state_dict(torch.load(scheduler_last)) + if os.path.exists(optimizer_last): + optimizer.load_state_dict(torch.load(optimizer_last)) + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", args.max_steps) + + global_step = args.start_step + tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0 + best_mrr=0.0 + best_acc=0.0 + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + + # Initialize early stopping parameters at the start of training + early_stopping_counter = 0 + best_loss = None + + for idx in range(args.start_epoch, int(args.num_train_epochs)): + bar = tqdm(train_dataloader,total=len(train_dataloader)) + tr_num=0 + train_loss=0 + for step, batch in enumerate(bar): + inputs = batch[0].to(args.device) + labels=batch[1].to(args.device) + model.train() + loss,logits = model(inputs,labels) + + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + tr_num+=1 + train_loss+=loss.item() + if avg_loss==0: + avg_loss=tr_loss + avg_loss=round(train_loss/tr_num,5) + bar.set_description("epoch {} loss {}".format(idx,avg_loss)) + + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag=True + avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + logging_loss = tr_loss + tr_nb=global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer,eval_when_training=True) + for key, value in results.items(): + logger.info(" %s = %s", key, round(value,4)) + # Save model checkpoint + + if results['eval_acc']>best_acc: + best_acc=results['eval_acc'] + logger.info(" "+"*"*20) + logger.info(" Best acc:%s",round(best_acc,4)) + logger.info(" "+"*"*20) + + checkpoint_prefix = 'checkpoint-best-acc' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model,'module') else model + output_dir = os.path.join(output_dir, '{}'.format('model.bin')) + torch.save(model_to_save.state_dict(), output_dir) + logger.info("Saving model checkpoint to %s", output_dir) + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + + # Calculate average loss for the epoch + avg_loss = train_loss / tr_num + + # Check for early 
stopping condition + if args.early_stopping_patience is not None: + if best_loss is None or avg_loss < best_loss - args.min_loss_delta: + best_loss = avg_loss + early_stopping_counter = 0 + else: + early_stopping_counter += 1 + if early_stopping_counter >= args.early_stopping_patience: + logger.info("Early stopping") + break # Exit the loop early + + + + +def evaluate(args, model, tokenizer,eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + + eval_dataset = TextDataset(tokenizer, args,args.eval_data_file) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + logits=[] + labels=[] + for batch in eval_dataloader: + inputs = batch[0].to(args.device) + label=batch[1].to(args.device) + with torch.no_grad(): + lm_loss,logit = model(inputs,label) + eval_loss += lm_loss.mean().item() + logits.append(logit.cpu().numpy()) + labels.append(label.cpu().numpy()) + nb_eval_steps += 1 + logits=np.concatenate(logits,0) + labels=np.concatenate(labels,0) + preds=logits[:,0]>0.5 + eval_acc=np.mean(labels==preds) + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.tensor(eval_loss) + + result = { + "eval_loss": float(perplexity), + "eval_acc":round(eval_acc,4), + } + return result + +def test(args, model, tokenizer): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_dataset = TextDataset(tokenizer, args,args.test_data_file) + + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Eval! 
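
evaluate() above turns the single probability column returned by the model into hard labels with a fixed 0.5 threshold (preds = logits[:, 0] > 0.5) and reports plain accuracy; test() applies the same threshold when it writes predictions.txt. A small standalone illustration of that step with toy numbers:

import numpy as np

probs = np.array([[0.91], [0.12], [0.55], [0.40]])   # toy model outputs, one column per example
labels = np.array([1, 0, 0, 1])

preds = probs[:, 0] > 0.5        # True, False, True, False
print(np.mean(labels == preds))  # 0.5
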
+ logger.info("***** Running Test *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + logits=[] + labels=[] + for batch in tqdm(eval_dataloader,total=len(eval_dataloader)): + inputs = batch[0].to(args.device) + label=batch[1].to(args.device) + with torch.no_grad(): + logit = model(inputs) + logits.append(logit.cpu().numpy()) + labels.append(label.cpu().numpy()) + + logits=np.concatenate(logits,0) + labels=np.concatenate(labels,0) + preds=logits[:,0]>0.5 + with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f: + for example,pred in zip(eval_dataset.examples,preds): + if pred: + f.write(example.idx+'\t1\n') + else: + f.write(example.idx+'\t0\n') + + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--train_data_file", default=None, type=str, required=True, + help="The input training data file (a text file).") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--eval_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + parser.add_argument("--test_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + + parser.add_argument("--model_type", default="bert", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--model_name_or_path", default=None, type=str, + help="The model checkpoint for weights initialization.") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--config_name", default="", type=str, + help="Optional pretrained config name or path if not the same as model_name_or_path") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=-1, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." 
+ "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--train_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--epoch', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
+ "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + # Add early stopping parameters and dropout probability parameters + parser.add_argument("--early_stopping_patience", type=int, default=None, + help="Number of epochs with no improvement after which training will be stopped.") + parser.add_argument("--min_loss_delta", type=float, default=0.001, + help="Minimum change in the loss required to qualify as an improvement.") + parser.add_argument('--dropout_probability', type=float, default=0, help='dropout probability') + + + + + args = parser.parse_args() + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu + args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + + + # Set seed + set_seed(args.seed) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin') + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch)) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + config.num_labels=1 + 
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + if args.block_size <= 0: + args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model + args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) + if args.model_name_or_path: + model = model_class.from_pretrained(args.model_name_or_path, + from_tf=bool('.ckpt' in args.model_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + else: + model = model_class(config) + + model=Model(model,config,tokenizer,args) + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache + + train_dataset = TextDataset(tokenizer, args,args.train_data_file) + if args.local_rank == 0: + torch.distributed.barrier() + + train(args, train_dataset, model, tokenizer) + + + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_5/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + result=evaluate(args, model, tokenizer) + logger.info("***** Eval results *****") + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(round(result[key],4))) + + if args.do_test and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_5/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + test(args, model, tokenizer) + + return results + + +if __name__ == "__main__": + main() + + diff --git a/Code-Code/Defect-detection/code/train.sh b/Code-Code/Defect-detection/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..6c4b5e3e17e4d95f1279a825aa5f82e4d34edbb2 --- /dev/null +++ b/Code-Code/Defect-detection/code/train.sh @@ -0,0 +1,17 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ + --output_dir=../model \ + --model_type=roberta \ + --tokenizer_name=microsoft/codebert-base \ + --model_name_or_path=microsoft/codebert-base \ + --do_train \ + --train_data_file=../dataset/train.jsonl \ + --eval_data_file=../dataset/valid.jsonl \ + --test_data_file=../dataset/test.jsonl \ + --epoch 5 \ + --block_size 400 \ + --train_batch_size 32 \ + --eval_batch_size 64 \ + --learning_rate 2e-5 \ + --max_grad_norm 1.0 \ + --evaluate_during_training \ + --seed 123456 \ No newline at end of file diff --git a/Code-Code/Defect-detection/dataset.zip b/Code-Code/Defect-detection/dataset.zip new file mode 100644 index 0000000000000000000000000000000000000000..e7cb35a71d30510cf2b17fd8933728de1eb785af --- /dev/null +++ b/Code-Code/Defect-detection/dataset.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fafb4004eda1a4e1d4392b002e3de6f542d2a2b6701ec9758f25791bc9da49d6 +size 14533467 diff --git a/Code-Code/code-refinement/code/bleu.py b/Code-Code/code-refinement/code/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..47e1335796082b5568089150d7799d37c0527ada --- 
/dev/null +++ b/Code-Code/code-refinement/code/bleu.py @@ -0,0 +1,134 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Python implementation of BLEU and smooth-BLEU. + +This module provides a Python implementation of BLEU and smooth-BLEU. +Smooth BLEU is computed following the method outlined in the paper: +Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic +evaluation metrics for machine translation. COLING 2004. +""" + +import collections +import math + + +def _get_ngrams(segment, max_order): + """Extracts all n-grams upto a given maximum order from an input segment. + + Args: + segment: text segment from which n-grams will be extracted. + max_order: maximum length in tokens of the n-grams returned by this + methods. + + Returns: + The Counter containing all n-grams upto max_order in segment + with a count of how many times each n-gram occurred. + """ + ngram_counts = collections.Counter() + for order in range(1, max_order + 1): + for i in range(0, len(segment) - order + 1): + ngram = tuple(segment[i:i+order]) + ngram_counts[ngram] += 1 + return ngram_counts + + +def compute_bleu(reference_corpus, translation_corpus, max_order=4, + smooth=False): + """Computes BLEU score of translated segments against one or more references. + + Args: + reference_corpus: list of lists of references for each translation. Each + reference should be tokenized into a list of tokens. + translation_corpus: list of translations to score. Each translation + should be tokenized into a list of tokens. + max_order: Maximum n-gram order to use when computing BLEU score. + smooth: Whether or not to apply Lin et al. 2004 smoothing. + + Returns: + 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram + precisions and brevity penalty. + """ + matches_by_order = [0] * max_order + possible_matches_by_order = [0] * max_order + reference_length = 0 + translation_length = 0 + for (references, translation) in zip(reference_corpus, + translation_corpus): + reference_length += min(len(r) for r in references) + translation_length += len(translation) + + merged_ref_ngram_counts = collections.Counter() + for reference in references: + merged_ref_ngram_counts |= _get_ngrams(reference, max_order) + translation_ngram_counts = _get_ngrams(translation, max_order) + overlap = translation_ngram_counts & merged_ref_ngram_counts + for ngram in overlap: + matches_by_order[len(ngram)-1] += overlap[ngram] + for order in range(1, max_order+1): + possible_matches = len(translation) - order + 1 + if possible_matches > 0: + possible_matches_by_order[order-1] += possible_matches + + precisions = [0] * max_order + for i in range(0, max_order): + if smooth: + precisions[i] = ((matches_by_order[i] + 1.) 
/ + (possible_matches_by_order[i] + 1.)) + else: + if possible_matches_by_order[i] > 0: + precisions[i] = (float(matches_by_order[i]) / + possible_matches_by_order[i]) + else: + precisions[i] = 0.0 + + if min(precisions) > 0: + p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions) + geo_mean = math.exp(p_log_sum) + else: + geo_mean = 0 + + ratio = float(translation_length) / reference_length + + if ratio > 1.0: + bp = 1. + else: + bp = math.exp(1 - 1. / ratio) + + bleu = geo_mean * bp + + return (bleu, precisions, bp, ratio, translation_length, reference_length) + + +def _bleu(ref_file, trans_file, subword_option=None): + max_order = 4 + smooth = True + ref_files = [ref_file] + reference_text = [] + for reference_filename in ref_files: + with open(reference_filename) as fh: + reference_text.append(fh.readlines()) + per_segment_references = [] + for references in zip(*reference_text): + reference_list = [] + for reference in references: + reference_list.append(reference.strip().split()) + per_segment_references.append(reference_list) + translations = [] + with open(trans_file) as fh: + for line in fh: + translations.append(line.strip().split()) + bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth) + return round(100 * bleu_score,2) \ No newline at end of file diff --git a/Code-Code/code-refinement/code/eval.sh b/Code-Code/code-refinement/code/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..2531f001df8701802775e733df0de6130ade40b3 --- /dev/null +++ b/Code-Code/code-refinement/code/eval.sh @@ -0,0 +1,17 @@ +pretrained_model=microsoft/codebert-base +output_dir=../model +data_size=small + +CUDA_VISIBLE_DEVICES=1 python run.py \ + --do_test \ + --model_type roberta \ + --model_name_or_path $pretrained_model \ + --config_name roberta-base \ + --tokenizer_name roberta-base \ + --load_model_path $output_dir/epoch_34/subject_model.pth \ + --dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \ + --output_dir $output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --beam_size 5 \ + --eval_batch_size 16 \ No newline at end of file diff --git a/Code-Code/code-refinement/code/evaluate.sh b/Code-Code/code-refinement/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..8b836142c0363bfa26ab9001db9d8c8dd5096684 --- /dev/null +++ b/Code-Code/code-refinement/code/evaluate.sh @@ -0,0 +1,3 @@ +python evaluator.py \ +-ref ../data/small/valid.buggy-fixed.fixed \ +-pre ../model/test_0.output \ No newline at end of file diff --git a/Code-Code/code-refinement/code/evaluator.py b/Code-Code/code-refinement/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ce76ff1ac983e87c1ff63aafb42ef50739f955 --- /dev/null +++ b/Code-Code/code-refinement/code/evaluator.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
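
Before the code-refinement evaluator below calls into it, here is a minimal usage sketch of compute_bleu from the bleu.py module added above (assuming the script runs from the same directory so the import resolves; the token lists are made up):

from bleu import compute_bleu

# one list of reference token lists per translation
references = [[["public", "void", "foo", "(", ")", "{", "}"]]]
translations = [["public", "void", "foo", "(", ")", "{", "}"]]

bleu, precisions, bp, ratio, t_len, r_len = compute_bleu(references, translations, max_order=4, smooth=True)
print(round(100 * bleu, 2))   # 100.0 for an exact match
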
+import logging +import sys + +from bleu import _bleu + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.') + parser.add_argument('--references', '-ref',help="filename of the labels, in txt format.") + parser.add_argument('--predictions', '-pre',help="filename of the leaderboard predictions, in txt format.") + + args = parser.parse_args() + + refs = [x.strip() for x in open(args.references, 'r', encoding='utf-8').readlines()] + pres = [x.strip() for x in open(args.predictions, 'r', encoding='utf-8').readlines()] + + assert len(refs) == len(pres) + + length = len(refs) + count = 0 + for i in range(length): + r = refs[i] + p = pres[i] + if r == p: + count += 1 + acc = round(count/length*100, 2) + + bleu_score = round(_bleu(args.references, args.predictions),2) + + print('BLEU:', bleu_score, '; Acc:', acc) + +if __name__ == '__main__': + main() diff --git a/Code-Code/code-refinement/code/model.py b/Code-Code/code-refinement/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..5f806abfe33f8cb15ab8c57fcf4a13a558d13da4 --- /dev/null +++ b/Code-Code/code-refinement/code/model.py @@ -0,0 +1,223 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +class Seq2Seq(nn.Module): + """ + Build Seqence-to-Sequence. + + Parameters: + + * `encoder`- encoder of seq2seq model. e.g. roberta + * `decoder`- decoder of seq2seq model. e.g. transformer + * `config`- configuration of encoder model. + * `beam_size`- beam size for beam search. + * `max_length`- max length of target for beam search. + * `sos_id`- start of symbol ids in target for beam search. + * `eos_id`- end of symbol ids in target for beam search. + """ + def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None): + super(Seq2Seq, self).__init__() + self.encoder = encoder + self.decoder=decoder + self.config=config + self.register_buffer("bias", torch.tril(torch.ones(2048, 2048))) + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.lsm = nn.LogSoftmax(dim=-1) + self.tie_weights() + + self.beam_size=beam_size + self.max_length=max_length + self.sos_id=sos_id + self.eos_id=eos_id + + def _tie_or_clone_weights(self, first_module, second_module): + """ Tie or clone module weights depending of weither we are using TorchScript or not + """ + if self.config.torchscript: + first_module.weight = nn.Parameter(second_module.weight.clone()) + else: + first_module.weight = second_module.weight + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. 
+ """ + self._tie_or_clone_weights(self.lm_head, + self.encoder.embeddings.word_embeddings) + + def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None, return_vec=None): + outputs = self.encoder(source_ids, attention_mask=source_mask) + if return_vec: + return outputs.pooler_output + + encoder_output = outputs[0].permute([1,0,2]).contiguous() + + if target_ids is not None: + attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]]) + tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous() + out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool()) + hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous() + lm_logits = self.lm_head(hidden_states) + # Shift so that tokens < n predict n + active_loss = target_mask[..., 1:].ne(0).view(-1) == 1 + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = target_ids[..., 1:].contiguous() + # Flatten the tokens + loss_fct = nn.CrossEntropyLoss(ignore_index=-1) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss], + shift_labels.view(-1)[active_loss]) + + outputs = loss,loss*active_loss.sum(),active_loss.sum() + return outputs + else: + #Predict + preds=[] + zero=torch.cuda.LongTensor(1).fill_(0) + for i in range(source_ids.shape[0]): + context=encoder_output[:,i:i+1] + context_mask=source_mask[i:i+1,:] + beam = Beam(self.beam_size,self.sos_id,self.eos_id) + input_ids=beam.getCurrentState() + context=context.repeat(1, self.beam_size,1) + context_mask=context_mask.repeat(self.beam_size,1) + for _ in range(self.max_length): + if beam.done(): + break + attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]]) + tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous() + out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool()) + out = torch.tanh(self.dense(out)) + hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:] + out = self.lsm(self.lm_head(hidden_states)).data + beam.advance(out) + input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin())) + input_ids=torch.cat((input_ids,beam.getCurrentState()),-1) + hyp= beam.getHyp(beam.getFinal()) + pred=beam.buildTargetTokens(hyp)[:self.beam_size] + pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred] + preds.append(torch.cat(pred,0).unsqueeze(0)) + + preds=torch.cat(preds,0) + return preds + + + +class Beam(object): + def __init__(self, size,sos,eos): + self.size = size + self.tt = torch.cuda + # The score for each translation on the beam. + self.scores = self.tt.FloatTensor(size).zero_() + # The backpointers at each time-step. + self.prevKs = [] + # The outputs at each time-step. + self.nextYs = [self.tt.LongTensor(size) + .fill_(0)] + self.nextYs[0][0] = sos + # Has EOS topped the beam yet. + self._eos = eos + self.eosTop = False + # Time and k pair for finished. + self.finished = [] + + def getCurrentState(self): + "Get the outputs for the current timestep." + batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1) + return batch + + def getCurrentOrigin(self): + "Get the backpointers for the current timestep." + return self.prevKs[-1] + + def advance(self, wordLk): + """ + Given prob over words for every last beam `wordLk` and attention + `attnOut`: Compute and update the beam search. 
+ + Parameters: + + * `wordLk`- probs of advancing from the last step (K x words) + * `attnOut`- attention at the last step + + Returns: True if beam search is complete. + """ + numWords = wordLk.size(1) + + # Sum the previous scores. + if len(self.prevKs) > 0: + beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk) + + # Don't let EOS have children. + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + beamLk[i] = -1e20 + else: + beamLk = wordLk[0] + flatBeamLk = beamLk.view(-1) + bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True) + + self.scores = bestScores + + # bestScoresId is flattened beam x word array, so calculate which + # word and beam each score came from + prevK = bestScoresId // numWords + self.prevKs.append(prevK) + self.nextYs.append((bestScoresId - prevK * numWords)) + + + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + s = self.scores[i] + self.finished.append((s, len(self.nextYs) - 1, i)) + + # End condition is when top-of-beam is EOS and no global score. + if self.nextYs[-1][0] == self._eos: + self.eosTop = True + + def done(self): + return self.eosTop and len(self.finished) >=self.size + + def getFinal(self): + if len(self.finished) == 0: + self.finished.append((self.scores[0], len(self.nextYs) - 1, 0)) + self.finished.sort(key=lambda a: -a[0]) + if len(self.finished) != self.size: + unfinished=[] + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] != self._eos: + s = self.scores[i] + unfinished.append((s, len(self.nextYs) - 1, i)) + unfinished.sort(key=lambda a: -a[0]) + self.finished+=unfinished[:self.size-len(self.finished)] + return self.finished[:self.size] + + def getHyp(self, beam_res): + """ + Walk back to construct the full hypothesis. + """ + hyps=[] + for _,timestep, k in beam_res: + hyp = [] + for j in range(len(self.prevKs[:timestep]) - 1, -1, -1): + hyp.append(self.nextYs[j+1][k]) + k = self.prevKs[j][k] + hyps.append(hyp[::-1]) + return hyps + + def buildTargetTokens(self, preds): + sentence=[] + for pred in preds: + tokens = [] + for tok in pred: + if tok==self._eos: + break + tokens.append(tok) + sentence.append(tokens) + return sentence + diff --git a/Code-Code/code-refinement/code/run.py b/Code-Code/code-refinement/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..d56c56af3bfe2060ea681f47b5261495305d277e --- /dev/null +++ b/Code-Code/code-refinement/code/run.py @@ -0,0 +1,575 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. 
+""" + +from __future__ import absolute_import +import os +import sys +import pickle +import torch +import json +import random +import logging +import argparse +import numpy as np +from io import open +from itertools import cycle +import torch.nn as nn +from model import Seq2Seq +from tqdm import tqdm, trange +from bleu import _bleu +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + RobertaConfig, RobertaModel, RobertaTokenizer) +MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)} + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + +class Example(object): + """A single training/test example.""" + def __init__(self, + idx, + source, + target, + ): + self.idx = idx + self.source = source + self.target = target + +# def read_examples(filename): +# """Read examples from filename.""" +# examples=[] +# with open(filename,encoding="utf-8") as f: +# for idx,js in enumerate(json.load(f)): +# source=' '.join(js['old_comment_tokens']) +# target=' '.join(js['new_comment_tokens']) +# examples.append( +# Example( +# idx = idx, +# source=source, +# target=target, +# ) +# ) +# return examples +def read_examples(filename): + """Read examples from filename.""" + examples=[] + assert len(filename.split(','))==2 + src_filename = filename.split(',')[0] + trg_filename = filename.split(',')[1] + idx = 0 + with open(src_filename) as f1,open(trg_filename) as f2: + for line1,line2 in zip(f1,f2): + examples.append( + Example( + idx = idx, + source=line1.strip(), + target=line2.strip(), + ) + ) + idx+=1 + return examples + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, + example_id, + source_ids, + target_ids, + source_mask, + target_mask, + + ): + self.example_id = example_id + self.source_ids = source_ids + self.target_ids = target_ids + self.source_mask = source_mask + self.target_mask = target_mask + + + +def convert_examples_to_features(examples, tokenizer, args,stage=None): + features = [] + for example_index, example in enumerate(examples): + #source + source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2] + source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token] + source_ids = tokenizer.convert_tokens_to_ids(source_tokens) + source_mask = [1] * (len(source_tokens)) + padding_length = args.max_source_length - len(source_ids) + source_ids+=[tokenizer.pad_token_id]*padding_length + source_mask+=[0]*padding_length + + #target + if stage=="test": + target_tokens = tokenizer.tokenize("None") + else: + target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2] + target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token] + target_ids = tokenizer.convert_tokens_to_ids(target_tokens) + target_mask = [1] *len(target_ids) + padding_length = args.max_target_length - len(target_ids) + target_ids+=[tokenizer.pad_token_id]*padding_length + target_mask+=[0]*padding_length + + if example_index < 5: + if stage=='train': + logger.info("*** Example ***") + logger.info("idx: {}".format(example.idx)) + + logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens])) + logger.info("source_ids: {}".format(' '.join(map(str, 
source_ids)))) + logger.info("source_mask: {}".format(' '.join(map(str, source_mask)))) + + logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens])) + logger.info("target_ids: {}".format(' '.join(map(str, target_ids)))) + logger.info("target_mask: {}".format(' '.join(map(str, target_mask)))) + + features.append( + InputFeatures( + example_index, + source_ids, + target_ids, + source_mask, + target_mask, + ) + ) + return features + + +def _truncate_seq_pair(tokens_a, tokens_b,tokens_c, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + + while True: + total_length = len(tokens_a) + len(tokens_b)+len(tokens_c) + if total_length <= max_length: + break + if len(tokens_a) >= len(tokens_b) and len(tokens_a)>=len(tokens_c): + tokens_a.pop() + elif len(tokens_b) >= len(tokens_a) and len(tokens_b)>=len(tokens_c): + tokens_b.pop() + else: + tokens_c.pop() + +def set_seed(args): + """set random seed.""" + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type: e.g. roberta") + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model: e.g. roberta-base" ) + parser.add_argument("--tokenizer_name", default="", required=True, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--load_model_path", default=None, type=str, + help="Path to trained model: Should contain the .bin files" ) + ## Other parameters + parser.add_argument("--train_filename", default=None, type=str, + help="The train filenames (source and target files).") + parser.add_argument("--dev_filename", default=None, type=str, + help="The dev filename. (source and target files).") + parser.add_argument("--test_filename", default=None, type=str, + help="The test filename. (source and target files).") + + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + + parser.add_argument("--max_source_length", default=64, type=int, + help="The maximum total source sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded.") + parser.add_argument("--max_target_length", default=32, type=int, + help="The maximum total target sequence length after tokenization. 
Sequences longer " + "than this will be truncated, sequences shorter will be padded.") + + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + + parser.add_argument("--train_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--beam_size", default=10, type=int, + help="beam size for beam search") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=3.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--eval_steps", default=-1, type=int, + help="") + parser.add_argument("--train_steps", default=-1, type=int, + help="") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + # print arguments + args = parser.parse_args() + logger.info(args) + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1)) + args.device = device + # Set seed + set_seed(args) + # make dir if output_dir not exist + if os.path.exists(args.output_dir) is False: + os.makedirs(args.output_dir) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,do_lower_case=args.do_lower_case) + + #budild model + encoder = model_class.from_pretrained(args.model_name_or_path,config=config) + decoder_layer = 
nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads) + decoder = nn.TransformerDecoder(decoder_layer, num_layers=6) + model=Seq2Seq(encoder=encoder,decoder=decoder,config=config, + beam_size=args.beam_size,max_length=args.max_target_length, + sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id) + + if args.load_model_path is not None: + logger.info("reload model from {}".format(args.load_model_path)) + model.load_state_dict(torch.load(args.load_model_path)) + + model.to(device) + if args.local_rank != -1: + # Distributed training + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + model = DDP(model) + elif args.n_gpu > 1: + # multi-gpu training + model = torch.nn.DataParallel(model) + + + + + if args.do_train: + # Prepare training data loader + train_examples = read_examples(args.train_filename) + train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train') + all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long) + all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long) + all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) + + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps) + + num_train_optimization_steps = args.train_steps + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, + num_training_steps=num_train_optimization_steps) + + + #Start training + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_examples)) + logger.info(" Batch size = %d", args.train_batch_size) + logger.info(" Num epoch = %d", num_train_optimization_steps*args.train_batch_size//len(train_examples)) + + + model.train() + dev_dataset={} + nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6 + bar = range(num_train_optimization_steps) + train_dataloader=cycle(train_dataloader) + eval_flag = True + idx=0 + for step in bar: + batch = next(train_dataloader) + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask,target_ids,target_mask = batch + loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. 
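+            # DataParallel returns one loss per GPU; the mean above reduces them to a scalar.
+            # The loss is then scaled by 1/gradient_accumulation_steps and gradients are only
+            # applied every gradient_accumulation_steps batches, emulating a larger batch size.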
+ if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + tr_loss += loss.item() + train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4) + if (global_step + 1)%100==0: + logger.info(" step {} loss {} batch-{}".format(global_step + 1,train_loss, ((global_step+1)*args.train_batch_size) / len(train_examples))) + nb_tr_examples += source_ids.size(0) + nb_tr_steps += 1 + loss.backward() + + if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0: + #Update parameters + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + eval_flag = True + + + if args.do_eval and ((global_step + 1) %args.eval_steps == 0) and eval_flag: + #Eval model with dev dataset + tr_loss = 0 + nb_tr_examples, nb_tr_steps = 0, 0 + eval_flag=False + if 'dev_loss' in dev_dataset: + eval_examples,eval_data=dev_dataset['dev_loss'] + else: + eval_examples = read_examples(args.dev_filename) + eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev') + all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) + all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long) + all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) + dev_dataset['dev_loss']=eval_examples,eval_data + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + logger.info("\n***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_examples)) + logger.info(" Batch size = %d", args.eval_batch_size) + + #Start Evaling model + model.eval() + eval_loss,tokens_num = 0,0 + for batch in eval_dataloader: + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask,target_ids,target_mask = batch + + with torch.no_grad(): + _,loss,num = model(source_ids=source_ids,source_mask=source_mask, + target_ids=target_ids,target_mask=target_mask) + eval_loss += loss.sum().item() + tokens_num += num.sum().item() + #Pring loss of dev dataset + model.train() + eval_loss = eval_loss / tokens_num + result = {'eval_ppl': round(np.exp(eval_loss),5), + 'global_step': global_step+1, + 'train_loss': round(train_loss,5)} + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(result[key])) + logger.info(" "+"*"*20) + + #save last checkpoint + last_output_dir = os.path.join(args.output_dir, 'checkpoint-last') + if not os.path.exists(last_output_dir): + os.makedirs(last_output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(last_output_dir, "pytorch_model.bin") + torch.save(model_to_save.state_dict(), output_model_file) + if eval_lossbest_bleu: + logger.info(" Best bleu:%s",dev_bleu) + logger.info(" "+"*"*20) + best_bleu=dev_bleu + # Save best checkpoint for best bleu + output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu') + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(output_dir, "pytorch_model.bin") + torch.save(model_to_save.state_dict(), output_model_file) + + # 每一轮记录checkpoint + if 
int((global_step+1)*args.train_batch_size / len(train_examples)) == idx+1: + logger.info(" batch:%s",idx) + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + idx = idx+1 + + if args.do_test: + files=[] + if args.dev_filename is not None: + files.append(args.dev_filename) + if args.test_filename is not None: + files.append(args.test_filename) + for idx,file in enumerate(files): + logger.info("Test file: {}".format(file)) + eval_examples = read_examples(file) + eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test') + all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_source_ids,all_source_mask) + + # Calculate bleu + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + model.eval() + p=[] + for batch in tqdm(eval_dataloader,total=len(eval_dataloader)): + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask= batch + with torch.no_grad(): + preds = model(source_ids=source_ids,source_mask=source_mask) + for pred in preds: + t=pred[0].cpu().numpy() + t=list(t) + if 0 in t: + t=t[:t.index(0)] + text = tokenizer.decode(t,clean_up_tokenization_spaces=False) + p.append(text) + model.train() + predictions=[] + accs=[] + with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1: + for ref,gold in zip(p,eval_examples): + predictions.append(str(gold.idx)+'\t'+ref) + f.write(ref+'\n') + f1.write(gold.target+'\n') + accs.append(ref==gold.target) + dev_bleu=round(_bleu(os.path.join(args.output_dir, "test_{}.gold".format(str(idx))).format(file), + os.path.join(args.output_dir, "test_{}.output".format(str(idx))).format(file)),2) + logger.info(" %s = %s "%("bleu-4",str(dev_bleu))) + logger.info(" %s = %s "%("xMatch",str(round(np.mean(accs)*100,4)))) + logger.info(" "+"*"*20) + + + + + + + +if __name__ == "__main__": + main() + + diff --git a/Code-Code/code-refinement/code/train.sh b/Code-Code/code-refinement/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..ca9db82e2a8c632836280fc2c1ba8fb08518c9a3 --- /dev/null +++ b/Code-Code/code-refinement/code/train.sh @@ -0,0 +1,22 @@ +pretrained_model=microsoft/codebert-base +output_dir=../model +data_size=small + +CUDA_VISIBLE_DEVICES=1 python run.py \ + --do_train \ + --do_eval \ + --model_type roberta \ + --model_name_or_path $pretrained_model \ + --config_name roberta-base \ + --tokenizer_name roberta-base \ + --train_filename ../data/$data_size/train.buggy-fixed.buggy,../data/$data_size/train.buggy-fixed.fixed \ + --dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \ + --output_dir $output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --beam_size 5 \ + --train_batch_size 16 \ + --eval_batch_size 16 \ + --learning_rate 5e-5 \ + --train_steps 100000 \ + --eval_steps 5000 diff --git a/Code-Code/code-refinement/dataset.zip 
b/Code-Code/code-refinement/dataset.zip new file mode 100644 index 0000000000000000000000000000000000000000..9e886b761f2d1fd29bb9140fcb5a42fdc25e5044 --- /dev/null +++ b/Code-Code/code-refinement/dataset.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:777c3c2c8db2e206e35336adda286979caea2dd7627f86be63ad9313d6dd5c29 +size 9317188 diff --git a/Code-Text/code-to-text/code/bleu.py b/Code-Text/code-to-text/code/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f2b0aad12754e6cb01575ae9675858c1c4e16d --- /dev/null +++ b/Code-Text/code-to-text/code/bleu.py @@ -0,0 +1,200 @@ +#!/usr/bin/python + +''' +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. +''' + +# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ + +'''Provides: + +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +score_cooked(alltest, n=4): Score a list of cooked test sentences. + +score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. + +The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. +''' + +import sys, math, re, xml.sax.saxutils +import subprocess +import os + +# Added to bypass NIST-style pre-processing of hyp and ref files -- wade +nonorm = 0 + +preserve_case = False +eff_ref_len = "shortest" + +normalize1 = [ + ('', ''), # strip "skipped" tags + (r'-\n', ''), # strip end-of-line hyphenation and join lines + (r'\n', ' '), # join lines +# (r'(\d)\s+(?=\d)', r'\1'), # join digits +] +normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] + +normalize2 = [ + (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing + (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit + (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit + (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit +] +normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + +def normalize(s): + '''Normalize and tokenize text. 
This is lifted from NIST mteval-v11a.pl.''' + # Added to bypass NIST-style pre-processing of hyp and ref files -- wade + if (nonorm): + return s.split() + if type(s) is not str: + s = " ".join(s) + # language-independent part: + for (pattern, replace) in normalize1: + s = re.sub(pattern, replace, s) + s = xml.sax.saxutils.unescape(s, {'"':'"'}) + # language-dependent part (assuming Western languages): + s = " %s " % s + if not preserve_case: + s = s.lower() # this might not be identical to the original + for (pattern, replace) in normalize2: + s = re.sub(pattern, replace, s) + return s.split() + +def count_ngrams(words, n=4): + counts = {} + for k in range(1,n+1): + for i in range(len(words)-k+1): + ngram = tuple(words[i:i+k]) + counts[ngram] = counts.get(ngram, 0)+1 + return counts + +def cook_refs(refs, n=4): + '''Takes a list of reference sentences for a single segment + and returns an object that encapsulates everything that BLEU + needs to know about them.''' + + refs = [normalize(ref) for ref in refs] + maxcounts = {} + for ref in refs: + counts = count_ngrams(ref, n) + for (ngram,count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + return ([len(ref) for ref in refs], maxcounts) + +def cook_test(test, item, n=4): + '''Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.''' + (reflens, refmaxcounts)=item + test = normalize(test) + result = {} + result["testlen"] = len(test) + + # Calculate effective reference sentence length. + + if eff_ref_len == "shortest": + result["reflen"] = min(reflens) + elif eff_ref_len == "average": + result["reflen"] = float(sum(reflens))/len(reflens) + elif eff_ref_len == "closest": + min_diff = None + for reflen in reflens: + if min_diff is None or abs(reflen-len(test)) < min_diff: + min_diff = abs(reflen-len(test)) + result['reflen'] = reflen + + result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)] + + result['correct'] = [0]*n + counts = count_ngrams(test, n) + for (ngram, count) in counts.items(): + result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count) + + return result + +def score_cooked(allcomps, n=4, ground=0, smooth=1): + totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n} + for comps in allcomps: + for key in ['testlen','reflen']: + totalcomps[key] += comps[key] + for key in ['guess','correct']: + for k in range(n): + totalcomps[key][k] += comps[key][k] + logbleu = 0.0 + all_bleus = [] + for k in range(n): + correct = totalcomps['correct'][k] + guess = totalcomps['guess'][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min) + if guess == 0: + all_bleus.append(-10000000) + else: + all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess )) + + logbleu /= float(n) + all_bleus.insert(0, logbleu) + + brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1)) + for i in range(len(all_bleus)): + if i ==0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) + return all_bleus + +def bleu(refs, candidate, ground=0, smooth=1): + refs = cook_refs(refs) + test = cook_test(candidate, refs) + return score_cooked([test], ground=ground, smooth=smooth) + +def splitPuncts(line): + return ' '.join(re.findall(r"[\w]+|[^\s\w]", line)) + +def computeMaps(predictions, goldfile): + predictionMap = {} + goldMap = {} + gf = open(goldfile, 'r') + + for row in 
predictions: + cols = row.strip().split('\t') + if len(cols) == 1: + (rid, pred) = (cols[0], '') + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split('\t') + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write('Total: ' + str(len(goldMap)) + '\n') + return (goldMap, predictionMap) + + +#m1 is the reference map +#m2 is the prediction map +def bleuFromMaps(m1, m2): + score = [0] * 5 + num = 0.0 + + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [ score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] + +if __name__ == '__main__': + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + print (bleuFromMaps(goldMap, predictionMap)[0]) + diff --git a/Code-Text/code-to-text/code/evaluate.sh b/Code-Text/code-to-text/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..44c8fd2230e5a484b16a976254d3fcdbc6b2c180 --- /dev/null +++ b/Code-Text/code-to-text/code/evaluate.sh @@ -0,0 +1,6 @@ +lang=python +gold_file=../model/$lang/dev.gold +output_file=../model/$lang/dev.output + +python evaluator.py \ +$gold_file < $output_file \ No newline at end of file diff --git a/Code-Text/code-to-text/code/evaluator.py b/Code-Text/code-to-text/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f2b0aad12754e6cb01575ae9675858c1c4e16d --- /dev/null +++ b/Code-Text/code-to-text/code/evaluator.py @@ -0,0 +1,200 @@ +#!/usr/bin/python + +''' +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. +''' + +# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ + +'''Provides: + +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +score_cooked(alltest, n=4): Score a list of cooked test sentences. + +score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. + +The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. +''' + +import sys, math, re, xml.sax.saxutils +import subprocess +import os + +# Added to bypass NIST-style pre-processing of hyp and ref files -- wade +nonorm = 0 + +preserve_case = False +eff_ref_len = "shortest" + +normalize1 = [ + ('', ''), # strip "skipped" tags + (r'-\n', ''), # strip end-of-line hyphenation and join lines + (r'\n', ' '), # join lines +# (r'(\d)\s+(?=\d)', r'\1'), # join digits +] +normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] + +normalize2 = [ + (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. 
apostrophe is missing + (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit + (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit + (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit +] +normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + +def normalize(s): + '''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.''' + # Added to bypass NIST-style pre-processing of hyp and ref files -- wade + if (nonorm): + return s.split() + if type(s) is not str: + s = " ".join(s) + # language-independent part: + for (pattern, replace) in normalize1: + s = re.sub(pattern, replace, s) + s = xml.sax.saxutils.unescape(s, {'"':'"'}) + # language-dependent part (assuming Western languages): + s = " %s " % s + if not preserve_case: + s = s.lower() # this might not be identical to the original + for (pattern, replace) in normalize2: + s = re.sub(pattern, replace, s) + return s.split() + +def count_ngrams(words, n=4): + counts = {} + for k in range(1,n+1): + for i in range(len(words)-k+1): + ngram = tuple(words[i:i+k]) + counts[ngram] = counts.get(ngram, 0)+1 + return counts + +def cook_refs(refs, n=4): + '''Takes a list of reference sentences for a single segment + and returns an object that encapsulates everything that BLEU + needs to know about them.''' + + refs = [normalize(ref) for ref in refs] + maxcounts = {} + for ref in refs: + counts = count_ngrams(ref, n) + for (ngram,count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + return ([len(ref) for ref in refs], maxcounts) + +def cook_test(test, item, n=4): + '''Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.''' + (reflens, refmaxcounts)=item + test = normalize(test) + result = {} + result["testlen"] = len(test) + + # Calculate effective reference sentence length. 
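+    # "shortest" takes the minimum reference length, "average" the mean, and "closest"
+    # the reference whose length is nearest to the candidate; the chosen reflen later
+    # drives the brevity penalty in score_cooked().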
+ + if eff_ref_len == "shortest": + result["reflen"] = min(reflens) + elif eff_ref_len == "average": + result["reflen"] = float(sum(reflens))/len(reflens) + elif eff_ref_len == "closest": + min_diff = None + for reflen in reflens: + if min_diff is None or abs(reflen-len(test)) < min_diff: + min_diff = abs(reflen-len(test)) + result['reflen'] = reflen + + result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)] + + result['correct'] = [0]*n + counts = count_ngrams(test, n) + for (ngram, count) in counts.items(): + result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count) + + return result + +def score_cooked(allcomps, n=4, ground=0, smooth=1): + totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n} + for comps in allcomps: + for key in ['testlen','reflen']: + totalcomps[key] += comps[key] + for key in ['guess','correct']: + for k in range(n): + totalcomps[key][k] += comps[key][k] + logbleu = 0.0 + all_bleus = [] + for k in range(n): + correct = totalcomps['correct'][k] + guess = totalcomps['guess'][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min) + if guess == 0: + all_bleus.append(-10000000) + else: + all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess )) + + logbleu /= float(n) + all_bleus.insert(0, logbleu) + + brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1)) + for i in range(len(all_bleus)): + if i ==0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) + return all_bleus + +def bleu(refs, candidate, ground=0, smooth=1): + refs = cook_refs(refs) + test = cook_test(candidate, refs) + return score_cooked([test], ground=ground, smooth=smooth) + +def splitPuncts(line): + return ' '.join(re.findall(r"[\w]+|[^\s\w]", line)) + +def computeMaps(predictions, goldfile): + predictionMap = {} + goldMap = {} + gf = open(goldfile, 'r') + + for row in predictions: + cols = row.strip().split('\t') + if len(cols) == 1: + (rid, pred) = (cols[0], '') + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split('\t') + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write('Total: ' + str(len(goldMap)) + '\n') + return (goldMap, predictionMap) + + +#m1 is the reference map +#m2 is the prediction map +def bleuFromMaps(m1, m2): + score = [0] * 5 + num = 0.0 + + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [ score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] + +if __name__ == '__main__': + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + print (bleuFromMaps(goldMap, predictionMap)[0]) + diff --git a/Code-Text/code-to-text/code/model.py b/Code-Text/code-to-text/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..4247c403443a64b48f1700e3ebdbfff9d84ac5d0 --- /dev/null +++ b/Code-Text/code-to-text/code/model.py @@ -0,0 +1,222 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
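+# This module defines the Seq2Seq wrapper used for code summarization (a RoBERTa/CodeBERT
+# encoder paired with an externally supplied Transformer decoder and an LM head tied to the
+# encoder word embeddings) together with the Beam class that implements beam search at
+# generation time.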
+ +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +class Seq2Seq(nn.Module): + """ + Build Seqence-to-Sequence. + + Parameters: + + * `encoder`- encoder of seq2seq model. e.g. roberta + * `decoder`- decoder of seq2seq model. e.g. transformer + * `config`- configuration of encoder model. + * `beam_size`- beam size for beam search. + * `max_length`- max length of target for beam search. + * `sos_id`- start of symbol ids in target for beam search. + * `eos_id`- end of symbol ids in target for beam search. + """ + def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None): + super(Seq2Seq, self).__init__() + self.encoder = encoder + self.decoder=decoder + self.config=config + self.register_buffer("bias", torch.tril(torch.ones(2048, 2048))) + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.lsm = nn.LogSoftmax(dim=-1) + self.tie_weights() + + self.beam_size=beam_size + self.max_length=max_length + self.sos_id=sos_id + self.eos_id=eos_id + + def _tie_or_clone_weights(self, first_module, second_module): + """ Tie or clone module weights depending of weither we are using TorchScript or not + """ + if self.config.torchscript: + first_module.weight = nn.Parameter(second_module.weight.clone()) + else: + first_module.weight = second_module.weight + + def tie_weights(self): + """ Make sure we are sharing the input and output embeddings. + Export to TorchScript can't handle parameter sharing so we are cloning them instead. + """ + self._tie_or_clone_weights(self.lm_head, + self.encoder.embeddings.word_embeddings) + + def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None): + outputs = self.encoder(source_ids, attention_mask=source_mask) + encoder_output = outputs[0].permute([1,0,2]).contiguous() + if target_ids is not None: + attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]]) + tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous() + out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool()) + hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous() + lm_logits = self.lm_head(hidden_states) + # Shift so that tokens < n predict n + active_loss = target_mask[..., 1:].ne(0).view(-1) == 1 + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = target_ids[..., 1:].contiguous() + # Flatten the tokens + loss_fct = nn.CrossEntropyLoss(ignore_index=-1) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss], + shift_labels.view(-1)[active_loss]) + + outputs = loss,loss*active_loss.sum(),active_loss.sum() + return outputs + else: + #Predict + preds=[] + zero=torch.cuda.LongTensor(1).fill_(0) + for i in range(source_ids.shape[0]): + context=encoder_output[:,i:i+1] + context_mask=source_mask[i:i+1,:] + beam = Beam(self.beam_size,self.sos_id,self.eos_id) + input_ids=beam.getCurrentState() + context=context.repeat(1, self.beam_size,1) + context_mask=context_mask.repeat(self.beam_size,1) + for _ in range(self.max_length): + if beam.done(): + break + attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]]) + tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous() + out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool()) + out = 
torch.tanh(self.dense(out)) + hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:] + out = self.lsm(self.lm_head(hidden_states)).data + beam.advance(out) + input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin())) + input_ids=torch.cat((input_ids,beam.getCurrentState()),-1) + hyp= beam.getHyp(beam.getFinal()) + pred=beam.buildTargetTokens(hyp)[:self.beam_size] + pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred] + preds.append(torch.cat(pred,0).unsqueeze(0)) + + preds=torch.cat(preds,0) + return preds + + def feature(self, source_ids,source_mask): + outputs = self.encoder(source_ids, attention_mask=source_mask) + return outputs.pooler_output + + +class Beam(object): + def __init__(self, size,sos,eos): + self.size = size + self.tt = torch.cuda + # The score for each translation on the beam. + self.scores = self.tt.FloatTensor(size).zero_() + # The backpointers at each time-step. + self.prevKs = [] + # The outputs at each time-step. + self.nextYs = [self.tt.LongTensor(size) + .fill_(0)] + self.nextYs[0][0] = sos + # Has EOS topped the beam yet. + self._eos = eos + self.eosTop = False + # Time and k pair for finished. + self.finished = [] + + def getCurrentState(self): + "Get the outputs for the current timestep." + batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1) + return batch + + def getCurrentOrigin(self): + "Get the backpointers for the current timestep." + return self.prevKs[-1] + + def advance(self, wordLk): + """ + Given prob over words for every last beam `wordLk` and attention + `attnOut`: Compute and update the beam search. + + Parameters: + + * `wordLk`- probs of advancing from the last step (K x words) + * `attnOut`- attention at the last step + + Returns: True if beam search is complete. + """ + numWords = wordLk.size(1) + + # Sum the previous scores. + if len(self.prevKs) > 0: + beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk) + + # Don't let EOS have children. + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + beamLk[i] = -1e20 + else: + beamLk = wordLk[0] + flatBeamLk = beamLk.view(-1) + bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True) + + self.scores = bestScores + + # bestScoresId is flattened beam x word array, so calculate which + # word and beam each score came from + prevK = bestScoresId // numWords + self.prevKs.append(prevK) + self.nextYs.append((bestScoresId - prevK * numWords)) + + + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + s = self.scores[i] + self.finished.append((s, len(self.nextYs) - 1, i)) + + # End condition is when top-of-beam is EOS and no global score. + if self.nextYs[-1][0] == self._eos: + self.eosTop = True + + def done(self): + return self.eosTop and len(self.finished) >=self.size + + def getFinal(self): + if len(self.finished) == 0: + self.finished.append((self.scores[0], len(self.nextYs) - 1, 0)) + self.finished.sort(key=lambda a: -a[0]) + if len(self.finished) != self.size: + unfinished=[] + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] != self._eos: + s = self.scores[i] + unfinished.append((s, len(self.nextYs) - 1, i)) + unfinished.sort(key=lambda a: -a[0]) + self.finished+=unfinished[:self.size-len(self.finished)] + return self.finished[:self.size] + + def getHyp(self, beam_res): + """ + Walk back to construct the full hypothesis. 
+ """ + hyps=[] + for _,timestep, k in beam_res: + hyp = [] + for j in range(len(self.prevKs[:timestep]) - 1, -1, -1): + hyp.append(self.nextYs[j+1][k]) + k = self.prevKs[j][k] + hyps.append(hyp[::-1]) + return hyps + + def buildTargetTokens(self, preds): + sentence=[] + for pred in preds: + tokens = [] + for tok in pred: + if tok==self._eos: + break + tokens.append(tok) + sentence.append(tokens) + return sentence + diff --git a/Code-Text/code-to-text/code/run.py b/Code-Text/code-to-text/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..7300a9884a181c2098b438cebaaeadbb74cf8ec0 --- /dev/null +++ b/Code-Text/code-to-text/code/run.py @@ -0,0 +1,544 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. +""" + +from __future__ import absolute_import +import os +import sys +import bleu +import pickle +import torch +import json +import random +import logging +import argparse +import numpy as np +from io import open +from itertools import cycle +import torch.nn as nn +from model import Seq2Seq +from tqdm import tqdm, trange +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + RobertaConfig, RobertaModel, RobertaTokenizer) +MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)} + +logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt = '%m/%d/%Y %H:%M:%S', + level = logging.INFO) +logger = logging.getLogger(__name__) + +class Example(object): + """A single training/test example.""" + def __init__(self, + idx, + source, + target, + ): + self.idx = idx + self.source = source + self.target = target + +def read_examples(filename): + """Read examples from filename.""" + examples=[] + with open(filename,encoding="utf-8") as f: + for idx, line in enumerate(f): + line=line.strip() + js=json.loads(line) + if 'idx' not in js: + js['idx']=idx + code=' '.join(js['code_tokens']).replace('\n',' ') + code=' '.join(code.strip().split()) + nl=' '.join(js['docstring_tokens']).replace('\n','') + nl=' '.join(nl.strip().split()) + examples.append( + Example( + idx = idx, + source=code, + target = nl, + ) + ) + return examples + + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, + example_id, + source_ids, + target_ids, + source_mask, + target_mask, + + ): + self.example_id = example_id + self.source_ids = source_ids + self.target_ids = target_ids + self.source_mask = 
source_mask + self.target_mask = target_mask + + + +def convert_examples_to_features(examples, tokenizer, args,stage=None): + features = [] + for example_index, example in enumerate(examples): + #source + source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2] + source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token] + source_ids = tokenizer.convert_tokens_to_ids(source_tokens) + source_mask = [1] * (len(source_tokens)) + padding_length = args.max_source_length - len(source_ids) + source_ids+=[tokenizer.pad_token_id]*padding_length + source_mask+=[0]*padding_length + + #target + if stage=="test": + target_tokens = tokenizer.tokenize("None") + else: + target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2] + target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token] + target_ids = tokenizer.convert_tokens_to_ids(target_tokens) + target_mask = [1] *len(target_ids) + padding_length = args.max_target_length - len(target_ids) + target_ids+=[tokenizer.pad_token_id]*padding_length + target_mask+=[0]*padding_length + + if example_index < 5: + if stage=='train': + logger.info("*** Example ***") + logger.info("idx: {}".format(example.idx)) + + logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens])) + logger.info("source_ids: {}".format(' '.join(map(str, source_ids)))) + logger.info("source_mask: {}".format(' '.join(map(str, source_mask)))) + + logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens])) + logger.info("target_ids: {}".format(' '.join(map(str, target_ids)))) + logger.info("target_mask: {}".format(' '.join(map(str, target_mask)))) + + features.append( + InputFeatures( + example_index, + source_ids, + target_ids, + source_mask, + target_mask, + ) + ) + return features + + + +def set_seed(seed=42): + random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--model_type", default=None, type=str, required=True, + help="Model type: e.g. roberta") + parser.add_argument("--model_name_or_path", default=None, type=str, required=True, + help="Path to pre-trained model: e.g. roberta-base" ) + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--load_model_path", default=None, type=str, + help="Path to trained model: Should contain the .bin files" ) + ## Other parameters + parser.add_argument("--train_filename", default=None, type=str, + help="The train filename. Should contain the .jsonl files for this task.") + parser.add_argument("--dev_filename", default=None, type=str, + help="The dev filename. Should contain the .jsonl files for this task.") + parser.add_argument("--test_filename", default=None, type=str, + help="The test filename. Should contain the .jsonl files for this task.") + + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--max_source_length", default=64, type=int, + help="The maximum total source sequence length after tokenization. 
Sequences longer " + "than this will be truncated, sequences shorter will be padded.") + parser.add_argument("--max_target_length", default=32, type=int, + help="The maximum total target sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded.") + + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + + parser.add_argument("--train_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--beam_size", default=10, type=int, + help="beam size for beam search") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=3, type=int, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") + parser.add_argument("--eval_steps", default=-1, type=int, + help="") + parser.add_argument("--train_steps", default=-1, type=int, + help="") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + # print arguments + args = parser.parse_args() + logger.info(args) + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1)) + args.device = device + # Set seed + set_seed(args.seed) + # make dir if output_dir not exist + if os.path.exists(args.output_dir) is False: + os.makedirs(args.output_dir) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case) + + #budild model + encoder = model_class.from_pretrained(args.model_name_or_path,config=config) + decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads) + decoder = nn.TransformerDecoder(decoder_layer, num_layers=6) + model=Seq2Seq(encoder=encoder,decoder=decoder,config=config, + beam_size=args.beam_size,max_length=args.max_target_length, + sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id) + if args.load_model_path is not None: + logger.info("reload model from {}".format(args.load_model_path)) + model.load_state_dict(torch.load(args.load_model_path)) + + model.to(device) + if args.local_rank != -1: + # Distributed training + try: + from apex.parallel import DistributedDataParallel as DDP + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") + + model = DDP(model) + elif args.n_gpu > 1: + # multi-gpu training + model = torch.nn.DataParallel(model) + + if args.do_train: + # Prepare training data loader + train_examples = read_examples(args.train_filename) + train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train') + all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long) + all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long) + all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long) + train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) + + if args.local_rank == -1: + train_sampler = RandomSampler(train_data) + else: + train_sampler = DistributedSampler(train_data) + train_dataloader = DataLoader(train_data, 
sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps) + + num_train_optimization_steps = args.train_steps + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, + num_warmup_steps=int(t_total*0.1), + num_training_steps=t_total) + + #Start training + logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_examples)) + logger.info(" Batch size = %d", args.train_batch_size) + logger.info(" Num epoch = %d", args.num_train_epochs) + + + model.train() + dev_dataset={} + nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6 + for epoch in range(args.num_train_epochs): + bar = tqdm(train_dataloader,total=len(train_dataloader)) + for batch in bar: + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask,target_ids,target_mask = batch + loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu. + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + tr_loss += loss.item() + train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4) + bar.set_description("epoch {} loss {}".format(epoch,train_loss)) + nb_tr_examples += source_ids.size(0) + nb_tr_steps += 1 + loss.backward() + + if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0: + #Update parameters + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + + if args.do_eval: + #Eval model with dev dataset + tr_loss = 0 + nb_tr_examples, nb_tr_steps = 0, 0 + eval_flag=False + if 'dev_loss' in dev_dataset: + eval_examples,eval_data=dev_dataset['dev_loss'] + else: + eval_examples = read_examples(args.dev_filename) + eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev') + all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) + all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long) + all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask) + dev_dataset['dev_loss']=eval_examples,eval_data + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + logger.info("\n***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_examples)) + logger.info(" Batch size = %d", args.eval_batch_size) + + #Start Evaling model + model.eval() + eval_loss,tokens_num = 0,0 + for batch in eval_dataloader: + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask,target_ids,target_mask = batch + + with torch.no_grad(): + _,loss,num = 
model(source_ids=source_ids,source_mask=source_mask, + target_ids=target_ids,target_mask=target_mask) + eval_loss += loss.sum().item() + tokens_num += num.sum().item() + #Pring loss of dev dataset + model.train() + eval_loss = eval_loss / tokens_num + result = {'eval_ppl': round(np.exp(eval_loss),5), + 'global_step': global_step+1, + 'train_loss': round(train_loss,5)} + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(result[key])) + logger.info(" "+"*"*20) + + #save last checkpoint + last_output_dir = os.path.join(args.output_dir, 'checkpoint-last') + if not os.path.exists(last_output_dir): + os.makedirs(last_output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(last_output_dir, "pytorch_model.bin") + torch.save(model_to_save.state_dict(), output_model_file) + if eval_lossbest_bleu: + logger.info(" Best bleu:%s",dev_bleu) + logger.info(" "+"*"*20) + best_bleu=dev_bleu + # Save best checkpoint for best bleu + output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu') + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self + output_model_file = os.path.join(output_dir, "pytorch_model.bin") + torch.save(model_to_save.state_dict(), output_model_file) + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(epoch + 1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + # 每一轮记录表征 + # logger.info("Saving training feature") + # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1,num_workers=4,pin_memory=True) + # train_feature = [] + # for batch in tqdm(train_dataloader_bs1): + # batch = tuple(t.to(device) for t in batch) + # source_ids, source_mask, _, _ = batch + # model.eval() + # with torch.no_grad(): + # tf = model.feature(source_ids, source_mask) + # train_feature.append(tf.cpu().detach().numpy()) + # feature_output_path = os.path.join(output_dir, 'feature.pkl') + # with open(feature_output_path, 'wb') as f: + # pickle.dump(train_feature, f) + + if args.do_test: + files=[] + if args.dev_filename is not None: + files.append(args.dev_filename) + if args.test_filename is not None: + files.append(args.test_filename) + for idx,file in enumerate(files): + logger.info("Test file: {}".format(file)) + eval_examples = read_examples(file) + eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test') + all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long) + all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long) + eval_data = TensorDataset(all_source_ids,all_source_mask) + + # Calculate bleu + eval_sampler = SequentialSampler(eval_data) + eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) + + model.eval() + p=[] + for batch in tqdm(eval_dataloader,total=len(eval_dataloader)): + batch = tuple(t.to(device) for t in batch) + source_ids,source_mask= batch + with torch.no_grad(): + preds = model(source_ids=source_ids,source_mask=source_mask) + for pred in preds: + t=pred[0].cpu().numpy() + t=list(t) + if 0 in t: + 
t=t[:t.index(0)] + text = tokenizer.decode(t,clean_up_tokenization_spaces=False) + p.append(text) + model.train() + predictions=[] + with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1: + for ref,gold in zip(p,eval_examples): + predictions.append(str(gold.idx)+'\t'+ref) + f.write(str(gold.idx)+'\t'+ref+'\n') + f1.write(str(gold.idx)+'\t'+gold.target+'\n') + + (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "test_{}.gold".format(idx))) + dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2) + logger.info(" %s = %s "%("bleu-4",str(dev_bleu))) + logger.info(" "+"*"*20) + + + + + + + +if __name__ == "__main__": + main() + + diff --git a/Code-Text/code-to-text/code/test.sh b/Code-Text/code-to-text/code/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..02edf2acf0fc0d6646b4aa2342c03ef267999479 --- /dev/null +++ b/Code-Text/code-to-text/code/test.sh @@ -0,0 +1,22 @@ +lang=python #programming language +batch_size=64 +beam_size=10 +source_length=256 +target_length=128 +data_dir=../dataset +output_dir=../model/$lang +dev_file=$data_dir/$lang/valid.jsonl +test_file=$data_dir/$lang/test.jsonl +test_model=$output_dir/epoch_10/subject_model.pth #checkpoint for test + +CUDA_VISIBLE_DEVICES=2,3 python run.py \ +--do_test --model_type roberta \ +--model_name_or_path microsoft/codebert-base \ +--load_model_path $test_model \ +--dev_filename $dev_file \ +--test_filename $test_file \ +--output_dir $output_dir \ +--max_source_length $source_length \ +--max_target_length $target_length \ +--beam_size $beam_size \ +--eval_batch_size $batch_size \ No newline at end of file diff --git a/Code-Text/code-to-text/code/train.sh b/Code-Text/code-to-text/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..9ad83ecb355dba5e1e5ebd09736a4952793c6246 --- /dev/null +++ b/Code-Text/code-to-text/code/train.sh @@ -0,0 +1,28 @@ +lang=python #programming language +lr=5e-5 +batch_size=32 +beam_size=10 +source_length=256 +target_length=128 +data_dir=../dataset +output_dir=../model/$lang +train_file=$data_dir/$lang/train.jsonl +dev_file=$data_dir/$lang/valid.jsonl +epochs=10 +pretrained_model=microsoft/codebert-base #Roberta: roberta-base + +CUDA_VISIBLE_DEVICES=2,3 python run.py \ +--do_train \ +--do_eval \ +--model_type roberta \ +--model_name_or_path $pretrained_model \ +--train_filename $train_file \ +--dev_filename $dev_file \ +--output_dir $output_dir \ +--max_source_length $source_length \ +--max_target_length $target_length \ +--beam_size $beam_size \ +--train_batch_size $batch_size \ +--eval_batch_size $batch_size \ +--learning_rate $lr \ +--num_train_epochs $epochs \ No newline at end of file diff --git a/Code-Text/code-to-text/data.zip b/Code-Text/code-to-text/data.zip new file mode 100644 index 0000000000000000000000000000000000000000..7e38fb829189fc141e3dab6a7e8f2f42ebe2765f --- /dev/null +++ b/Code-Text/code-to-text/data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ee5eea169be9d0516dcb0f100ec1932b830b37b329d96bfa87c8f76ee7da380 +size 381246360 diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..c82a8f9588a6b20ef4e50d7ffb553d855217c70e --- /dev/null +++ b/README.md @@ -0,0 +1,81 @@ +--- +license: mit +--- +# 模型训练过程汇总(持续更新中) + +对于已收集的每一个模型,`code` 目录为模型定义、训练和测试的代码和脚本文件,`model` 目录为已收集的 epoch 模型文件,`dataset.zip` 为模型数据集。 + 
+The table below summarizes the training-process information for all collected models (a minimal checkpoint-loading sketch follows the table):
+
+| Model name | Model description | Model type | Number of epochs | Dataset info |
+| --- | --- | --- | --- | --- |
+| Clone-detection-BigCloneBench | Code clone detection model trained on the large-scale BigCloneBench benchmark; the task is binary classification (0/1), where 1 means semantically equivalent and 0 means otherwise. | Code clone detection | 2 epochs | BigCloneBench dataset |
+| Clone-detection-POJ-104 | Code clone detection model trained on POJ-104; the task is to recognize similar implementations across programming problems: given a piece of code and a set of candidates, return the Top-K candidates with the same semantics. | Code clone detection | 2 epochs (0-1) | POJ-104 programming-problem dataset |
+| CodeCompletion-token | Token-level code completion model. | Code completion | 5 epochs (Java corpus) | Java code token-sequence dataset |
+| Defect-detection | Code defect detection model that analyzes code to identify potential defects and errors (binary classification, 0/1). | Code defect detection | 5 epochs | C code dataset with defect annotations |
+| code-refinement | Code refinement model. | Code refinement/refactoring | 34 epochs (small dataset) | Dataset of code pairs before and after refinement (C language) |
+| code-to-text | Code-to-natural-language model. | Code comment generation | 10 epochs per language (Python/Java/JavaScript/PHP/Ruby/Go) | Multilingual code-text pair dataset |
+| NL-code-search-Adv | Advanced natural-language code search model that retrieves code by computing the similarity between a natural-language query and code snippets. | Code search | 2 epochs | Natural-language/(Python) code pair dataset |
+| NL-code-search-WebQuery | Web-query-based code search model; code and the natural-language query are run through the encoder and a multi-layer perceptron (MLP) computes the similarity score. | Code search | 3 epochs on each of the two datasets | Web-query/code pair datasets (CodeSearchNet and CoSQA (Python)) |
+| text-to-code | Natural-language-to-code generation model. | Code generation | 23 epochs | Text-description/code (C language) pair dataset |
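+
+As a usage illustration, here is a minimal sketch of how one of the collected per-epoch checkpoints can be loaded, taking NL-code-search-Adv as the example. The `microsoft/codebert-base` identifier and the checkpoint path are assumptions for illustration; point them at the actual encoder and model files in your local copy of this repository (the complete query-scoring example is `Text-code/NL-code-search-Adv/code/demo.py`).
+
+```python
+import torch
+from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
+
+# Model wrapper defined in Text-code/NL-code-search-Adv/code/model.py
+from model import Model
+
+# Assumed paths -- adjust to where the encoder and the epoch checkpoint live locally.
+BASE = "microsoft/codebert-base"
+CKPT = "Text-code/NL-code-search-Adv/model/python/epoch_2/subject_model.pth"
+
+config = RobertaConfig.from_pretrained(BASE)
+config.num_labels = 1
+tokenizer = RobertaTokenizer.from_pretrained(BASE)
+encoder = RobertaModel.from_pretrained(BASE, config=config)
+
+# Wrap the encoder the same way run.py does, then restore the saved epoch weights.
+model = Model(encoder, config, tokenizer, args=None)
+model.load_state_dict(torch.load(CKPT, map_location="cpu"))
+model.eval()
+```
+
+Queries and code snippets can then be tokenized and scored with `model(code_inputs, nl_inputs, return_scores=True)`, as the demo script does.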
diff --git a/Text-code/NL-code-search-Adv/code/demo.py b/Text-code/NL-code-search-Adv/code/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..72659edd5ed6389ec57b1fccc91b35dd9bb62810 --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/demo.py @@ -0,0 +1,55 @@ +from transformers import RobertaTokenizer, RobertaConfig, RobertaModel +import torch +import sys +import os + +from model import Model + + +def single_tokenize(text, tokenizer, block_size=256): + tokens = tokenizer.tokenize(text)[:block_size - 2] + tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token] + ids = tokenizer.convert_tokens_to_ids(tokens) + padding_length = block_size - len(ids) + ids += [tokenizer.pad_token_id] * padding_length + return torch.tensor([ids]) + + +if __name__ == "__main__": + config =RobertaConfig.from_pretrained("../../../../active_dataset_debugging/base/codebert-base") + config.num_labels = 1 + tokenizer = RobertaTokenizer.from_pretrained("../../../../active_dataset_debugging/base/codebert-base", do_lower_case=True) + model = RobertaModel.from_pretrained("../../../../active_dataset_debugging/base/roberta-base", config=config) + model = Model(model, config, tokenizer, args=None) + model.load_state_dict(torch.load("../model/python/epoch_2/subject_model.pth", map_location=torch.device('cpu'))) + + + query = "print hello world" + code_1 = """ + import numpy as np + """ + code_2 = """ + a = 'hello world' + """ + code_3 = """ + cout << "hello world" << endl; + """ + code_4 = ''' + print('hello world') + ''' + codes = [] + codes.append(code_1) + codes.append(code_2) + codes.append(code_3) + codes.append(code_4) + scores = [] + nl_inputs = single_tokenize(query, tokenizer) + for code in codes: + code_inputs = single_tokenize(code, tokenizer) + score = model(code_inputs, nl_inputs, return_scores=True) + scores.append(score) + print("Query:", query) + for i in range(len(codes)): + print('------------------------------') + print("Code:", codes[i]) + print("Score:", float(scores[i])) \ No newline at end of file diff --git a/Text-code/NL-code-search-Adv/code/evaluate.sh b/Text-code/NL-code-search-Adv/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..9f135df15bc32325cd80ebc3364a2ef051a07766 --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/evaluate.sh @@ -0,0 +1,5 @@ +lang=python + +python evaluator.py \ +-a ../dataset/$lang/valid.jsonl \ +-p ../model/$lang/predictions.jsonl \ No newline at end of file diff --git a/Text-code/NL-code-search-Adv/code/evaluator.py b/Text-code/NL-code-search-Adv/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..a75b4944ca9d656641ed6214ce14c2709dca946e --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/evaluator.py @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+import logging +import sys,json +import numpy as np + +def read_answers(filename): + answers={} + with open(filename) as f: + for line in f: + line=line.strip() + js=json.loads(line) + answers[js['url']]=js['idx'] + return answers + +def read_predictions(filename): + predictions={} + with open(filename) as f: + for line in f: + line=line.strip() + js=json.loads(line) + predictions[js['url']]=js['answers'] + return predictions + +def calculate_scores(answers,predictions): + scores=[] + for key in answers: + if key not in predictions: + logging.error("Missing prediction for url {}.".format(key)) + sys.exit() + flag=False + for rank,idx in enumerate(predictions[key]): + if idx==answers[key]: + scores.append(1/(rank+1)) + flag=True + break + if flag is False: + scores.append(0) + result={} + result['MRR']=round(np.mean(scores),4) + return result + +def main(): + import argparse + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for NL-code-search-Adv dataset.') + parser.add_argument('--answers', '-a',help="filename of the labels, in txt format.") + parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.") + + + args = parser.parse_args() + answers=read_answers(args.answers) + predictions=read_predictions(args.predictions) + scores=calculate_scores(answers,predictions) + print(scores) + +if __name__ == '__main__': + main() diff --git a/Text-code/NL-code-search-Adv/code/model.py b/Text-code/NL-code-search-Adv/code/model.py new file mode 100644 index 0000000000000000000000000000000000000000..161d6fc55e24918921ba3d6bfb29cb0d691cc5cc --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/model.py @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, MSELoss + + +class Model(nn.Module): + def __init__(self, encoder, config, tokenizer, args): + super(Model, self).__init__() + self.encoder = encoder + self.config = config + self.tokenizer = tokenizer + self.args = args + + def forward(self, code_inputs, nl_inputs, return_vec=False, return_scores=False): + bs = code_inputs.shape[0] + inputs = torch.cat((code_inputs, nl_inputs), 0) + encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1)) + outputs = encoder_output[1] + + code_vec = outputs[:bs] + nl_vec = outputs[bs:] + + if return_vec: + return code_vec, nl_vec + scores = (nl_vec[:, None, :] * code_vec[None, :, :]).sum(-1) + if return_scores: + return scores + loss_fct = CrossEntropyLoss() + loss = loss_fct(scores, torch.arange(bs, device=scores.device)) + return loss, code_vec, nl_vec + + def feature(self, code_inputs, nl_inputs): + bs = code_inputs.shape[0] + inputs = torch.cat((code_inputs, nl_inputs), 0) + encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1)) + code_feature = encoder_output.pooler_output[:bs] + nl_feature = encoder_output.pooler_output[bs:] + return code_feature, nl_feature \ No newline at end of file diff --git a/Text-code/NL-code-search-Adv/code/run.py b/Text-code/NL-code-search-Adv/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..bab2b78d139d58152ba9f3076f1d4b9f9085eb10 --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/run.py @@ -0,0 +1,634 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa). +GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned +using a masked language modeling (MLM) loss. +""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset +from torch.utils.data.distributed import DistributedSampler +import json + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from tqdm import tqdm, trange +import multiprocessing +from model import Model + +cpu_cont = multiprocessing.cpu_count() +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaModel, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), + 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +} + + +class InputFeatures(object): + """A single training/test features for a example.""" + + def __init__(self, + code_tokens, + code_ids, + nl_tokens, + nl_ids, + url, + idx, + + ): + self.code_tokens = code_tokens + self.code_ids = code_ids + self.nl_tokens = nl_tokens + self.nl_ids = nl_ids + self.url = url + self.idx = idx + + +def convert_examples_to_features(js, tokenizer, args): + # code + if 'code_tokens' in js: + code = ' '.join(js['code_tokens']) + else: + code = ' '.join(js['function_tokens']) + code_tokens = tokenizer.tokenize(code)[:args.block_size - 2] + code_tokens = [tokenizer.cls_token] + code_tokens + [tokenizer.sep_token] + code_ids = tokenizer.convert_tokens_to_ids(code_tokens) + padding_length = args.block_size - len(code_ids) + code_ids += [tokenizer.pad_token_id] * padding_length + + nl = ' '.join(js['docstring_tokens']) + nl_tokens = tokenizer.tokenize(nl)[:args.block_size - 2] + nl_tokens = [tokenizer.cls_token] + nl_tokens + [tokenizer.sep_token] + nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens) + padding_length = args.block_size - len(nl_ids) + nl_ids += [tokenizer.pad_token_id] * padding_length + + return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids, js['url'], js['idx']) + + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, file_path=None): + self.examples = [] + 
data = [] + with open(file_path) as f: + for i, line in enumerate(f): + # if i>200: + # break + line = line.strip() + js = json.loads(line) + data.append(js) + for js in data: + self.examples.append(convert_examples_to_features(js, tokenizer, args)) + if 'train' in file_path: + for idx, example in enumerate(self.examples[:1]): + logger.info("*** Example ***") + logger.info("idx: {}".format(idx)) + logger.info("code_tokens: {}".format([x.replace('\u0120', '_') for x in example.code_tokens])) + logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids)))) + logger.info("nl_tokens: {}".format([x.replace('\u0120', '_') for x in example.nl_tokens])) + logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids)))) + + def __len__(self): + return len(self.examples) + + def __getitem__(self, i): + return (torch.tensor(self.examples[i].code_ids), torch.tensor(self.examples[i].nl_ids)) + + +def set_seed(seed=42): + random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, + batch_size=args.train_batch_size, num_workers=4, pin_memory=True) + args.max_steps = args.epoch * len(train_dataloader) + args.save_steps = len(train_dataloader) // 10 + args.warmup_steps = len(train_dataloader) + args.logging_steps = len(train_dataloader) + args.num_train_epochs = args.epoch + model.to(args.device) + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps * 0.1, + num_training_steps=args.max_steps) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + if os.path.exists(scheduler_last): + scheduler.load_state_dict(torch.load(scheduler_last)) + if os.path.exists(optimizer_last): + optimizer.load_state_dict(torch.load(optimizer_last)) + + # Train! 
+ logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", args.max_steps) + + global_step = args.start_step + tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0 + best_mrr = 0.0 + best_acc = 0.0 + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + + for idx in range(args.start_epoch, int(args.num_train_epochs)): + bar = train_dataloader + tr_num = 0 + train_loss = 0 + for step, batch in enumerate(tqdm(bar)): + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + + model.train() + loss, code_vec, nl_vec = model(code_inputs, nl_inputs) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + tr_num += 1 + train_loss += loss.item() + if avg_loss == 0: + avg_loss = tr_loss + avg_loss = round(train_loss / tr_num, 5) + if (step + 1) % 100 == 0: + logger.info("epoch {} step {} loss {}".format(idx, step + 1, avg_loss)) + # bar.set_description("epoch {} loss {}".format(idx,avg_loss)) + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag = True + avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + logging_loss = tr_loss + tr_nb = global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer, eval_when_training=True) + for key, value in results.items(): + logger.info(" %s = %s", key, round(value, 4)) + # Save model checkpoint + tr_num = 0 + train_loss = 0 + + if results['eval_mrr'] > best_acc: + best_acc = results['eval_mrr'] + logger.info(" " + "*" * 20) + logger.info(" Best mrr:%s", round(best_acc, 4)) + logger.info(" " + "*" * 20) + + checkpoint_prefix = 'checkpoint-best-mrr' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + output_dir = os.path.join(output_dir, '{}'.format('model.bin')) + torch.save(model_to_save.state_dict(), output_dir) + logger.info("Saving model checkpoint to %s", output_dir) + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) 
+ model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + # 每一轮记录表征 + # logger.info("Saving training feature") + # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1,num_workers=4,pin_memory=True) + # code_feature, nl_feature = [], [] + # for batch in tqdm(train_dataloader_bs1): + # code_inputs = batch[0].to(args.device) + # nl_inputs = batch[1].to(args.device) + # model.eval() + # with torch.no_grad(): + # cf, nf = model.feature(code_inputs=code_inputs, nl_inputs=nl_inputs) + # code_feature.append(cf.cpu().detach().numpy()) + # nl_feature.append(nf.cpu().detach().numpy()) + # code_feature_output_path = os.path.join(output_dir, 'code_feature.pkl') + # nl_feature_output_path = os.path.join(output_dir, 'nl_feature.pkl') + # with open(code_feature_output_path, 'wb') as f1, open(nl_feature_output_path, 'wb') as f2: + # pickle.dump(code_feature, f1) + # pickle.dump(code_feature, f2) + + +eval_dataset = None +def evaluate(args, model, tokenizer, eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + global eval_dataset + if eval_dataset is None: + eval_dataset = TextDataset(tokenizer, args, args.eval_data_file) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, + pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! 
+ logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + code_vecs = [] + nl_vecs = [] + for batch in eval_dataloader: + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + with torch.no_grad(): + lm_loss, code_vec, nl_vec = model(code_inputs, nl_inputs) + eval_loss += lm_loss.mean().item() + code_vecs.append(code_vec.cpu().numpy()) + nl_vecs.append(nl_vec.cpu().numpy()) + nb_eval_steps += 1 + code_vecs = np.concatenate(code_vecs, 0) + nl_vecs = np.concatenate(nl_vecs, 0) + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.tensor(eval_loss) + + scores = np.matmul(nl_vecs, code_vecs.T) + ranks = [] + for i in range(len(scores)): + score = scores[i, i] + rank = 1 + for j in range(len(scores)): + if i != j and scores[i, j] >= score: + rank += 1 + ranks.append(1 / rank) + + result = { + "eval_loss": float(perplexity), + "eval_mrr": float(np.mean(ranks)) + } + + return result + + +def test(args, model, tokenizer): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_dataset = TextDataset(tokenizer, args, args.test_data_file) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running Test *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + code_vecs = [] + nl_vecs = [] + for batch in eval_dataloader: + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + with torch.no_grad(): + lm_loss, code_vec, nl_vec = model(code_inputs, nl_inputs) + eval_loss += lm_loss.mean().item() + code_vecs.append(code_vec.cpu().numpy()) + nl_vecs.append(nl_vec.cpu().numpy()) + nb_eval_steps += 1 + code_vecs = np.concatenate(code_vecs, 0) + nl_vecs = np.concatenate(nl_vecs, 0) + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.tensor(eval_loss) + + scores = np.matmul(nl_vecs, code_vecs.T) + + sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1] + indexs = [] + urls = [] + for example in eval_dataset.examples: + indexs.append(example.idx) + urls.append(example.url) + with open(os.path.join(args.output_dir, "predictions.jsonl"), 'w') as f: + for index, url, sort_id in zip(indexs, urls, sort_ids): + js = {} + js['url'] = url + js['answers'] = [] + for idx in sort_id[:100]: + js['answers'].append(indexs[int(idx)]) + f.write(json.dumps(js) + '\n') + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--train_data_file", default=None, type=str, + help="The input training data file (a text file).") + parser.add_argument("--eval_data_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + parser.add_argument("--test_data_file", default=None, 
type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + + parser.add_argument("--model_type", default="bert", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--model_name_or_path", default=None, type=str, + help="The model checkpoint for weights initialization.") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--config_name", default="", type=str, + help="Optional pretrained config name or path if not the same as model_name_or_path") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Optional pretrained tokenizer name or path if not the same as model_name_or_path") + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=-1, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." + "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_test", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--train_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--eval_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=50, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--epoch', type=int, default=42, + help="random seed for initialization") + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." + "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + + args = parser.parse_args() + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + args.per_gpu_train_batch_size = args.train_batch_size # 修改//args.n_gpu + args.per_gpu_eval_batch_size = args.eval_batch_size # 修改//args.n_gpu + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args.seed) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training 
download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin') + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch)) + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + config.num_labels = 1 + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, + do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + if args.block_size <= 0: + args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model + args.block_size = min(args.block_size, tokenizer.max_len_single_sentence) + if args.model_name_or_path: + model = model_class.from_pretrained(args.model_name_or_path, + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + else: + model = model_class(config) + + model = Model(model, config, tokenizer, args) + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache + + train_dataset = TextDataset(tokenizer, args, args.train_data_file) + + if args.local_rank == 0: + torch.distributed.barrier() + + train(args, train_dataset, model, tokenizer) + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_2/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + result = evaluate(args, model, tokenizer) + logger.info("***** Eval results *****") + for key in sorted(result.keys()): + logger.info(" %s = %s", key, str(round(result[key], 4))) + + if args.do_test and args.local_rank in [-1, 0]: + checkpoint_prefix = 'epoch_2/subject_model.pth' + output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix)) + model.load_state_dict(torch.load(output_dir)) + model.to(args.device) + test(args, model, tokenizer) + + return results + + +if __name__ == "__main__": + main() + + diff --git a/Text-code/NL-code-search-Adv/code/test.sh b/Text-code/NL-code-search-Adv/code/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..217a265a2c909a972b31e53577633c2902c978df --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/test.sh @@ -0,0 +1,21 @@ +lang=python + +CUDA_VISIBLE_DEVICES=2,3 python run.py \ +--output_dir ../$lang/model \ +--model_type roberta \ 
+--config_name microsoft/codebert-base \ +--model_name_or_path microsoft/codebert-base \ +--tokenizer_name roberta-base \ +--do_test \ +--train_data_file ../dataset/$lang/train.jsonl \ +--eval_data_file ../dataset/$lang/valid.jsonl \ +--test_data_file ../dataset/$lang/valid.jsonl \ +--epoch 2 \ +--block_size 256 \ +--train_batch_size 32 \ +--eval_batch_size 64 \ +--learning_rate 5e-5 \ +--max_grad_norm 1.0 \ +--evaluate_during_training \ +--seed 123456 \ +2>&1| tee train.log \ No newline at end of file diff --git a/Text-code/NL-code-search-Adv/code/train.sh b/Text-code/NL-code-search-Adv/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..fa512b30389d3ea192947543c61b638c839ccd49 --- /dev/null +++ b/Text-code/NL-code-search-Adv/code/train.sh @@ -0,0 +1,20 @@ +lang=python + +CUDA_VISIBLE_DEVICES=2,3 python run.py \ +--output_dir ../$lang/model \ +--model_type roberta \ +--config_name microsoft/codebert-base \ +--model_name_or_path microsoft/codebert-base \ +--tokenizer_name roberta-base \ +--do_train \ +--train_data_file ../dataset/$lang/train.jsonl \ +--eval_data_file ../dataset/$lang/valid.jsonl \ +--epoch 2 \ +--block_size 256 \ +--train_batch_size 32 \ +--eval_batch_size 64 \ +--learning_rate 5e-5 \ +--max_grad_norm 1.0 \ +--evaluate_during_training \ +--seed 123456 \ +2>&1| tee train.log \ No newline at end of file diff --git a/Text-code/NL-code-search-Adv/dataset.zip b/Text-code/NL-code-search-Adv/dataset.zip new file mode 100644 index 0000000000000000000000000000000000000000..737413b472c2755a0055e66357a2e3c071afbbff --- /dev/null +++ b/Text-code/NL-code-search-Adv/dataset.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:415e59ea6ac79b87deca6e7adbfaab930c22e047d37bf81060c6cc1ccdedd1a7 +size 126844430 diff --git a/Text-code/NL-code-search-WebQuery/code/eval.sh b/Text-code/NL-code-search-WebQuery/code/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..f20d2f76e164c17a4a94e2dde4b6e97623918d45 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/eval.sh @@ -0,0 +1,9 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ +--model_type roberta \ +--do_eval \ +--dev_file cosqa-dev.json \ +--max_seq_length 200 \ +--per_gpu_eval_batch_size 2 \ +--data_dir ../data/CoSQA \ +--output_dir ../model/model_cosqa_continue_training \ +--encoder_name_or_path microsoft/codebert-base \ No newline at end of file diff --git a/Text-code/NL-code-search-WebQuery/code/evaluate.sh b/Text-code/NL-code-search-WebQuery/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..8bbf7814769d4872f3aa23acd816c28f26a237bd --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/evaluate.sh @@ -0,0 +1,3 @@ +python evaluator.py \ + --answers_webquery ../model/model_cosqa_continue_training/answer_predictions.txt \ + --predictions_webquery ../model/model_cosqa_continue_training/webquery_predictions.txt \ No newline at end of file diff --git a/Text-code/NL-code-search-WebQuery/code/evaluator.py b/Text-code/NL-code-search-WebQuery/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..58f61f83f00c2d463d2af1b05346dcd47515aa61 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/evaluator.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. 
+import logging +import sys, json, os +import numpy as np +import argparse +from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score + + +def read_answers(filename): + answers = {} + with open(filename, 'r', encoding='utf-8') as f: + for line in f.readlines(): + line = line.strip() + answers[line.split('\t')[0]] = int(line.split('\t')[1]) + return answers + + +def read_predictions(filename): + predictions = {} + with open(filename, 'r', encoding='utf-8') as f: + for line in f.readlines(): + line = line.strip() + predictions[line.split('\t')[0]] = int(line.split('\t')[1]) + return predictions + + +def calculate_scores(answers, predictions): + y_trues, y_preds = [], [] + for key in answers: + if key not in predictions: + logging.error("Missing prediction for index {}.".format(key)) + sys.exit() + y_trues.append(answers[key]) + y_preds.append(predictions[key]) + scores={} + scores['Precision']=precision_score(y_trues, y_preds) + scores['Recall']=recall_score(y_trues, y_preds) + scores['F1']=f1_score(y_trues, y_preds) + scores['Accuracy']=accuracy_score(y_trues, y_preds) + return scores + + +def main(): + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for ClozeTest-maxmin dataset.') + parser.add_argument('--answers_webquery', '-aw', help="filename of the labels on webquery test set, in txt format.") + parser.add_argument('--predictions_webquery', '-pw', help="filename of the leaderboard predictions on webquery test set, in txt format.") + args = parser.parse_args() + + answers = read_answers(args.answers_webquery) + predictions = read_predictions(args.predictions_webquery) + acc_webquery = calculate_scores(answers, predictions) + # print('NL-code-search-WebQuery on WebQuery test set, acc: {}'.format(acc_webquery)) + print('NL-code-search-WebQuery on WebQuery test set:') + print(acc_webquery) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/Text-code/NL-code-search-WebQuery/code/finetune.sh b/Text-code/NL-code-search-WebQuery/code/finetune.sh new file mode 100644 index 0000000000000000000000000000000000000000..8ffd41bde351f9084489acd889c62866df3a28e4 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/finetune.sh @@ -0,0 +1,18 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ +--model_type roberta \ +--do_train \ +--do_eval \ +--eval_all_checkpoints \ +--train_file cosqa-train.json \ +--dev_file cosqa-dev.json \ +--max_seq_length 200 \ +--per_gpu_train_batch_size 16 \ +--per_gpu_eval_batch_size 16 \ +--learning_rate 1e-5 \ +--num_train_epochs 3 \ +--gradient_accumulation_steps 1 \ +--warmup_steps 5000 \ +--evaluate_during_training \ +--data_dir ../data/CoSQA/ \ +--output_dir ../model/model_cosqa_continue_training \ +--encoder_name_or_path ./model_codesearchnet/checkpoint-best-aver \ No newline at end of file diff --git a/Text-code/NL-code-search-WebQuery/code/models.py b/Text-code/NL-code-search-WebQuery/code/models.py new file mode 100644 index 0000000000000000000000000000000000000000..7d3b377dbc4e8ebb3c2cd27ef7b73e82c7600f86 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/models.py @@ -0,0 +1,52 @@ +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy +# from transformers.modeling_bert import BertLayerNorm +import torch.nn.functional as F +from torch.nn import CrossEntropyLoss, MSELoss +# from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, +# BertConfig, BertForMaskedLM, BertTokenizer, +# GPT2Config, 
GPT2LMHeadModel, GPT2Tokenizer, +# OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, +# RobertaConfig, RobertaModel, RobertaTokenizer, +# DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +from transformers.modeling_utils import PreTrainedModel + + +class Model(PreTrainedModel): + def __init__(self, encoder, config, tokenizer, args): + super(Model, self).__init__(config) + self.encoder = encoder + self.config = config + self.tokenizer = tokenizer + self.mlp = nn.Sequential(nn.Linear(768*4, 768), + nn.Tanh(), + nn.Linear(768, 1), + nn.Sigmoid()) + self.loss_func = nn.BCELoss() + self.args = args + + def forward(self, code_inputs, nl_inputs, labels, return_vec=False, do_my_test=False): + bs = code_inputs.shape[0] + inputs = torch.cat((code_inputs, nl_inputs), 0) + encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1)) + outputs = encoder_output[1] + + code_vec = outputs[:bs] + nl_vec = outputs[bs:] + + code_feature = encoder_output.pooler_output[:bs] + nl_feature = encoder_output.pooler_output[bs:] + + if return_vec: + return code_vec, nl_vec + logits = self.mlp(torch.cat((nl_vec, code_vec, nl_vec-code_vec, nl_vec*code_vec), 1)) + loss = self.loss_func(logits, labels.float().unsqueeze(1)) + if do_my_test: + return loss, code_feature, nl_feature + predictions = (logits > 0.5).int() # (Batch, ) + # predictions = logits.float() + return loss, predictions + diff --git a/Text-code/NL-code-search-WebQuery/code/run.py b/Text-code/NL-code-search-WebQuery/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..6dec0db0a774268a80edddc42d1866ec974ca6fe --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/run.py @@ -0,0 +1,592 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import random +import pickle + +import numpy as np +import torch +from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset +from torch.utils.data.distributed import DistributedSampler +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter +from tqdm import tqdm, trange + +from transformers import (WEIGHTS_NAME, get_linear_schedule_with_warmup, AdamW, + RobertaConfig, + RobertaModel, + RobertaTokenizer) + +from models import Model +from utils import acc_and_f1, TextDataset +import multiprocessing +cpu_cont = multiprocessing.cpu_count() + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)} + + +def set_seed(seed=42): + random.seed(seed) + os.environ['PYHTONHASHSEED'] = str(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + + +def train(args, train_dataset, model, tokenizer): + """ Train the model """ + # if args.local_rank in [-1, 0]: + # tb_writer = SummaryWriter() + + args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True) + + args.save_steps = len(train_dataloader) if args.save_steps<=0 else args.save_steps + args.warmup_steps = len(train_dataloader) if args.warmup_steps<=0 else args.warmup_steps + args.logging_steps = len(train_dataloader) + + if args.max_steps > 0: + t_total = args.max_steps + args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + else: + t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs + + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, t_total) + + model.to(args.device) + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], + output_device=args.local_rank, + find_unused_parameters=True) + + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + if os.path.exists(scheduler_last): + scheduler.load_state_dict(torch.load(scheduler_last)) + + # Train! 
+ logger.info("***** Running training *****") + logger.info(" Num examples = %d", len(train_dataset)) + logger.info(" Num Epochs = %d", args.num_train_epochs) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", + args.train_batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1)) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = args.start_step + tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0 + best_results = {"acc": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0, "acc_and_f1": 0.0} + model.zero_grad() + train_iterator = trange(args.start_epoch, int(args.num_train_epochs), desc="Epoch", + disable=args.local_rank not in [-1, 0]) + model.train() + logger.info(model) + + for idx in train_iterator: + bar = tqdm(enumerate(train_dataloader)) + tr_num=0 + train_loss=0 + for step, batch in bar: + + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + labels = batch[2].to(args.device) + loss, predictions = model(code_inputs, nl_inputs, labels) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError( + "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + tr_num += 1 + train_loss += loss.item() + if avg_loss == 0: + avg_loss = tr_loss + avg_loss = round(train_loss/tr_num, 5) + bar.set_description("epoch {} step {} loss {}".format(idx, step+1, avg_loss)) + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + logging_loss = tr_loss + tr_nb = global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + + if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer, eval_when_training=True) + for key, value in results.items(): + logger.info(" %s = %s", key, round(value,4)) + # Save model checkpoint + if results['acc_and_f1'] >= best_results['acc_and_f1']: + best_results = results + + # save + checkpoint_prefix = 'checkpoint-best-aver' + output_dir = os.path.join(args.output_dir, checkpoint_prefix) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training + + torch.save(model_to_save.state_dict(), os.path.join(output_dir, 'pytorch_model.bin')) + tokenizer.save_pretrained(output_dir) + torch.save(args, os.path.join(output_dir, 'training_{}.bin'.format(idx))) + 
logger.info("Saving model checkpoint to %s", output_dir) + torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) + torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) + logger.info("Saving optimizer and scheduler states to %s", output_dir) + + if args.local_rank == -1: + checkpoint_prefix = 'checkpoint-last' + output_dir = os.path.join(args.output_dir, checkpoint_prefix) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + torch.save(model_to_save.state_dict(), os.path.join(output_dir, 'pytorch_model.bin')) + tokenizer.save_pretrained(output_dir) + + idx_file = os.path.join(output_dir, 'idx_file.txt') + with open(idx_file, 'w', encoding='utf-8') as idxf: + idxf.write(str(args.start_epoch + idx) + '\n') + logger.info("Saving model checkpoint to %s", output_dir) + torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) + torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) + logger.info("Saving optimizer and scheduler states to %s", output_dir) + step_file = os.path.join(output_dir, 'step_file.txt') + with open(step_file, 'w', encoding='utf-8') as stepf: + stepf.write(str(global_step) + '\n') + + if args.max_steps > 0 and global_step > args.max_steps: + train_iterator.close() + break + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + # 每一轮记录表征 + # logger.info("Saving training feature") + # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1, num_workers=4, + # pin_memory=True) + # code_feature, nl_feature = [], [] + # for batch in tqdm(train_dataloader_bs1): + # code_inputs = batch[0].to(args.device) + # nl_inputs = batch[1].to(args.device) + # labels = batch[2].to(args.device) + # model.eval() + # with torch.no_grad(): + # _, cf, nf = model(code_inputs=code_inputs, nl_inputs=nl_inputs, labels=labels, do_my_test=True) + # code_feature.append(cf.cpu().detach().numpy()) + # nl_feature.append(nf.cpu().detach().numpy()) + # code_feature_output_path = os.path.join(output_dir, 'code_feature.pkl') + # nl_feature_output_path = os.path.join(output_dir, 'nl_feature.pkl') + # with open(code_feature_output_path, 'wb') as f1, open(nl_feature_output_path, 'wb') as f2: + # pickle.dump(code_feature, f1) + # pickle.dump(code_feature, f2) + +def evaluate(args, model, tokenizer,eval_when_training=False): + eval_output_dir = args.output_dir + eval_data_path = os.path.join(args.data_dir, args.dev_file) + eval_dataset = TextDataset(tokenizer, args, eval_data_path, type='eval') + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = 
torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running evaluation *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + all_predictions = [] + all_labels = [] + for batch in eval_dataloader: + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + labels = batch[2].to(args.device) + with torch.no_grad(): + lm_loss, predictions = model(code_inputs, nl_inputs, labels) + # lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs) + eval_loss += lm_loss.mean().item() + all_predictions.append(predictions.cpu()) + all_labels.append(labels.cpu()) + nb_eval_steps += 1 + all_predictions = torch.cat(all_predictions, 0).squeeze().numpy() + all_labels = torch.cat(all_labels, 0).squeeze().numpy() + eval_loss = torch.tensor(eval_loss / nb_eval_steps) + + results = acc_and_f1(all_predictions, all_labels) + results.update({"eval_loss": float(eval_loss)}) + return results + + +def test(args, model, tokenizer): + if not args.prediction_file: + args.prediction_file = os.path.join(args.output_dir, 'predictions.txt') + if not os.path.exists(os.path.dirname(args.prediction_file)): + os.makedirs(os.path.dirname(args.prediction_file)) + if not args.answer_file: + args.answer_file = os.path.join(args.output_dir, 'golds.txt') + if not os.path.exists(os.path.dirname(args.answer_file)): + os.makedirs(os.path.dirname(args.answer_file)) + + test_data_path = os.path.join(args.data_dir, args.test_file) + eval_dataset = TextDataset(tokenizer, args, test_data_path) #, type='test') + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu evaluate + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Eval! + logger.info("***** Running Test *****") + logger.info(" Num examples = %d", len(eval_dataset)) + logger.info(" Batch size = %d", args.eval_batch_size) + + nb_eval_steps = 0 + all_predictions = [] + all_golds = [] + for batch in eval_dataloader: + code_inputs = batch[0].to(args.device) + nl_inputs = batch[1].to(args.device) + labels = batch[2].to(args.device) + with torch.no_grad(): + _, predictions = model(code_inputs, nl_inputs, labels) + all_predictions.append(predictions.cpu()) + all_golds.append(labels.cpu()) + nb_eval_steps += 1 + all_predictions = torch.cat(all_predictions, 0).squeeze().numpy() + all_golds = torch.cat(all_golds, 0).squeeze().numpy() + + logger.info("***** Saving Test Result *****") + with open(args.prediction_file,'w') as f: + for example, pred in zip(eval_dataset.examples, all_predictions.tolist()): + f.write(str(example.idx)+'\t'+str(int(pred))+'\n') + with open(args.answer_file,'w') as f: + for example, gold in zip(eval_dataset.examples, all_golds.tolist()): + f.write(str(example.idx)+'\t'+str(int(gold))+'\n') + + +def check_feature(): + code_feature = pickle.load(file=open('model_codesearchnet/checkpoint-all/epoch_0/code_feature.pkl', 'rb')) + print(len(code_feature)) + print(code_feature[0].shape) + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.") + parser.add_argument("--train_file", default=None, type=str, + help="The input training data file (a text file).") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--dev_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + parser.add_argument("--test_file", default=None, type=str, + help="An optional input evaluation data file to evaluate the perplexity on (a text file).") + + parser.add_argument("--model_type", default="roberta", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--pn_weight", type=float, default=1.0, + help="Ratio of positive examples in the sum of bce loss") + parser.add_argument("--encoder_name_or_path", default=None, type=str, + help="The model checkpoint for weights initialization.") + parser.add_argument("--checkpoint_path", default=None, type=str, + help="The checkpoint path of model to continue training.") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--config_name", default="", type=str, + help="Pretrained config name or path if not the same as model_name") + parser.add_argument("--tokenizer_name", default="", type=str, + help="Pretrained tokenizer name or path if not the same as model_name") + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--max_seq_length", default=-1, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." 
+ "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_predict", action='store_true', + help="Whether to run predict on the test set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Rul evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=3, type=int, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. Override num_train_epochs.") + parser.add_argument("--warmup_steps", default=0, type=int, + help="Linear warmup over warmup_steps.") + + parser.add_argument('--logging_steps', type=int, default=50, + help="Log every X updates steps.") + parser.add_argument('--save_steps', type=int, default=0, + help="Save checkpoint every X updates steps.") + parser.add_argument('--save_total_limit', type=int, default=None, + help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default') + parser.add_argument("--eval_all_checkpoints", action='store_true', + help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") + parser.add_argument("--no_cuda", action='store_true', + help="Avoid using CUDA when available") + parser.add_argument('--overwrite_output_dir', action='store_true', + help="Overwrite the content of the output directory") + parser.add_argument('--overwrite_cache', action='store_true', + help="Overwrite the cached training and evaluation sets") + parser.add_argument('--seed', type=int, default=42, + help="random seed for initialization") + + parser.add_argument('--fp16', action='store_true', + help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit") + parser.add_argument('--fp16_opt_level', type=str, default='O1', + help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
+ "See details at https://nvidia.github.io/apex/amp.html") + parser.add_argument("--local_rank", type=int, default=-1, + help="For distributed training: local_rank") + parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.") + parser.add_argument('--server_port', type=str, default='', help="For distant debugging.") + parser.add_argument("--pred_model_dir", default=None, type=str, + help='model for prediction') + parser.add_argument("--test_result_dir", default='test_results.tsv', type=str, + help='path to store test result') + parser.add_argument("--prediction_file", default=None, type=str, + help='path to save predictions result, note to specify task name') + parser.add_argument("--answer_file", default=None, type=str, + help='path to save gold result, note to specify task name') + + args = parser.parse_args() + + # Setup distant debugging if needed + if args.server_ip and args.server_port: + # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script + import ptvsd + print("Waiting for debugger attach") + ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) + ptvsd.wait_for_attach() + + # Setup CUDA, GPU & distributed training + if args.local_rank == -1 or args.no_cuda: + device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") + args.n_gpu = torch.cuda.device_count() + else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + torch.cuda.set_device(args.local_rank) + device = torch.device("cuda", args.local_rank) + torch.distributed.init_process_group(backend='nccl') + args.n_gpu = 1 + args.device = device + + # Setup logging + logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', + datefmt='%m/%d/%Y %H:%M:%S', + level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN) + logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", + args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16) + + # Set seed + set_seed(args.seed) + + # Load pretrained model and tokenizer + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab + + args.start_epoch = 0 + args.start_step = 0 + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last): + # args.encoder_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin') + args.config_name = os.path.join(checkpoint_last, 'config.json') + idx_file = os.path.join(checkpoint_last, 'idx_file.txt') + with open(idx_file, encoding='utf-8') as idxf: + args.start_epoch = int(idxf.readlines()[0].strip()) + 1 + + step_file = os.path.join(checkpoint_last, 'step_file.txt') + if os.path.exists(step_file): + with open(step_file, encoding='utf-8') as stepf: + args.start_step = int(stepf.readlines()[0].strip()) + + logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch)) + + + + config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] + config = config_class.from_pretrained(args.config_name if args.config_name else args.encoder_name_or_path, + cache_dir=args.cache_dir if args.cache_dir else None) + config.num_labels = 2 + tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.encoder_name_or_path, + 
do_lower_case=args.do_lower_case, + cache_dir=args.cache_dir if args.cache_dir else None) + if args.max_seq_length <= 0: + args.max_seq_length = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model + args.max_seq_length = min(args.max_seq_length, tokenizer.max_len_single_sentence) + if args.encoder_name_or_path: + model = model_class.from_pretrained(args.encoder_name_or_path, + from_tf=bool('.ckpt' in args.encoder_name_or_path), + config=config, + cache_dir=args.cache_dir if args.cache_dir else None) + else: + model = model_class(config) + + model = Model(model, config, tokenizer, args) + + if args.checkpoint_path: + model.load_state_dict(torch.load(os.path.join(args.checkpoint_path, 'pytorch_model.bin'))) + if args.local_rank == 0: + torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab + + logger.info("Training/evaluation parameters %s", args) + + # Training + if args.do_train: + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache + train_data_path = os.path.join(args.data_dir, args.train_file) + train_dataset = TextDataset(tokenizer, args, train_data_path, type='train') + train(args, train_dataset, model, tokenizer) + + # Evaluation + results = {} + if args.do_eval and args.local_rank in [-1, 0]: + checkpoint_prefix = 'checkpoint-best-aver' + output_dir = os.path.join(args.output_dir, checkpoint_prefix) + model.load_state_dict(torch.load(os.path.join(output_dir, 'pytorch_model.bin'))) + tokenizer = tokenizer.from_pretrained(output_dir) + model.to(args.device) + results = evaluate(args, model, tokenizer) + logger.info("***** Eval results *****") + for key in results.keys(): + logger.info(" Eval %s = %s", key, str(results[key])) + logger.info("Eval Model From: {}".format(os.path.join(output_dir, 'pytorch_model.bin'))) + logger.info("***** Eval results *****") + + if args.do_predict and args.local_rank in [-1, 0]: + logger.info("***** Testing results *****") + checkpoint_prefix = 'checkpoint-best-aver' + if checkpoint_prefix not in args.output_dir and \ + os.path.exists(os.path.join(args.output_dir, checkpoint_prefix)): + output_dir = os.path.join(args.output_dir, checkpoint_prefix) + else: + output_dir = args.output_dir + if not args.pred_model_dir: + model_path = os.path.join(output_dir, 'pytorch_model.bin') + else: + model_path = os.path.join(args.pred_model_dir, 'pytorch_model.bin') + model.load_state_dict(torch.load(model_path)) + tokenizer = tokenizer.from_pretrained(output_dir) + model.to(args.device) + test(args, model, tokenizer) + logger.info("Test Model From: {}".format(model_path)) + return results + + +if __name__ == "__main__": + main() diff --git a/Text-code/NL-code-search-WebQuery/code/train.sh b/Text-code/NL-code-search-WebQuery/code/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..e2c6216c7072b65d7c5bb57b653c8e84f57a95e4 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/train.sh @@ -0,0 +1,18 @@ +CUDA_VISIBLE_DEVICES=0,1 python run.py \ +--model_type roberta \ +--do_train \ +--do_eval \ +--eval_all_checkpoints \ +--train_file train_codesearchnet_7.json \ +--dev_file dev_codesearchnet.json \ +--max_seq_length 200 \ +--per_gpu_train_batch_size 16 \ +--per_gpu_eval_batch_size 16 \ +--learning_rate 1e-5 \ +--num_train_epochs 3 \ +--gradient_accumulation_steps 1 \ +--warmup_steps 1000 \ 
+--evaluate_during_training \ +--data_dir ../data/CodeSearchNet/ \ +--output_dir ../model/model_codesearchnet \ +--encoder_name_or_path microsoft/codebert-base \ No newline at end of file diff --git a/Text-code/NL-code-search-WebQuery/code/utils.py b/Text-code/NL-code-search-WebQuery/code/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef1a905cc574152995712b9ebd2c3b7994027da --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/code/utils.py @@ -0,0 +1,136 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BERT classification fine-tuning: utilities to work with GLUE tasks """ + +from __future__ import absolute_import, division, print_function + +import csv +import json +import logging +import os +import sys +from io import open +from sklearn.metrics import f1_score, precision_score, recall_score +from torch.utils.data import Dataset +import torch + +csv.field_size_limit(sys.maxsize) +logger = logging.getLogger(__name__) + + +class InputFeatures(object): + """A single training/test features for a example.""" + def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, label, idx): + self.code_tokens = code_tokens + self.code_ids = code_ids + self.nl_tokens = nl_tokens + self.nl_ids = nl_ids + self.label = label + self.idx = idx + + +class InputFeaturesTriplet(InputFeatures): + """A single training/test features for a example. Add docstring seperately. 
""" + def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, ds_tokens, ds_ids, label, idx): + super(InputFeaturesTriplet, self).__init__(code_tokens, code_ids, nl_tokens, nl_ids, label, idx) + self.ds_tokens = ds_tokens + self.ds_ids = ds_ids + + +def convert_examples_to_features(js, tokenizer, args): + # label + label = js['label'] + + # code + code = js['code'] + code_tokens = tokenizer.tokenize(code)[:args.max_seq_length-2] + code_tokens = [tokenizer.cls_token]+code_tokens+[tokenizer.sep_token] + code_ids = tokenizer.convert_tokens_to_ids(code_tokens) + padding_length = args.max_seq_length - len(code_ids) + code_ids += [tokenizer.pad_token_id]*padding_length + + nl = js['doc'] # query + nl_tokens = tokenizer.tokenize(nl)[:args.max_seq_length-2] + nl_tokens = [tokenizer.cls_token]+nl_tokens+[tokenizer.sep_token] + nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens) + padding_length = args.max_seq_length - len(nl_ids) + nl_ids += [tokenizer.pad_token_id]*padding_length + + return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids, label, js['idx']) + + +class TextDataset(Dataset): + def __init__(self, tokenizer, args, file_path=None, type=None): + # json file: dict: idx, query, doc, code + self.examples = [] + self.type = type + data=[] + with open(file_path, 'r') as f: + data = json.load(f) + # data = data[:114560] + if self.type == 'test': + for js in data: + js['label'] = 0 + for js in data: + self.examples.append(convert_examples_to_features(js, tokenizer, args)) + if 'train' in file_path: + for idx, example in enumerate(self.examples[:3]): + logger.info("*** Example ***") + logger.info("idx: {}".format(idx)) + logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens])) + logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids)))) + logger.info("nl_tokens: {}".format([x.replace('\u0120','_') for x in example.nl_tokens])) + logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids)))) + + def __len__(self): + return len(self.examples) + + def __getitem__(self, i): + """ return both tokenized code ids and nl ids and label""" + return torch.tensor(self.examples[i].code_ids), \ + torch.tensor(self.examples[i].nl_ids),\ + torch.tensor(self.examples[i].label) + + + + +def simple_accuracy(preds, labels): + return (preds == labels).mean() + + +def acc_and_f1(preds, labels): + acc = simple_accuracy(preds, labels) + f1 = f1_score(y_true=labels, y_pred=preds) + prec = precision_score(y_true=labels, y_pred=preds) + reca = recall_score(y_true=labels, y_pred=preds) + return { + "acc": acc, + "precision": prec, + "recall": reca, + "f1": f1, + "acc_and_f1": (acc + f1) / 2, + } + + +def compute_metrics(task_name, preds, labels): + assert len(preds) == len(labels) + if task_name == "webquery": + return acc_and_f1(preds, labels) + if task_name == "staqc": + return acc_and_f1(preds, labels) + else: + raise KeyError(task_name) + diff --git a/Text-code/NL-code-search-WebQuery/data.zip b/Text-code/NL-code-search-WebQuery/data.zip new file mode 100644 index 0000000000000000000000000000000000000000..37a923e4415faae2179ebfd73d12eec517ea1bd8 --- /dev/null +++ b/Text-code/NL-code-search-WebQuery/data.zip @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24e8ea547211a3830aa3e7ba1246aaf01d6ef6b875f7c2a5e8b4fb0af5bca24b +size 361434377 diff --git a/Text-code/text-to-code/code/beam.py b/Text-code/text-to-code/code/beam.py new file mode 100644 index 0000000000000000000000000000000000000000..4b493e55626cd6cb1ab14d099691deb868e32992 
--- /dev/null +++ b/Text-code/text-to-code/code/beam.py @@ -0,0 +1,118 @@ +import torch +import torch.nn as nn +import torch +from torch.autograd import Variable +import copy + +class Beam(object): + def __init__(self, size,sos,eos): + self.size = size + self.tt = torch.cuda + # The score for each translation on the beam. + self.scores = self.tt.FloatTensor(size).zero_() + # The backpointers at each time-step. + self.prevKs = [] + # The outputs at each time-step. + self.nextYs = [self.tt.LongTensor(size) + .fill_(0)] + self.nextYs[0][:] = sos + # Has EOS topped the beam yet. + self._eos = eos + self.eosTop = False + # Time and k pair for finished. + self.finished = [] + + def getCurrentState(self): + "Get the outputs for the current timestep." + batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1) + return batch + + def getCurrentOrigin(self): + "Get the backpointers for the current timestep." + return self.prevKs[-1] + + def advance(self, wordLk): + """ + Given prob over words for every last beam `wordLk` and attention + `attnOut`: Compute and update the beam search. + + Parameters: + + * `wordLk`- probs of advancing from the last step (K x words) + * `attnOut`- attention at the last step + + Returns: True if beam search is complete. + """ + numWords = wordLk.size(1) + + # Sum the previous scores. + if len(self.prevKs) > 0: + beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk) + + # Don't let EOS have children. + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + beamLk[i] = -1e20 + else: + beamLk = wordLk[0] + flatBeamLk = beamLk.view(-1) + bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True) + + self.scores = bestScores + + # bestScoresId is flattened beam x word array, so calculate which + # word and beam each score came from + prevK = bestScoresId // numWords + self.prevKs.append(prevK) + self.nextYs.append((bestScoresId - prevK * numWords)) + + + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] == self._eos: + s = self.scores[i] + self.finished.append((s, len(self.nextYs) - 1, i)) + + # End condition is when top-of-beam is EOS and no global score. + if self.nextYs[-1][0] == self._eos: + self.eosTop = True + + def done(self): + return self.eosTop and len(self.finished) >=self.size + + def getFinal(self): + if len(self.finished) == 0: + self.finished.append((self.scores[0], len(self.nextYs) - 1, 0)) + self.finished.sort(key=lambda a: -a[0]) + if len(self.finished) != self.size: + unfinished=[] + for i in range(self.nextYs[-1].size(0)): + if self.nextYs[-1][i] != self._eos: + s = self.scores[i] + unfinished.append((s, len(self.nextYs) - 1, i)) + unfinished.sort(key=lambda a: -a[0]) + self.finished+=unfinished[:self.size-len(self.finished)] + return self.finished[:self.size] + + def getHyp(self, beam_res): + """ + Walk back to construct the full hypothesis. 
+ """ + hyps=[] + for _,timestep, k in beam_res: + hyp = [] + for j in range(len(self.prevKs[:timestep]) - 1, -1, -1): + hyp.append(self.nextYs[j+1][k]) + k = self.prevKs[j][k] + hyps.append(hyp[::-1]) + return hyps + + def buildTargetTokens(self, preds): + sentence=[] + for pred in preds: + tokens = [] + for tok in pred: + if tok==self._eos: + break + tokens.append(tok) + sentence.append(tokens) + return sentence diff --git a/Text-code/text-to-code/code/bleu.py b/Text-code/text-to-code/code/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..47e1335796082b5568089150d7799d37c0527ada --- /dev/null +++ b/Text-code/text-to-code/code/bleu.py @@ -0,0 +1,134 @@ +# Copyright 2017 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Python implementation of BLEU and smooth-BLEU. + +This module provides a Python implementation of BLEU and smooth-BLEU. +Smooth BLEU is computed following the method outlined in the paper: +Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic +evaluation metrics for machine translation. COLING 2004. +""" + +import collections +import math + + +def _get_ngrams(segment, max_order): + """Extracts all n-grams upto a given maximum order from an input segment. + + Args: + segment: text segment from which n-grams will be extracted. + max_order: maximum length in tokens of the n-grams returned by this + methods. + + Returns: + The Counter containing all n-grams upto max_order in segment + with a count of how many times each n-gram occurred. + """ + ngram_counts = collections.Counter() + for order in range(1, max_order + 1): + for i in range(0, len(segment) - order + 1): + ngram = tuple(segment[i:i+order]) + ngram_counts[ngram] += 1 + return ngram_counts + + +def compute_bleu(reference_corpus, translation_corpus, max_order=4, + smooth=False): + """Computes BLEU score of translated segments against one or more references. + + Args: + reference_corpus: list of lists of references for each translation. Each + reference should be tokenized into a list of tokens. + translation_corpus: list of translations to score. Each translation + should be tokenized into a list of tokens. + max_order: Maximum n-gram order to use when computing BLEU score. + smooth: Whether or not to apply Lin et al. 2004 smoothing. + + Returns: + 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram + precisions and brevity penalty. 
+ """ + matches_by_order = [0] * max_order + possible_matches_by_order = [0] * max_order + reference_length = 0 + translation_length = 0 + for (references, translation) in zip(reference_corpus, + translation_corpus): + reference_length += min(len(r) for r in references) + translation_length += len(translation) + + merged_ref_ngram_counts = collections.Counter() + for reference in references: + merged_ref_ngram_counts |= _get_ngrams(reference, max_order) + translation_ngram_counts = _get_ngrams(translation, max_order) + overlap = translation_ngram_counts & merged_ref_ngram_counts + for ngram in overlap: + matches_by_order[len(ngram)-1] += overlap[ngram] + for order in range(1, max_order+1): + possible_matches = len(translation) - order + 1 + if possible_matches > 0: + possible_matches_by_order[order-1] += possible_matches + + precisions = [0] * max_order + for i in range(0, max_order): + if smooth: + precisions[i] = ((matches_by_order[i] + 1.) / + (possible_matches_by_order[i] + 1.)) + else: + if possible_matches_by_order[i] > 0: + precisions[i] = (float(matches_by_order[i]) / + possible_matches_by_order[i]) + else: + precisions[i] = 0.0 + + if min(precisions) > 0: + p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions) + geo_mean = math.exp(p_log_sum) + else: + geo_mean = 0 + + ratio = float(translation_length) / reference_length + + if ratio > 1.0: + bp = 1. + else: + bp = math.exp(1 - 1. / ratio) + + bleu = geo_mean * bp + + return (bleu, precisions, bp, ratio, translation_length, reference_length) + + +def _bleu(ref_file, trans_file, subword_option=None): + max_order = 4 + smooth = True + ref_files = [ref_file] + reference_text = [] + for reference_filename in ref_files: + with open(reference_filename) as fh: + reference_text.append(fh.readlines()) + per_segment_references = [] + for references in zip(*reference_text): + reference_list = [] + for reference in references: + reference_list.append(reference.strip().split()) + per_segment_references.append(reference_list) + translations = [] + with open(trans_file) as fh: + for line in fh: + translations.append(line.strip().split()) + bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth) + return round(100 * bleu_score,2) \ No newline at end of file diff --git a/Text-code/text-to-code/code/dataset.py b/Text-code/text-to-code/code/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..e2b0db54519397e03a5085fe76fcc0e0b7ec9526 --- /dev/null +++ b/Text-code/text-to-code/code/dataset.py @@ -0,0 +1,116 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import gc +import shutil +import json + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + + +class concodeDataset(Dataset): + def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'): + if args.local_rank==-1: + local_rank=0 + world_size=1 + else: + local_rank=args.local_rank + world_size=torch.distributed.get_world_size() + + self.block_size = block_size + self.mode = mode + + if not os.path.exists(args.output_dir): + os.makedirs(args.output_dir) + cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank)) + if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache: + if file_type == 'train': + logger.warning("Loading features from cached file %s", cached_file) + with open(cached_file, 'rb') as handle: + data = pickle.load(handle) + self.inputs = data['inputs'] + self.token_labels = data['token_labels'] + + else: + self.inputs = [] + self.token_labels = [] + + datafile = os.path.join(args.data_dir, f"{file_type}.json") + if file_type == 'train': + logger.warning("Creating features from dataset file at %s", datafile) + datas = open(datafile).readlines() + + length = len(datas) + logger.info("Data size: %d"%(length)) + for idx, x in enumerate(datas): + if idx % (length//10) == 0: + percent = idx / (length//10) * 10 + logger.warning("Rank %d, load %d"%(local_rank, percent)) + if idx % world_size != local_rank: + continue + x = json.loads(x) + code = tokenizer.encode(x["code"]) + nl = tokenizer.encode(x["nl"]) + + input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer) + self.inputs.append(input_ids) + self.token_labels.append(input_labels) + + if file_type == 'train': + logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs))) + logger.warning("Saving features into cached file %s", cached_file) + if mode != 'test': + with open(cached_file, 'wb') as handle: + pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL) + + def pad_and_get_mask(self, code, nl, tokenizer): + if self.mode == 'test': + code = [] + while (len(code) + len(nl) + 2 > self.block_size): + if (len(code) > len(nl)): + code = code[:-1] + else: + nl = nl[:-1] + if self.mode == 'train': + inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id] + labels = [1] * len(nl) + [2] * (len(code)+1) + [0] + else: + inputs = nl + [tokenizer.bos_token_id] + labels = [1] * len(nl) + [2] + return inputs, labels + assert len(inputs) <= self.block_size + pad_len = self.block_size - len(inputs) + inputs += [tokenizer.pad_token_id] * pad_len + labels += [0] * pad_len + assert len(inputs) == len(labels) + return inputs, labels + + 
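+ # Illustrative example of the tagging scheme above (hypothetical ids, block_size=8): in 'train' mode,
+ # nl=[5, 6] and code=[7] produce
+ #   inputs = [5, 6, bos_id, 7, eos_id, pad_id, pad_id, pad_id]
+ #   labels = [1, 1,   2,    2,    0,      0,      0,      0 ]
+ # 1 tags NL positions, 2 tags positions whose next token is code (or EOS), 0 tags EOS and padding;
+ # run.py builds its loss mask from labels == 2, so only the code span contributes to the LM loss.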
+ def __len__(self): + return len(self.inputs) + + def __getitem__(self, item): + return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item]) diff --git a/Text-code/text-to-code/code/eval.sh b/Text-code/text-to-code/code/eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..acefab7a7a72205451a31043313e6638d5c58652 --- /dev/null +++ b/Text-code/text-to-code/code/eval.sh @@ -0,0 +1,17 @@ +LANG=java +DATADIR=../dataset +OUTPUTDIR=../model +PRETRAINDIR=../model/checkpoint-last +LOGFILE=text2code_concode_eval.log + +CUDA_VISIBLE_DEVICES=0 python run.py \ + --data_dir=$DATADIR \ + --langs=$LANG \ + --output_dir=$OUTPUTDIR \ + --pretrain_dir=$PRETRAINDIR \ + --log_file=$LOGFILE \ + --model_type=gpt2 \ + --block_size=512 \ + --do_eval \ + --logging_steps=100 \ + --seed=42 \ No newline at end of file diff --git a/Text-code/text-to-code/code/evaluate.sh b/Text-code/text-to-code/code/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..fae379e830801a3aca3c56ee3f9fd2b259bc157d --- /dev/null +++ b/Text-code/text-to-code/code/evaluate.sh @@ -0,0 +1,3 @@ +python evaluator.py \ +-a=../dataset/dev.json \ +-p=../model/dev.output \ No newline at end of file diff --git a/Text-code/text-to-code/code/evaluator.py b/Text-code/text-to-code/code/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..ed57ee70de10077b73d206683505dc9ba3e6ea18 --- /dev/null +++ b/Text-code/text-to-code/code/evaluator.py @@ -0,0 +1,42 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT license. +import os +import logging +import argparse +from bleu import _bleu +import json + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +def main(): + parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (line level).') + parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in json format.") + parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.") + args = parser.parse_args() + + preds = open(args.predictions, "r").readlines() + gts = open(args.answers, "r").readlines() + + assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}" + + total = len(gts) + EM = 0.0 + with open("ground_truth.txt", "w") as wf: + for pred, gt in zip(preds, gts): + pred = pred.strip() + gt = json.loads(gt)["code"] + wf.write(gt+"\n") + if pred.split() == gt.split(): + EM += 1 + + bleu_score = round(_bleu("ground_truth.txt", args.predictions), 2) + logger.info(f"BLEU: {bleu_score}, EM: {round(EM/total*100, 2)}") + + try: + os.remove("ground_truth.txt") + except Exception: + pass + +if __name__ == "__main__": + main() diff --git a/Text-code/text-to-code/code/run.py b/Text-code/text-to-code/code/run.py new file mode 100644 index 0000000000000000000000000000000000000000..98901742228fe36492689af682f13932298c9016 --- /dev/null +++ b/Text-code/text-to-code/code/run.py @@ -0,0 +1,673 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Text to code generation pipeline in CodeXGLUE +""" + +from __future__ import absolute_import, division, print_function + +import argparse +import glob +import logging +import os +import pickle +import random +import re +import shutil +import json + +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset +from torch.utils.data.distributed import DistributedSampler +from dataset import concodeDataset +from beam import Beam + +try: + from torch.utils.tensorboard import SummaryWriter +except: + from tensorboardX import SummaryWriter + +from torch.nn import CrossEntropyLoss + +from bleu import _bleu +from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup, + BertConfig, BertForMaskedLM, BertTokenizer, + GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, + OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, + RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, + DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) + +logger = logging.getLogger(__name__) + +MODEL_CLASSES = { + 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), + 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), + 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), + 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), + 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer) +} + + + +def load_and_cache_examples(args, tokenizer, evaluate=False): + dataset = concodeDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train', + block_size=args.block_size) + return dataset + + +def set_seed(args): + random.seed(args.seed) + np.random.seed(args.seed) + torch.manual_seed(args.seed) + if args.n_gpu > 0: + torch.cuda.manual_seed_all(args.seed) + + +def update_config(model, tokenizer): + model.config.bos_token_id = tokenizer.bos_token_id + model.config.eos_token_id = tokenizer.eos_token_id + model.config.pad_token_id = tokenizer.pad_token_id + + +def train(args, train_dataset, model, tokenizer, fh, pool): + """ Train the model """ + if args.local_rank in [-1, 0]: + args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard') + if not os.path.exists(args.tensorboard_dir): + os.makedirs(args.tensorboard_dir) + tb_writer = SummaryWriter(args.tensorboard_dir) + + args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) + train_sampler = RandomSampler(train_dataset) + + train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True) + total_examples = len(train_dataset) * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1) + batch_size = args.batch_size * args.gradient_accumulation_steps * ( + torch.distributed.get_world_size() if args.local_rank != -1 else 1) + # if args.max_steps > 0: + # t_total = args.max_steps + # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 + if args.num_train_epochs > 0: + t_total = total_examples // batch_size * args.num_train_epochs + args.max_steps = t_total + 
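# Rough illustration of the step count (hypothetical numbers): 100,000 examples with per_gpu_train_batch_size=2
+ # on 2 GPUs under DataParallel (world size 1) and gradient_accumulation_steps=1 give batch_size = 4, so
+ # t_total = 100000 // 4 * num_train_epochs, i.e. 25,000 optimizer steps per epoch. +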
model.to(args.device) + if args.local_rank not in [-1, 0]: + torch.distributed.barrier() + # Prepare optimizer and schedule (linear warmup and decay) + no_decay = ['bias', 'LayerNorm.weight'] + optimizer_grouped_parameters = [ + {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], + 'weight_decay': args.weight_decay}, + {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} + ] + optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) + scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, + num_training_steps=t_total) + checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last') + scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt') + optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt') + if os.path.exists(scheduler_last): + scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu")) + if os.path.exists(optimizer_last): + optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu")) + if args.local_rank == 0: + torch.distributed.barrier() + if args.fp16: + try: + from apex import amp + except ImportError: + raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") + model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) + + # multi-gpu training (should be after apex fp16 initialization) + if args.n_gpu > 1: + model = torch.nn.DataParallel(model) + + # Distributed training (should be after apex fp16 initialization) + if args.local_rank != -1: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node], + output_device=args.local_rank%args.gpu_per_node, + find_unused_parameters=True) + + # Train! + logger.info("***** Running training *****") + logger.info(" Num examples = %d", total_examples ) + logger.info(" Num epoch = %d", t_total*batch_size//total_examples) + logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) + logger.info(" Total train batch size (w. 
parallel, distributed & accumulation) = %d", batch_size) + logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) + logger.info(" Total optimization steps = %d", t_total) + + global_step = args.start_step + tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0,0.0,0 + # model.resize_token_embeddings(len(tokenizer)) + model.zero_grad() + set_seed(args) # Added here for reproducibility (even between python 2 and 3) + + best_bleu = 0.0 + + for idx in range(args.start_epoch, int(args.num_train_epochs)): + for step, (batch, token_labels) in enumerate(train_dataloader): + inputs = batch.to(args.device) + attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device) + loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device) + model.train() + # outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask) + # loss = outputs[0] + outputs = model(inputs, attention_mask=attn_mask) + logits = outputs[0] + labels = inputs + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1) + ids = torch.nonzero(flatten_shift_loss_mask).view(-1) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids]) + + if args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + if args.gradient_accumulation_steps > 1: + loss = loss / args.gradient_accumulation_steps + + if args.fp16: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) + else: + loss.backward() + torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) + + tr_loss += loss.item() + + if (step + 1) % args.gradient_accumulation_steps == 0: + optimizer.step() + optimizer.zero_grad() + scheduler.step() + global_step += 1 + output_flag=True + avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4) + if global_step % args.logging_steps == 0: + logger.info(" steps: %s ppl: %s", global_step, round(avg_loss,5)) + if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: + # Log metrics + tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step) + tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step) + logging_loss = tr_loss + tr_nb=global_step + + if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: + checkpoint_prefix = "checkpoint" + # Save model checkpoint + if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well + results = evaluate(args, model, tokenizer, eval_when_training=True) + for key, value in results.items(): + tb_writer.add_scalar('eval_{}'.format(key), value, global_step) + logger.info(" %s = %s", key, round(value,4)) + output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4))) + # dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=100) + # logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}") + # output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(dev_bleu,2))) + # if dev_bleu > best_bleu: + # best_bleu = dev_bleu + # logger.info(f"best bleu updated. 
saved in {output_dir}") + # logger.info(f"best bleu: {best_bleu}") + else: + output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = ( + model.module if hasattr(model, "module") else model + ) # Take care of distributed/parallel training + model_to_save.save_pretrained(output_dir) + tokenizer.save_pretrained(output_dir) + + torch.save(args, os.path.join(output_dir, "training_args.bin")) + logger.info("Saving model checkpoint to %s", output_dir) + + # _rotate_checkpoints(args, checkpoint_prefix) + last_output_dir = os.path.join(args.output_dir, 'checkpoint-last') + if not os.path.exists(last_output_dir): + os.makedirs(last_output_dir) + model_to_save.save_pretrained(last_output_dir) + tokenizer.save_pretrained(last_output_dir) + idx_file = os.path.join(last_output_dir, 'idx_file.txt') + with open(idx_file, 'w', encoding='utf-8') as idxf: + idxf.write(str(0) + '\n') + + torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt")) + torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt")) + logger.info("Saving optimizer and scheduler states to %s", last_output_dir) + + step_file = os.path.join(last_output_dir, 'step_file.txt') + with open(step_file, 'w', encoding='utf-8') as stepf: + stepf.write(str(global_step) + '\n') + + # torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) + # torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) + # logger.info("Saving optimizer and scheduler states to %s", output_dir) + + + if args.max_steps > 0 and global_step > args.max_steps: + break + if args.max_steps > 0 and global_step > args.max_steps: + break + + # 每一轮记录checkpoint + output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1)) + if not os.path.exists(output_dir): + os.makedirs(output_dir) + model_to_save = model.module if hasattr(model, 'module') else model + ckpt_output_path = os.path.join(output_dir, 'subject_model.pth') + logger.info("Saving model checkpoint to %s", ckpt_output_path) + torch.save(model_to_save.state_dict(), ckpt_output_path) + + if args.local_rank in [-1, 0]: + tb_writer.close() + + return global_step, tr_loss / global_step + + +def evaluate(args, model, tokenizer, prefix="", eval_when_training=False): + # Loop to handle MNLI double evaluation (matched, mis-matched) + eval_output_dir = args.output_dir + + eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True) + + if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: + os.makedirs(eval_output_dir) + + args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) + # Note that DistributedSampler samples randomly + eval_sampler = SequentialSampler(eval_dataset) + eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) + + # multi-gpu evaluate + if args.n_gpu > 1 and eval_when_training is False: + model = torch.nn.DataParallel(model) + + # Eval! 
+ #logger.info("***** Running evaluation {} *****".format(prefix)) + #logger.info(" Num examples = %d", len(eval_dataset)) + #logger.info(" Batch size = %d", args.eval_batch_size) + eval_loss = 0.0 + nb_eval_steps = 0 + model.eval() + + for step, (batch, token_labels) in enumerate(eval_dataloader): + + inputs = batch.to(args.device) + attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device) + loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device) + with torch.no_grad(): + outputs = model(inputs, attention_mask=attn_mask) + logits = outputs[0] + labels = inputs + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1) + ids = torch.nonzero(flatten_shift_loss_mask).view(-1) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids]) + eval_loss += loss.mean().item() + nb_eval_steps += 1 + + # inputs = batch.to(args.device) + # attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device) + # loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device) + # with torch.no_grad(): + # outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask) + # loss = outputs[0] + # eval_loss += loss.mean().item() + # nb_eval_steps += 1 + + eval_loss = eval_loss / nb_eval_steps + perplexity = torch.exp(torch.tensor(eval_loss)) + + result = { + "perplexity": float(perplexity) + } + + output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") + with open(output_eval_file, "w") as writer: + #logger.info("***** Eval results {} *****".format(prefix)) + for key in sorted(result.keys()): + #logger.info(" %s = %s", key, str(result[key])) + writer.write("%s = %s\n" % (key, str(result[key]))) + + return result + +def eval_bleu(args, model, tokenizer, file_type='test', num=2000): + dataset = concodeDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size, mode='test') + test_sampler = SequentialSampler(dataset) + test_dataloader = DataLoader(dataset, sampler=test_sampler, batch_size=1) + model.to(args.device) + model.zero_grad() + model.eval() + + preds = [] + max_gen_len = 100 + for step, (batch, token_labels) in enumerate(test_dataloader): + if step >= num: + break + inputs = batch.to(args.device) + # with torch.no_grad(): + # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70, \ + # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id) + # # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95, \ + # # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id) + # # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70) + # # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95) + # generation = tokenizer.decode(outputs[0])[len(tokenizer.decode(inputs[0])):] + # preds.append(generation.rstrip("")) + + with torch.no_grad(): + beam_size = 10 + m = torch.nn.LogSoftmax(dim=-1) + outputs = model(inputs)[1] + p = [] + zero = 
torch.cuda.LongTensor(1).fill_(0) + for i in range(inputs.shape[0]): + # Compatible with transformers version 3.3.0 and 4.13.0 + past = [torch.cat([x[0].unsqueeze(0),x[1].unsqueeze(0)],dim=0) if type(x)==tuple else x for x in outputs] + past_hidden = [x[:, i:i+1].expand(-1, beam_size, -1, -1, -1) for x in past] + # context_mask=source_mask[i:i+1,:].expand(beam_size,-1) + beam = Beam(beam_size, tokenizer.bos_token_id, tokenizer.eos_token_id) + input_ids = None + for _ in range(max_gen_len): + if beam.done(): + break + input_ids = beam.getCurrentState() + # context_mask=torch.cat((context_mask,input_ids*0+1),-1) + # mask=context_mask.unsqueeze(0).unsqueeze(-2).unsqueeze(-2).expand(self.config.n_layer, -1, -1, -1, -1) + transformer_outputs = model(input_ids, past_key_values=past_hidden) + out = m(transformer_outputs[0][:, -1, :]).data + # out = self.lsm(self.lm_head(transformer_outputs[0][:,-1,:])).data + beam.advance(out) + past = [torch.cat([x[0].unsqueeze(0),x[1].unsqueeze(0)],dim=0) if type(x)==tuple else x for x in transformer_outputs[1]] + past_hidden = [x.data.index_select(1, beam.getCurrentOrigin()) for x in past] + hyp = beam.getHyp(beam.getFinal()) + pred =beam.buildTargetTokens(hyp)[:beam_size] + + pred = [torch.cat([x.view(-1) for x in p]+[zero]*(max_gen_len-len(p))).view(1,-1) for p in pred] + p.append(torch.cat(pred, 0).unsqueeze(0)) + p = torch.cat(p, 0) + for pred in p: + t = pred[0].cpu().numpy() + t = list(t) + if 0 in t: + t = t[:t.index(0)] + text = tokenizer.decode(t, clean_up_tokenization_spaces=False) + # print(text) + preds.append(text) + + if step % args.logging_steps == 0: + logger.info(f"{step} are done!") + + golds = [] + datafile = os.path.join(args.data_dir, f"{file_type}.json") + datas = open(datafile).readlines() + for x in datas[:num]: + x = json.loads(x) + golds.append(x["code"]) + + assert len(preds) == len(golds) + + EM = [] + with open(os.path.join(args.output_dir, f"{file_type}.output"), 'w') as f, open(os.path.join(args.output_dir, f"{file_type}.gold"), 'w') as f1: + for pred, gold in zip(preds, golds): + f.write(pred+'\n') + f1.write(gold+'\n') + EM.append(pred.split() == gold.split()) + + if file_type == "test": + return 0, 0 + + bleu_score = round(_bleu(os.path.join(args.output_dir, f"{file_type}.gold"), os.path.join(args.output_dir, f"{file_type}.output")), 2) + EM = round(np.mean(EM) * 100, 2) + return bleu_score, EM + + + +def main(): + parser = argparse.ArgumentParser() + + ## Required parameters + parser.add_argument("--data_dir", default=None, type=str, required=True, + help="The input data path.") + parser.add_argument("--langs", default=None, type=str, required=True, + help="Languages to train, if all, train all languages in data_dir") + parser.add_argument("--output_dir", default=None, type=str, required=True, + help="The output directory where the model predictions and checkpoints will be written.") + + ## Other parameters + parser.add_argument("--model_type", default="gpt2", type=str, + help="The model architecture to be fine-tuned.") + parser.add_argument("--pretrain_dir", default="", type=str, + help="The output directory where the model predictions and checkpoints will be written.") + parser.add_argument("--config_dir", type=str, + help="config name. Required when training from scratch") + parser.add_argument("--tokenizer_dir", type=str, + help="Pre-trained tokenizer dir. 
Required when training from scratch") + parser.add_argument("--load_name", type=str, default="pretrained", + help="Load pretrained model name") + + parser.add_argument("--mlm", action='store_true', + help="Train with masked-language modeling loss instead of language modeling.") + parser.add_argument("--mlm_probability", type=float, default=0.15, + help="Ratio of tokens to mask for masked language modeling loss") + + parser.add_argument("--cache_dir", default="", type=str, + help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)") + parser.add_argument("--block_size", default=1024, type=int, + help="Optional input sequence length after tokenization." + "The training dataset will be truncated in block of this size for training." + "Default to the model max input length for single sentence inputs (take into account special tokens).") + parser.add_argument("--do_train", action='store_true', + help="Whether to run training.") + parser.add_argument("--do_eval", action='store_true', + help="Whether to run eval on the dev set.") + parser.add_argument("--do_infer", action='store_true', + help="Whether to run inference on test set.") + parser.add_argument("--evaluate_during_training", action='store_true', + help="Run evaluation during training at each logging step.") + parser.add_argument("--do_lower_case", action='store_true', + help="Set this flag if you are using an uncased model.") + + parser.add_argument("--per_gpu_train_batch_size", default=2, type=int, + help="Batch size per GPU/CPU for training.") + parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int, + help="Batch size per GPU/CPU for evaluation.") + parser.add_argument('--gradient_accumulation_steps', type=int, default=1, + help="Number of updates steps to accumulate before performing a backward/update pass.") + parser.add_argument("--learning_rate", default=5e-5, type=float, + help="The initial learning rate for Adam.") + parser.add_argument("--weight_decay", default=0.0, type=float, + help="Weight deay if we apply some.") + parser.add_argument("--adam_epsilon", default=1e-8, type=float, + help="Epsilon for Adam optimizer.") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm.") + parser.add_argument("--num_train_epochs", default=1.0, type=float, + help="Total number of training epochs to perform.") + parser.add_argument("--max_steps", default=-1, type=int, + help="If > 0: set total number of training steps to perform. 
+    parser.add_argument("--warmup_steps", default=0, type=int,
+                        help="Linear warmup over warmup_steps.")
+
+    parser.add_argument('--logging_steps', type=int, default=10,
+                        help="Log every X update steps.")
+    parser.add_argument('--save_steps', type=int, default=50,
+                        help="Save a checkpoint every X update steps.")
+    parser.add_argument('--save_total_limit', type=int, default=None,
+                        help='Limit the total number of checkpoints; delete the older checkpoints in output_dir. Does not delete by default.')
+    parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number.")
+    parser.add_argument("--no_cuda", action='store_true',
+                        help="Avoid using CUDA when available.")
+    parser.add_argument('--overwrite_output_dir', action='store_true',
+                        help="Overwrite the content of the output directory.")
+    parser.add_argument('--overwrite_cache', action='store_true',
+                        help="Overwrite the cached training and evaluation sets.")
+    parser.add_argument('--seed', type=int, default=42,
+                        help="Random seed for initialization.")
+
+    parser.add_argument('--fp16', action='store_true',
+                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit.")
+    parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+                             "See details at https://nvidia.github.io/apex/amp.html")
+    parser.add_argument("--local_rank", type=int, default=-1,
+                        help="For distributed training: local_rank")
+    parser.add_argument("--node_index", type=int, default=-1,
+                        help="Node index for multi-node training.")
+    parser.add_argument("--gpu_per_node", type=int, default=-1,
+                        help="Number of GPUs per node.")
+    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+    parser.add_argument('--log_file', type=str, default='')
+    parser.add_argument('--tensorboard_dir', type=str)
+
+    pool = None
+    args = parser.parse_args()
+
+    # args.output_dir = os.path.join(args.output_dir, args.dataset)
+
+    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
+        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
+                         "flag (masked language modeling).")
+
+    if os.path.exists(args.output_dir) and os.listdir(
+            args.output_dir) and args.do_train and not args.overwrite_output_dir:
+        raise ValueError(
+            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
+                args.output_dir))
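Editorial note (not part of the patch): the per-GPU batch size, GPU count, and gradient accumulation steps above combine multiplicatively into the number of examples seen per optimizer step. A tiny sketch of the arithmetic, plugging in the values that train.sh further below uses as an illustrative assumption:

# Illustrative arithmetic only: effective batch size per parameter update.
per_gpu_train_batch_size = 6      # --per_gpu_train_batch_size in train.sh below
n_gpu = 2                         # PER_NODE_GPU in train.sh below
gradient_accumulation_steps = 2   # --gradient_accumulation_steps in train.sh below

effective_batch_size = per_gpu_train_batch_size * n_gpu * gradient_accumulation_steps
print(effective_batch_size)       # 6 * 2 * 2 = 24 examples per update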
+    # Setup distant debugging if needed
+    if args.server_ip and args.server_port:
+        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+        import ptvsd
+        print("Waiting for debugger attach")
+        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+        ptvsd.wait_for_attach()
+
+    logger.warning("local_rank: %d, node_index: %d, gpu_per_node: %d" % (args.local_rank, args.node_index, args.gpu_per_node))
+    # Setup CUDA, GPU & distributed training
+    if args.local_rank == -1 or args.no_cuda:
+        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+        args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
+        torch.distributed.init_process_group(backend='nccl')
+        args.local_rank += args.node_index * args.gpu_per_node
+        args.n_gpu = 1
+    args.device = device
+    # args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+
+    # Setup logging
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                        datefmt='%m/%d/%Y %H:%M:%S',
+                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
+                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
+                   torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+
+    # Use a FileHandler to also write logs to a file
+    fh = logging.FileHandler(args.log_file)
+    logger.addHandler(fh)
+
+    # Set seed
+    set_seed(args)
+
+    # Load pretrained model and tokenizer
+    if args.local_rank not in [-1, 0]:
+        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads model & vocab
+
+    args.start_epoch = 0
+    args.start_step = 0
+    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+    if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+        args.pretrain_dir = os.path.join(checkpoint_last)
+        args.config_name = os.path.join(checkpoint_last, 'config.json')
+        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+        with open(idx_file, encoding='utf-8') as idxf:
+            args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+        step_file = os.path.join(checkpoint_last, 'step_file.txt')
+        if os.path.exists(step_file):
+            with open(step_file, encoding='utf-8') as stepf:
+                args.start_step = int(stepf.readlines()[0].strip())
+
+        logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
+
+    # Load pre-trained model
+    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+    pretrained = args.pretrain_dir
+    if pretrained:
+        tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
+        logger.info(tokenizer.encode("<s> hello world </s>"))
+        model = model_class.from_pretrained(pretrained)
+        model.resize_token_embeddings(len(tokenizer))
+        update_config(model, tokenizer)
+        logger.info(model.config)
+    else:
+        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
+        args.vocab_size = tokenizer.vocab_size
+        config = config_class.from_pretrained(args.config_dir)
+        model = model_class(config)
+        model.resize_token_embeddings(len(tokenizer))
+        update_config(model, tokenizer)
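Editorial note (not part of the patch): both branches above register CONCODE-specific special tokens on the tokenizer and then resize the model's input embeddings to the new vocabulary size. A minimal standalone sketch of that pattern, assuming plain GPT-2 rather than the CodeGPT checkpoint:

# Sketch only (assumption: vanilla GPT-2 instead of the CodeGPT checkpoint):
# register special tokens, then resize the embedding table to match.
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.add_special_tokens({
    "bos_token": "<s>",
    "eos_token": "</s>",
    "pad_token": "<pad>",
    "unk_token": "<|UNKNOWN|>",
    "sep_token": "concode_elem_sep",
})

model = GPT2LMHeadModel.from_pretrained("gpt2")
model.resize_token_embeddings(len(tokenizer))   # newly added rows start randomly initialized
print(len(tokenizer), model.get_input_embeddings().weight.shape[0])  # now equal

Resizing has to happen before loading any fine-tuned weights that assume the enlarged vocabulary; otherwise the embedding shapes will not match, which is why the --do_eval and --do_infer branches below call load_state_dict only after the tokenizer and resize steps above.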
+
+    model_parameters = model.parameters()
+    num_params = sum([np.prod(p.size()) for p in model_parameters])
+    logger.info(f"Model has a total of {num_params} trainable parameters")
+
+    if args.local_rank == 0:
+        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab
+
+    logger.info("Training/evaluation parameters %s", args)
+
+    # Training
+    if args.do_train:
+        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
+
+        global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
+        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
+
+    if args.do_eval:  # only works on 1 GPU
+        checkpoint_prefix = 'epoch_23/subject_model.pth'
+        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+        model.load_state_dict(torch.load(output_dir))
+        model.to(args.device)
+        dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=2000)
+        logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
+
+    if args.do_infer:  # only works on 1 GPU
+        checkpoint_prefix = 'epoch_23/subject_model.pth'
+        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+        model.load_state_dict(torch.load(output_dir))
+        model.to(args.device)
+        test_bleu, test_EM = eval_bleu(args, model, tokenizer, file_type='test', num=2000)
+        logger.info(f"test bleu: {test_bleu}, test EM: {test_EM}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Text-code/text-to-code/code/train.sh b/Text-code/text-to-code/code/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b1901054e84c5ed290ffbbdc88d22fa274e2e52e
--- /dev/null
+++ b/Text-code/text-to-code/code/train.sh
@@ -0,0 +1,29 @@
+LANG=java
+DATADIR=../dataset
+OUTPUTDIR=../model
+PRETRAINDIR=microsoft/CodeGPT-small-java-adaptedGPT2        # will download the pre-trained CodeGPT model
+LOGFILE=text2code_concode.log
+PER_NODE_GPU=2        # modify to YOUR_GPU_NUM
+
+CUDA_VISIBLE_DEVICES=2,3 python run.py \
+        --data_dir=$DATADIR \
+        --langs=$LANG \
+        --output_dir=$OUTPUTDIR \
+        --pretrain_dir=$PRETRAINDIR \
+        --log_file=$LOGFILE \
+        --model_type=gpt2 \
+        --block_size=512 \
+        --do_train \
+        --node_index 0 \
+        --gpu_per_node $PER_NODE_GPU \
+        --learning_rate=5e-5 \
+        --weight_decay=0.01 \
+        --evaluate_during_training \
+        --per_gpu_train_batch_size=6 \
+        --per_gpu_eval_batch_size=12 \
+        --gradient_accumulation_steps=2 \
+        --num_train_epochs=30 \
+        --logging_steps=100 \
+        --save_steps=5000 \
+        --overwrite_output_dir \
+        --seed=42
\ No newline at end of file
diff --git a/Text-code/text-to-code/data.zip b/Text-code/text-to-code/data.zip
new file mode 100644
index 0000000000000000000000000000000000000000..ea0a56fb048eb23d354a17511e3db617b4a61d72
--- /dev/null
+++ b/Text-code/text-to-code/data.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f18c01ec758232af7c7f68c448b6ae3d9a979099f1f7b8d56e2e525acb7bdf6b
+size 20278243
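Editorial note (not part of the patch): data.zip is stored as a Git LFS pointer, so the actual archive is fetched with git lfs pull. A small sketch for checking the downloaded file against the oid and size recorded in the pointer above; the local path is an assumption:

# Sketch only: verify an LFS-tracked file against the sha256 oid and size in its pointer.
import hashlib

expected_oid = "f18c01ec758232af7c7f68c448b6ae3d9a979099f1f7b8d56e2e525acb7bdf6b"
expected_size = 20278243

h = hashlib.sha256()
size = 0
with open("Text-code/text-to-code/data.zip", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

print(h.hexdigest() == expected_oid and size == expected_size)    # True if fully fetched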