+
+| Model | Description | Task type | Epochs | Dataset |
+| --- | --- | --- | --- | --- |
+| Clone-detection-BigCloneBench | Code clone detection model trained on the large-scale BigCloneBench clone benchmark; the task is binary classification (0/1), where 1 means the two snippets are semantically equivalent and 0 otherwise. | Code clone detection | 2 epochs | BigCloneBench dataset |
+| Clone-detection-POJ-104 | Code clone detection model trained on POJ-104; given a piece of code and a set of candidates, the task is to return the top-K candidates with the same semantics across different programming problems. | Code clone detection | 2 epochs (0-1) | POJ-104 programming-problem dataset |
+| CodeCompletion-token | Token-level code completion model. | Code completion | 5 epochs (Java corpus) | Java code token-sequence dataset |
+| Defect-detection | Code defect detection model that analyzes source code to identify potential defects and bugs (binary classification, 0/1). | Defect detection | 5 epochs | C code dataset with defect annotations |
+| code-refinement | Code refinement model. | Code refinement/refactoring | 34 epochs (small dataset) | Paired dataset of code before and after refinement (C) |
+| code-to-text | Code-to-natural-language generation model. | Code comment generation | 10 epochs per language (Python/Java/JavaScript/PHP/Ruby/Go) | Multilingual code-text pair dataset |
+| NL-code-search-Adv | Advanced natural-language code search model that retrieves code by computing the similarity between a natural-language query and candidate code snippets. | Code search | 2 epochs | Natural language-code (Python) pair dataset |
+| NL-code-search-WebQuery | Web-query-based code search model; an encoder processes the code and natural-language inputs and a multilayer perceptron (MLP) computes the similarity score. | Code search | 3 epochs on each of the two datasets | Web query-code pair datasets (CodeSearchNet and CoSQA (Python)) |
+| text-to-code | Natural-language-to-code generation model. | Code generation | 23 epochs | Text description-code (C) pair dataset |
+
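+Below is a minimal, hedged sketch of the bi-encoder scoring idea behind NL-code-search-Adv (a fuller example lives in `code/demo.py` below): the query and each candidate snippet are encoded with the same model and candidates are ranked by dot-product similarity. It uses the public `microsoft/codebert-base` weights (also referenced in `train.sh`) rather than the fine-tuned `subject_model.pth` checkpoints, so the scores are only illustrative.
+
+```python
+import torch
+from transformers import RobertaTokenizer, RobertaModel
+
+# Assumption: plain CodeBERT weights, not this repo's fine-tuned checkpoints.
+tokenizer = RobertaTokenizer.from_pretrained("microsoft/codebert-base")
+encoder = RobertaModel.from_pretrained("microsoft/codebert-base")
+encoder.eval()
+
+query = "print hello world"
+candidates = ["import numpy as np", "print('hello world')"]
+
+with torch.no_grad():
+    nl = tokenizer(query, return_tensors="pt", truncation=True, max_length=256)
+    nl_vec = encoder(**nl).pooler_output                   # shape (1, 768)
+    for code in candidates:
+        c = tokenizer(code, return_tensors="pt", truncation=True, max_length=256)
+        code_vec = encoder(**c).pooler_output               # shape (1, 768)
+        score = (nl_vec * code_vec).sum(-1).item()          # higher = more relevant
+        print(f"{score:.4f}  {code}")
+```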
+
diff --git a/Text-code/NL-code-search-Adv/code/demo.py b/Text-code/NL-code-search-Adv/code/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..72659edd5ed6389ec57b1fccc91b35dd9bb62810
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/demo.py
@@ -0,0 +1,55 @@
+from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
+import torch
+import sys
+import os
+
+from model import Model
+
+
+def single_tokenize(text, tokenizer, block_size=256):
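+    # Truncate to block_size - 2 tokens, wrap with [CLS]/[SEP], then right-pad
+    # with pad_token_id so every example has exactly block_size ids.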
+ tokens = tokenizer.tokenize(text)[:block_size - 2]
+ tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
+ ids = tokenizer.convert_tokens_to_ids(tokens)
+ padding_length = block_size - len(ids)
+ ids += [tokenizer.pad_token_id] * padding_length
+ return torch.tensor([ids])
+
+
+if __name__ == "__main__":
+    config = RobertaConfig.from_pretrained("../../../../active_dataset_debugging/base/codebert-base")
+ config.num_labels = 1
+ tokenizer = RobertaTokenizer.from_pretrained("../../../../active_dataset_debugging/base/codebert-base", do_lower_case=True)
+ model = RobertaModel.from_pretrained("../../../../active_dataset_debugging/base/roberta-base", config=config)
+ model = Model(model, config, tokenizer, args=None)
+ model.load_state_dict(torch.load("../model/python/epoch_2/subject_model.pth", map_location=torch.device('cpu')))
+
+
+ query = "print hello world"
+ code_1 = """
+ import numpy as np
+ """
+ code_2 = """
+ a = 'hello world'
+ """
+ code_3 = """
+ cout << "hello world" << endl;
+ """
+ code_4 = '''
+ print('hello world')
+ '''
+ codes = []
+ codes.append(code_1)
+ codes.append(code_2)
+ codes.append(code_3)
+ codes.append(code_4)
+ scores = []
+ nl_inputs = single_tokenize(query, tokenizer)
+ for code in codes:
+ code_inputs = single_tokenize(code, tokenizer)
+ score = model(code_inputs, nl_inputs, return_scores=True)
+ scores.append(score)
+ print("Query:", query)
+ for i in range(len(codes)):
+ print('------------------------------')
+ print("Code:", codes[i])
+ print("Score:", float(scores[i]))
\ No newline at end of file
diff --git a/Text-code/NL-code-search-Adv/code/evaluate.sh b/Text-code/NL-code-search-Adv/code/evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9f135df15bc32325cd80ebc3364a2ef051a07766
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/evaluate.sh
@@ -0,0 +1,5 @@
+lang=python
+
+python evaluator.py \
+-a ../dataset/$lang/valid.jsonl \
+-p ../model/$lang/predictions.jsonl
\ No newline at end of file
diff --git a/Text-code/NL-code-search-Adv/code/evaluator.py b/Text-code/NL-code-search-Adv/code/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..a75b4944ca9d656641ed6214ce14c2709dca946e
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/evaluator.py
@@ -0,0 +1,57 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import logging
+import sys,json
+import numpy as np
+
+def read_answers(filename):
+ answers={}
+ with open(filename) as f:
+ for line in f:
+ line=line.strip()
+ js=json.loads(line)
+ answers[js['url']]=js['idx']
+ return answers
+
+def read_predictions(filename):
+ predictions={}
+ with open(filename) as f:
+ for line in f:
+ line=line.strip()
+ js=json.loads(line)
+ predictions[js['url']]=js['answers']
+ return predictions
+
+def calculate_scores(answers,predictions):
+ scores=[]
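+    # Reciprocal rank: contribute 1/(rank+1) if the gold idx appears among the
+    # ranked candidates for this url, 0 otherwise; MRR is the mean over queries.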
+ for key in answers:
+ if key not in predictions:
+ logging.error("Missing prediction for url {}.".format(key))
+ sys.exit()
+ flag=False
+ for rank,idx in enumerate(predictions[key]):
+ if idx==answers[key]:
+ scores.append(1/(rank+1))
+ flag=True
+ break
+ if flag is False:
+ scores.append(0)
+ result={}
+ result['MRR']=round(np.mean(scores),4)
+ return result
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for NL-code-search-Adv dataset.')
+    parser.add_argument('--answers', '-a', help="filename of the labels, in jsonl format.")
+    parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in jsonl format.")
+
+
+ args = parser.parse_args()
+ answers=read_answers(args.answers)
+ predictions=read_predictions(args.predictions)
+ scores=calculate_scores(answers,predictions)
+ print(scores)
+
+if __name__ == '__main__':
+ main()
diff --git a/Text-code/NL-code-search-Adv/code/model.py b/Text-code/NL-code-search-Adv/code/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..161d6fc55e24918921ba3d6bfb29cb0d691cc5cc
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/model.py
@@ -0,0 +1,44 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+import torch
+import torch.nn as nn
+import torch
+from torch.autograd import Variable
+import copy
+import torch.nn.functional as F
+from torch.nn import CrossEntropyLoss, MSELoss
+
+
+class Model(nn.Module):
+ def __init__(self, encoder, config, tokenizer, args):
+ super(Model, self).__init__()
+ self.encoder = encoder
+ self.config = config
+ self.tokenizer = tokenizer
+ self.args = args
+
+ def forward(self, code_inputs, nl_inputs, return_vec=False, return_scores=False):
+ bs = code_inputs.shape[0]
+ inputs = torch.cat((code_inputs, nl_inputs), 0)
+ encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1))
+ outputs = encoder_output[1]
+
+ code_vec = outputs[:bs]
+ nl_vec = outputs[bs:]
+
+ if return_vec:
+ return code_vec, nl_vec
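+        # scores[i, j] is the dot product between NL query i and code snippet j;
+        # with in-batch negatives the matching pairs lie on the diagonal, so the
+        # CrossEntropy target for row i is simply index i (torch.arange below).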
+ scores = (nl_vec[:, None, :] * code_vec[None, :, :]).sum(-1)
+ if return_scores:
+ return scores
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(scores, torch.arange(bs, device=scores.device))
+ return loss, code_vec, nl_vec
+
+ def feature(self, code_inputs, nl_inputs):
+ bs = code_inputs.shape[0]
+ inputs = torch.cat((code_inputs, nl_inputs), 0)
+ encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1))
+ code_feature = encoder_output.pooler_output[:bs]
+ nl_feature = encoder_output.pooler_output[bs:]
+ return code_feature, nl_feature
\ No newline at end of file
diff --git a/Text-code/NL-code-search-Adv/code/run.py b/Text-code/NL-code-search-Adv/code/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..bab2b78d139d58152ba9f3076f1d4b9f9085eb10
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/run.py
@@ -0,0 +1,634 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
+GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
+using a masked language modeling (MLM) loss.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import pickle
+import random
+import re
+import shutil
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+import json
+
+try:
+ from torch.utils.tensorboard import SummaryWriter
+except:
+ from tensorboardX import SummaryWriter
+
+from tqdm import tqdm, trange
+import multiprocessing
+from model import Model
+
+cpu_cont = multiprocessing.cpu_count()
+from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+ BertConfig, BertForMaskedLM, BertTokenizer,
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+ RobertaConfig, RobertaModel, RobertaTokenizer,
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
+ 'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
+ 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
+ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+}
+
+
+class InputFeatures(object):
+ """A single training/test features for a example."""
+
+ def __init__(self,
+ code_tokens,
+ code_ids,
+ nl_tokens,
+ nl_ids,
+ url,
+ idx,
+
+ ):
+ self.code_tokens = code_tokens
+ self.code_ids = code_ids
+ self.nl_tokens = nl_tokens
+ self.nl_ids = nl_ids
+ self.url = url
+ self.idx = idx
+
+
+def convert_examples_to_features(js, tokenizer, args):
+ # code
+ if 'code_tokens' in js:
+ code = ' '.join(js['code_tokens'])
+ else:
+ code = ' '.join(js['function_tokens'])
+ code_tokens = tokenizer.tokenize(code)[:args.block_size - 2]
+ code_tokens = [tokenizer.cls_token] + code_tokens + [tokenizer.sep_token]
+ code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
+ padding_length = args.block_size - len(code_ids)
+ code_ids += [tokenizer.pad_token_id] * padding_length
+
+ nl = ' '.join(js['docstring_tokens'])
+ nl_tokens = tokenizer.tokenize(nl)[:args.block_size - 2]
+ nl_tokens = [tokenizer.cls_token] + nl_tokens + [tokenizer.sep_token]
+ nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)
+ padding_length = args.block_size - len(nl_ids)
+ nl_ids += [tokenizer.pad_token_id] * padding_length
+
+ return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids, js['url'], js['idx'])
+
+
+class TextDataset(Dataset):
+ def __init__(self, tokenizer, args, file_path=None):
+ self.examples = []
+ data = []
+ with open(file_path) as f:
+ for i, line in enumerate(f):
+ # if i>200:
+ # break
+ line = line.strip()
+ js = json.loads(line)
+ data.append(js)
+ for js in data:
+ self.examples.append(convert_examples_to_features(js, tokenizer, args))
+ if 'train' in file_path:
+ for idx, example in enumerate(self.examples[:1]):
+ logger.info("*** Example ***")
+ logger.info("idx: {}".format(idx))
+ logger.info("code_tokens: {}".format([x.replace('\u0120', '_') for x in example.code_tokens]))
+ logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
+ logger.info("nl_tokens: {}".format([x.replace('\u0120', '_') for x in example.nl_tokens]))
+ logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids))))
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i):
+ return (torch.tensor(self.examples[i].code_ids), torch.tensor(self.examples[i].nl_ids))
+
+
+def set_seed(seed=42):
+ random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed(seed)
+ torch.backends.cudnn.deterministic = True
+
+
+def train(args, train_dataset, model, tokenizer):
+ """ Train the model """
+
+ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+ train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
+
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
+ batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
+ args.max_steps = args.epoch * len(train_dataloader)
+ args.save_steps = len(train_dataloader) // 10
+ args.warmup_steps = len(train_dataloader)
+ args.logging_steps = len(train_dataloader)
+ args.num_train_epochs = args.epoch
+ model.to(args.device)
+ # Prepare optimizer and schedule (linear warmup and decay)
+ no_decay = ['bias', 'LayerNorm.weight']
+ optimizer_grouped_parameters = [
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+ 'weight_decay': args.weight_decay},
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+ ]
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps * 0.1,
+ num_training_steps=args.max_steps)
+ if args.fp16:
+ try:
+ from apex import amp
+ except ImportError:
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+ # multi-gpu training (should be after apex fp16 initialization)
+ if args.n_gpu > 1:
+ model = torch.nn.DataParallel(model)
+
+ # Distributed training (should be after apex fp16 initialization)
+ if args.local_rank != -1:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
+ output_device=args.local_rank,
+ find_unused_parameters=True)
+
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
+ if os.path.exists(scheduler_last):
+ scheduler.load_state_dict(torch.load(scheduler_last))
+ if os.path.exists(optimizer_last):
+ optimizer.load_state_dict(torch.load(optimizer_last))
+
+ # Train!
+ logger.info("***** Running training *****")
+ logger.info(" Num examples = %d", len(train_dataset))
+ logger.info(" Num Epochs = %d", args.num_train_epochs)
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
+ args.train_batch_size * args.gradient_accumulation_steps * (
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+ logger.info(" Total optimization steps = %d", args.max_steps)
+
+ global_step = args.start_step
+ tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
+ best_mrr = 0.0
+ best_acc = 0.0
+ # model.resize_token_embeddings(len(tokenizer))
+ model.zero_grad()
+
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
+ bar = train_dataloader
+ tr_num = 0
+ train_loss = 0
+ for step, batch in enumerate(tqdm(bar)):
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+
+ model.train()
+ loss, code_vec, nl_vec = model(code_inputs, nl_inputs)
+
+ if args.n_gpu > 1:
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
+ if args.gradient_accumulation_steps > 1:
+ loss = loss / args.gradient_accumulation_steps
+
+ if args.fp16:
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+ else:
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+ tr_loss += loss.item()
+ tr_num += 1
+ train_loss += loss.item()
+ if avg_loss == 0:
+ avg_loss = tr_loss
+ avg_loss = round(train_loss / tr_num, 5)
+ if (step + 1) % 100 == 0:
+ logger.info("epoch {} step {} loss {}".format(idx, step + 1, avg_loss))
+ # bar.set_description("epoch {} loss {}".format(idx,avg_loss))
+
+ if (step + 1) % args.gradient_accumulation_steps == 0:
+ optimizer.step()
+ optimizer.zero_grad()
+ scheduler.step()
+ global_step += 1
+ output_flag = True
+ avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+ logging_loss = tr_loss
+ tr_nb = global_step
+
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+
+ if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
+ results = evaluate(args, model, tokenizer, eval_when_training=True)
+ for key, value in results.items():
+ logger.info(" %s = %s", key, round(value, 4))
+ # Save model checkpoint
+ tr_num = 0
+ train_loss = 0
+
+ if results['eval_mrr'] > best_acc:
+ best_acc = results['eval_mrr']
+ logger.info(" " + "*" * 20)
+ logger.info(" Best mrr:%s", round(best_acc, 4))
+ logger.info(" " + "*" * 20)
+
+ checkpoint_prefix = 'checkpoint-best-mrr'
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model
+ output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
+ torch.save(model_to_save.state_dict(), output_dir)
+ logger.info("Saving model checkpoint to %s", output_dir)
+
+        # Save a checkpoint at the end of every epoch
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
+        # Optionally dump the encoder features for every epoch (disabled below)
+ # logger.info("Saving training feature")
+ # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1,num_workers=4,pin_memory=True)
+ # code_feature, nl_feature = [], []
+ # for batch in tqdm(train_dataloader_bs1):
+ # code_inputs = batch[0].to(args.device)
+ # nl_inputs = batch[1].to(args.device)
+ # model.eval()
+ # with torch.no_grad():
+ # cf, nf = model.feature(code_inputs=code_inputs, nl_inputs=nl_inputs)
+ # code_feature.append(cf.cpu().detach().numpy())
+ # nl_feature.append(nf.cpu().detach().numpy())
+ # code_feature_output_path = os.path.join(output_dir, 'code_feature.pkl')
+ # nl_feature_output_path = os.path.join(output_dir, 'nl_feature.pkl')
+ # with open(code_feature_output_path, 'wb') as f1, open(nl_feature_output_path, 'wb') as f2:
+ # pickle.dump(code_feature, f1)
+ # pickle.dump(code_feature, f2)
+
+
+eval_dataset = None
+def evaluate(args, model, tokenizer, eval_when_training=False):
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
+ eval_output_dir = args.output_dir
+ global eval_dataset
+ if eval_dataset is None:
+ eval_dataset = TextDataset(tokenizer, args, args.eval_data_file)
+
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+ os.makedirs(eval_output_dir)
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4,
+ pin_memory=True)
+
+ # multi-gpu evaluate
+ if args.n_gpu > 1 and eval_when_training is False:
+ model = torch.nn.DataParallel(model)
+
+ # Eval!
+ logger.info("***** Running evaluation *****")
+ logger.info(" Num examples = %d", len(eval_dataset))
+ logger.info(" Batch size = %d", args.eval_batch_size)
+ eval_loss = 0.0
+ nb_eval_steps = 0
+ model.eval()
+ code_vecs = []
+ nl_vecs = []
+ for batch in eval_dataloader:
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+ with torch.no_grad():
+ lm_loss, code_vec, nl_vec = model(code_inputs, nl_inputs)
+ eval_loss += lm_loss.mean().item()
+ code_vecs.append(code_vec.cpu().numpy())
+ nl_vecs.append(nl_vec.cpu().numpy())
+ nb_eval_steps += 1
+ code_vecs = np.concatenate(code_vecs, 0)
+ nl_vecs = np.concatenate(nl_vecs, 0)
+ eval_loss = eval_loss / nb_eval_steps
+ perplexity = torch.tensor(eval_loss)
+
+ scores = np.matmul(nl_vecs, code_vecs.T)
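+    # For query i the gold snippet is the diagonal entry; its rank is the number
+    # of candidates scoring at least as high, and eval_mrr averages 1/rank.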
+ ranks = []
+ for i in range(len(scores)):
+ score = scores[i, i]
+ rank = 1
+ for j in range(len(scores)):
+ if i != j and scores[i, j] >= score:
+ rank += 1
+ ranks.append(1 / rank)
+
+ result = {
+ "eval_loss": float(perplexity),
+ "eval_mrr": float(np.mean(ranks))
+ }
+
+ return result
+
+
+def test(args, model, tokenizer):
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
+ eval_dataset = TextDataset(tokenizer, args, args.test_data_file)
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+ # multi-gpu evaluate
+ if args.n_gpu > 1:
+ model = torch.nn.DataParallel(model)
+
+ # Eval!
+ logger.info("***** Running Test *****")
+ logger.info(" Num examples = %d", len(eval_dataset))
+ logger.info(" Batch size = %d", args.eval_batch_size)
+ eval_loss = 0.0
+ nb_eval_steps = 0
+ code_vecs = []
+ nl_vecs = []
+ for batch in eval_dataloader:
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+ with torch.no_grad():
+ lm_loss, code_vec, nl_vec = model(code_inputs, nl_inputs)
+ eval_loss += lm_loss.mean().item()
+ code_vecs.append(code_vec.cpu().numpy())
+ nl_vecs.append(nl_vec.cpu().numpy())
+ nb_eval_steps += 1
+ code_vecs = np.concatenate(code_vecs, 0)
+ nl_vecs = np.concatenate(nl_vecs, 0)
+ eval_loss = eval_loss / nb_eval_steps
+ perplexity = torch.tensor(eval_loss)
+
+ scores = np.matmul(nl_vecs, code_vecs.T)
+
+ sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1]
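+    # Keep the 100 highest-scoring code candidates per query and write them as the
+    # 'answers' list that evaluator.py expects in predictions.jsonl.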
+ indexs = []
+ urls = []
+ for example in eval_dataset.examples:
+ indexs.append(example.idx)
+ urls.append(example.url)
+ with open(os.path.join(args.output_dir, "predictions.jsonl"), 'w') as f:
+ for index, url, sort_id in zip(indexs, urls, sort_ids):
+ js = {}
+ js['url'] = url
+ js['answers'] = []
+ for idx in sort_id[:100]:
+ js['answers'].append(indexs[int(idx)])
+ f.write(json.dumps(js) + '\n')
+
+
+def main():
+ parser = argparse.ArgumentParser()
+
+ ## Required parameters
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
+ help="The output directory where the model predictions and checkpoints will be written.")
+
+ ## Other parameters
+ parser.add_argument("--train_data_file", default=None, type=str,
+ help="The input training data file (a text file).")
+ parser.add_argument("--eval_data_file", default=None, type=str,
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
+ parser.add_argument("--test_data_file", default=None, type=str,
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
+
+ parser.add_argument("--model_type", default="bert", type=str,
+ help="The model architecture to be fine-tuned.")
+ parser.add_argument("--model_name_or_path", default=None, type=str,
+ help="The model checkpoint for weights initialization.")
+
+ parser.add_argument("--mlm", action='store_true',
+ help="Train with masked-language modeling loss instead of language modeling.")
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
+ help="Ratio of tokens to mask for masked language modeling loss")
+
+ parser.add_argument("--config_name", default="", type=str,
+ help="Optional pretrained config name or path if not the same as model_name_or_path")
+ parser.add_argument("--tokenizer_name", default="", type=str,
+ help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
+ parser.add_argument("--cache_dir", default="", type=str,
+ help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
+ parser.add_argument("--block_size", default=-1, type=int,
+ help="Optional input sequence length after tokenization."
+ "The training dataset will be truncated in block of this size for training."
+ "Default to the model max input length for single sentence inputs (take into account special tokens).")
+ parser.add_argument("--do_train", action='store_true',
+ help="Whether to run training.")
+ parser.add_argument("--do_eval", action='store_true',
+ help="Whether to run eval on the dev set.")
+ parser.add_argument("--do_test", action='store_true',
+ help="Whether to run eval on the dev set.")
+ parser.add_argument("--evaluate_during_training", action='store_true',
+ help="Run evaluation during training at each logging step.")
+ parser.add_argument("--do_lower_case", action='store_true',
+ help="Set this flag if you are using an uncased model.")
+
+ parser.add_argument("--train_batch_size", default=4, type=int,
+ help="Batch size per GPU/CPU for training.")
+ parser.add_argument("--eval_batch_size", default=4, type=int,
+ help="Batch size per GPU/CPU for evaluation.")
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
+ help="The initial learning rate for Adam.")
+ parser.add_argument("--weight_decay", default=0.0, type=float,
+ help="Weight deay if we apply some.")
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+ help="Epsilon for Adam optimizer.")
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
+ help="Max gradient norm.")
+ parser.add_argument("--num_train_epochs", default=1.0, type=float,
+ help="Total number of training epochs to perform.")
+ parser.add_argument("--max_steps", default=-1, type=int,
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
+ parser.add_argument("--warmup_steps", default=0, type=int,
+ help="Linear warmup over warmup_steps.")
+
+ parser.add_argument('--logging_steps', type=int, default=50,
+ help="Log every X updates steps.")
+ parser.add_argument('--save_steps', type=int, default=50,
+ help="Save checkpoint every X updates steps.")
+ parser.add_argument('--save_total_limit', type=int, default=None,
+ help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
+ help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
+ parser.add_argument("--no_cuda", action='store_true',
+ help="Avoid using CUDA when available")
+ parser.add_argument('--overwrite_output_dir', action='store_true',
+ help="Overwrite the content of the output directory")
+ parser.add_argument('--overwrite_cache', action='store_true',
+ help="Overwrite the cached training and evaluation sets")
+ parser.add_argument('--seed', type=int, default=42,
+ help="random seed for initialization")
+    parser.add_argument('--epoch', type=int, default=42,
+                        help="number of training epochs")
+ parser.add_argument('--fp16', action='store_true',
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+ "See details at https://nvidia.github.io/apex/amp.html")
+ parser.add_argument("--local_rank", type=int, default=-1,
+ help="For distributed training: local_rank")
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+ args = parser.parse_args()
+
+ # Setup distant debugging if needed
+ if args.server_ip and args.server_port:
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+ import ptvsd
+ print("Waiting for debugger attach")
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+ ptvsd.wait_for_attach()
+
+ # Setup CUDA, GPU & distributed training
+ if args.local_rank == -1 or args.no_cuda:
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+ args.n_gpu = torch.cuda.device_count()
+ else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
+ torch.cuda.set_device(args.local_rank)
+ device = torch.device("cuda", args.local_rank)
+ torch.distributed.init_process_group(backend='nccl')
+ args.n_gpu = 1
+ args.device = device
+    args.per_gpu_train_batch_size = args.train_batch_size  # modified: originally // args.n_gpu
+    args.per_gpu_eval_batch_size = args.eval_batch_size  # modified: originally // args.n_gpu
+ # Setup logging
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+ datefmt='%m/%d/%Y %H:%M:%S',
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+ # Set seed
+ set_seed(args.seed)
+
+ # Load pretrained model and tokenizer
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
+
+ args.start_epoch = 0
+ args.start_step = 0
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+ if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+ args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+ with open(idx_file, encoding='utf-8') as idxf:
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
+ if os.path.exists(step_file):
+ with open(step_file, encoding='utf-8') as stepf:
+ args.start_step = int(stepf.readlines()[0].strip())
+
+ logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
+
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ config.num_labels = 1
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
+ do_lower_case=args.do_lower_case,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ if args.block_size <= 0:
+ args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
+ args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
+ if args.model_name_or_path:
+ model = model_class.from_pretrained(args.model_name_or_path,
+ config=config,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ else:
+ model = model_class(config)
+
+ model = Model(model, config, tokenizer, args)
+ if args.local_rank == 0:
+ torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
+
+ logger.info("Training/evaluation parameters %s", args)
+
+ # Training
+ if args.do_train:
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
+
+ train_dataset = TextDataset(tokenizer, args, args.train_data_file)
+
+ if args.local_rank == 0:
+ torch.distributed.barrier()
+
+ train(args, train_dataset, model, tokenizer)
+
+ # Evaluation
+ results = {}
+ if args.do_eval and args.local_rank in [-1, 0]:
+ checkpoint_prefix = 'epoch_2/subject_model.pth'
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+ model.load_state_dict(torch.load(output_dir))
+ model.to(args.device)
+ result = evaluate(args, model, tokenizer)
+ logger.info("***** Eval results *****")
+ for key in sorted(result.keys()):
+ logger.info(" %s = %s", key, str(round(result[key], 4)))
+
+ if args.do_test and args.local_rank in [-1, 0]:
+ checkpoint_prefix = 'epoch_2/subject_model.pth'
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+ model.load_state_dict(torch.load(output_dir))
+ model.to(args.device)
+ test(args, model, tokenizer)
+
+ return results
+
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/Text-code/NL-code-search-Adv/code/test.sh b/Text-code/NL-code-search-Adv/code/test.sh
new file mode 100644
index 0000000000000000000000000000000000000000..217a265a2c909a972b31e53577633c2902c978df
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/test.sh
@@ -0,0 +1,21 @@
+lang=python
+
+CUDA_VISIBLE_DEVICES=2,3 python run.py \
+--output_dir ../$lang/model \
+--model_type roberta \
+--config_name microsoft/codebert-base \
+--model_name_or_path microsoft/codebert-base \
+--tokenizer_name roberta-base \
+--do_test \
+--train_data_file ../dataset/$lang/train.jsonl \
+--eval_data_file ../dataset/$lang/valid.jsonl \
+--test_data_file ../dataset/$lang/valid.jsonl \
+--epoch 2 \
+--block_size 256 \
+--train_batch_size 32 \
+--eval_batch_size 64 \
+--learning_rate 5e-5 \
+--max_grad_norm 1.0 \
+--evaluate_during_training \
+--seed 123456 \
+2>&1| tee train.log
\ No newline at end of file
diff --git a/Text-code/NL-code-search-Adv/code/train.sh b/Text-code/NL-code-search-Adv/code/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fa512b30389d3ea192947543c61b638c839ccd49
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/code/train.sh
@@ -0,0 +1,20 @@
+lang=python
+
+CUDA_VISIBLE_DEVICES=2,3 python run.py \
+--output_dir ../$lang/model \
+--model_type roberta \
+--config_name microsoft/codebert-base \
+--model_name_or_path microsoft/codebert-base \
+--tokenizer_name roberta-base \
+--do_train \
+--train_data_file ../dataset/$lang/train.jsonl \
+--eval_data_file ../dataset/$lang/valid.jsonl \
+--epoch 2 \
+--block_size 256 \
+--train_batch_size 32 \
+--eval_batch_size 64 \
+--learning_rate 5e-5 \
+--max_grad_norm 1.0 \
+--evaluate_during_training \
+--seed 123456 \
+2>&1| tee train.log
\ No newline at end of file
diff --git a/Text-code/NL-code-search-Adv/dataset.zip b/Text-code/NL-code-search-Adv/dataset.zip
new file mode 100644
index 0000000000000000000000000000000000000000..737413b472c2755a0055e66357a2e3c071afbbff
--- /dev/null
+++ b/Text-code/NL-code-search-Adv/dataset.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:415e59ea6ac79b87deca6e7adbfaab930c22e047d37bf81060c6cc1ccdedd1a7
+size 126844430
diff --git a/Text-code/NL-code-search-WebQuery/code/eval.sh b/Text-code/NL-code-search-WebQuery/code/eval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..f20d2f76e164c17a4a94e2dde4b6e97623918d45
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/eval.sh
@@ -0,0 +1,9 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+--model_type roberta \
+--do_eval \
+--dev_file cosqa-dev.json \
+--max_seq_length 200 \
+--per_gpu_eval_batch_size 2 \
+--data_dir ../data/CoSQA \
+--output_dir ../model/model_cosqa_continue_training \
+--encoder_name_or_path microsoft/codebert-base
\ No newline at end of file
diff --git a/Text-code/NL-code-search-WebQuery/code/evaluate.sh b/Text-code/NL-code-search-WebQuery/code/evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8bbf7814769d4872f3aa23acd816c28f26a237bd
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/evaluate.sh
@@ -0,0 +1,3 @@
+python evaluator.py \
+ --answers_webquery ../model/model_cosqa_continue_training/answer_predictions.txt \
+ --predictions_webquery ../model/model_cosqa_continue_training/webquery_predictions.txt
\ No newline at end of file
diff --git a/Text-code/NL-code-search-WebQuery/code/evaluator.py b/Text-code/NL-code-search-WebQuery/code/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..58f61f83f00c2d463d2af1b05346dcd47515aa61
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/evaluator.py
@@ -0,0 +1,59 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import logging
+import sys, json, os
+import numpy as np
+import argparse
+from sklearn.metrics import recall_score, precision_score, f1_score, accuracy_score
+
+
+def read_answers(filename):
+ answers = {}
+ with open(filename, 'r', encoding='utf-8') as f:
+ for line in f.readlines():
+ line = line.strip()
+ answers[line.split('\t')[0]] = int(line.split('\t')[1])
+ return answers
+
+
+def read_predictions(filename):
+ predictions = {}
+ with open(filename, 'r', encoding='utf-8') as f:
+ for line in f.readlines():
+ line = line.strip()
+ predictions[line.split('\t')[0]] = int(line.split('\t')[1])
+ return predictions
+
+
+def calculate_scores(answers, predictions):
+ y_trues, y_preds = [], []
+ for key in answers:
+ if key not in predictions:
+ logging.error("Missing prediction for index {}.".format(key))
+ sys.exit()
+ y_trues.append(answers[key])
+ y_preds.append(predictions[key])
+ scores={}
+ scores['Precision']=precision_score(y_trues, y_preds)
+ scores['Recall']=recall_score(y_trues, y_preds)
+ scores['F1']=f1_score(y_trues, y_preds)
+ scores['Accuracy']=accuracy_score(y_trues, y_preds)
+ return scores
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for the NL-code-search-WebQuery dataset.')
+ parser.add_argument('--answers_webquery', '-aw', help="filename of the labels on webquery test set, in txt format.")
+ parser.add_argument('--predictions_webquery', '-pw', help="filename of the leaderboard predictions on webquery test set, in txt format.")
+ args = parser.parse_args()
+
+ answers = read_answers(args.answers_webquery)
+ predictions = read_predictions(args.predictions_webquery)
+ acc_webquery = calculate_scores(answers, predictions)
+ # print('NL-code-search-WebQuery on WebQuery test set, acc: {}'.format(acc_webquery))
+ print('NL-code-search-WebQuery on WebQuery test set:')
+ print(acc_webquery)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/Text-code/NL-code-search-WebQuery/code/finetune.sh b/Text-code/NL-code-search-WebQuery/code/finetune.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8ffd41bde351f9084489acd889c62866df3a28e4
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/finetune.sh
@@ -0,0 +1,18 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+--model_type roberta \
+--do_train \
+--do_eval \
+--eval_all_checkpoints \
+--train_file cosqa-train.json \
+--dev_file cosqa-dev.json \
+--max_seq_length 200 \
+--per_gpu_train_batch_size 16 \
+--per_gpu_eval_batch_size 16 \
+--learning_rate 1e-5 \
+--num_train_epochs 3 \
+--gradient_accumulation_steps 1 \
+--warmup_steps 5000 \
+--evaluate_during_training \
+--data_dir ../data/CoSQA/ \
+--output_dir ../model/model_cosqa_continue_training \
+--encoder_name_or_path ./model_codesearchnet/checkpoint-best-aver
\ No newline at end of file
diff --git a/Text-code/NL-code-search-WebQuery/code/models.py b/Text-code/NL-code-search-WebQuery/code/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d3b377dbc4e8ebb3c2cd27ef7b73e82c7600f86
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/models.py
@@ -0,0 +1,52 @@
+import torch
+import torch.nn as nn
+import torch
+from torch.autograd import Variable
+import copy
+# from transformers.modeling_bert import BertLayerNorm
+import torch.nn.functional as F
+from torch.nn import CrossEntropyLoss, MSELoss
+# from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+# BertConfig, BertForMaskedLM, BertTokenizer,
+# GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+# OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+# RobertaConfig, RobertaModel, RobertaTokenizer,
+# DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+from transformers.modeling_utils import PreTrainedModel
+
+
+class Model(PreTrainedModel):
+ def __init__(self, encoder, config, tokenizer, args):
+ super(Model, self).__init__(config)
+ self.encoder = encoder
+ self.config = config
+ self.tokenizer = tokenizer
+ self.mlp = nn.Sequential(nn.Linear(768*4, 768),
+ nn.Tanh(),
+ nn.Linear(768, 1),
+ nn.Sigmoid())
+ self.loss_func = nn.BCELoss()
+ self.args = args
+
+ def forward(self, code_inputs, nl_inputs, labels, return_vec=False, do_my_test=False):
+ bs = code_inputs.shape[0]
+ inputs = torch.cat((code_inputs, nl_inputs), 0)
+ encoder_output = self.encoder(inputs, attention_mask=inputs.ne(1))
+ outputs = encoder_output[1]
+
+ code_vec = outputs[:bs]
+ nl_vec = outputs[bs:]
+
+ code_feature = encoder_output.pooler_output[:bs]
+ nl_feature = encoder_output.pooler_output[bs:]
+
+ if return_vec:
+ return code_vec, nl_vec
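+        # Matching head: concatenate [nl, code, nl - code, nl * code] (4 x 768
+        # features) and pass it through the MLP to get a probability, trained
+        # with BCE against the 0/1 relevance label.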
+ logits = self.mlp(torch.cat((nl_vec, code_vec, nl_vec-code_vec, nl_vec*code_vec), 1))
+ loss = self.loss_func(logits, labels.float().unsqueeze(1))
+ if do_my_test:
+ return loss, code_feature, nl_feature
+ predictions = (logits > 0.5).int() # (Batch, )
+ # predictions = logits.float()
+ return loss, predictions
+
diff --git a/Text-code/NL-code-search-WebQuery/code/run.py b/Text-code/NL-code-search-WebQuery/code/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dec0db0a774268a80edddc42d1866ec974ca6fe
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/run.py
@@ -0,0 +1,592 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import random
+import pickle
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+try:
+ from torch.utils.tensorboard import SummaryWriter
+except:
+ from tensorboardX import SummaryWriter
+from tqdm import tqdm, trange
+
+from transformers import (WEIGHTS_NAME, get_linear_schedule_with_warmup, AdamW,
+ RobertaConfig,
+ RobertaModel,
+ RobertaTokenizer)
+
+from models import Model
+from utils import acc_and_f1, TextDataset
+import multiprocessing
+cpu_cont = multiprocessing.cpu_count()
+
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
+
+
+def set_seed(seed=42):
+ random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed(seed)
+ torch.backends.cudnn.deterministic = True
+
+
+def train(args, train_dataset, model, tokenizer):
+ """ Train the model """
+ # if args.local_rank in [-1, 0]:
+ # tb_writer = SummaryWriter()
+
+ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+ train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
+
+ args.save_steps = len(train_dataloader) if args.save_steps<=0 else args.save_steps
+ args.warmup_steps = len(train_dataloader) if args.warmup_steps<=0 else args.warmup_steps
+ args.logging_steps = len(train_dataloader)
+
+ if args.max_steps > 0:
+ t_total = args.max_steps
+ args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)
+ else:
+ t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
+
+ # Prepare optimizer and schedule (linear warmup and decay)
+ no_decay = ['bias', 'LayerNorm.weight']
+ optimizer_grouped_parameters = [
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+ 'weight_decay': args.weight_decay},
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+ ]
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+ scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, t_total)
+
+ model.to(args.device)
+ if args.fp16:
+ try:
+ from apex import amp
+ except ImportError:
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+ # multi-gpu training (should be after apex fp16 initialization)
+ if args.n_gpu > 1:
+ model = torch.nn.DataParallel(model)
+
+ # Distributed training (should be after apex fp16 initialization)
+ if args.local_rank != -1:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
+ output_device=args.local_rank,
+ find_unused_parameters=True)
+
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+ if os.path.exists(scheduler_last):
+ scheduler.load_state_dict(torch.load(scheduler_last))
+
+ # Train!
+ logger.info("***** Running training *****")
+ logger.info(" Num examples = %d", len(train_dataset))
+ logger.info(" Num Epochs = %d", args.num_train_epochs)
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
+ args.train_batch_size * args.gradient_accumulation_steps * (
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+ logger.info(" Total optimization steps = %d", t_total)
+
+ global_step = args.start_step
+ tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
+ best_results = {"acc": 0.0, "precision": 0.0, "recall": 0.0, "f1": 0.0, "acc_and_f1": 0.0}
+ model.zero_grad()
+ train_iterator = trange(args.start_epoch, int(args.num_train_epochs), desc="Epoch",
+ disable=args.local_rank not in [-1, 0])
+ model.train()
+ logger.info(model)
+
+ for idx in train_iterator:
+ bar = tqdm(enumerate(train_dataloader))
+ tr_num=0
+ train_loss=0
+ for step, batch in bar:
+
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+ labels = batch[2].to(args.device)
+ loss, predictions = model(code_inputs, nl_inputs, labels)
+
+ if args.n_gpu > 1:
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
+ if args.gradient_accumulation_steps > 1:
+ loss = loss / args.gradient_accumulation_steps
+
+ if args.fp16:
+ try:
+ from apex import amp
+ except ImportError:
+ raise ImportError(
+ "Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+ else:
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+ tr_loss += loss.item()
+ tr_num += 1
+ train_loss += loss.item()
+ if avg_loss == 0:
+ avg_loss = tr_loss
+ avg_loss = round(train_loss/tr_num, 5)
+ bar.set_description("epoch {} step {} loss {}".format(idx, step+1, avg_loss))
+
+ if (step + 1) % args.gradient_accumulation_steps == 0:
+ optimizer.step()
+ optimizer.zero_grad()
+ scheduler.step()
+ global_step += 1
+ avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+ logging_loss = tr_loss
+ tr_nb = global_step
+
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+
+ if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
+ results = evaluate(args, model, tokenizer, eval_when_training=True)
+ for key, value in results.items():
+ logger.info(" %s = %s", key, round(value,4))
+ # Save model checkpoint
+ if results['acc_and_f1'] >= best_results['acc_and_f1']:
+ best_results = results
+
+ # save
+ checkpoint_prefix = 'checkpoint-best-aver'
+ output_dir = os.path.join(args.output_dir, checkpoint_prefix)
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
+
+ torch.save(model_to_save.state_dict(), os.path.join(output_dir, 'pytorch_model.bin'))
+ tokenizer.save_pretrained(output_dir)
+ torch.save(args, os.path.join(output_dir, 'training_{}.bin'.format(idx)))
+ logger.info("Saving model checkpoint to %s", output_dir)
+ torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
+ torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
+ logger.info("Saving optimizer and scheduler states to %s", output_dir)
+
+ if args.local_rank == -1:
+ checkpoint_prefix = 'checkpoint-last'
+ output_dir = os.path.join(args.output_dir, checkpoint_prefix)
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model
+ torch.save(model_to_save.state_dict(), os.path.join(output_dir, 'pytorch_model.bin'))
+ tokenizer.save_pretrained(output_dir)
+
+ idx_file = os.path.join(output_dir, 'idx_file.txt')
+ with open(idx_file, 'w', encoding='utf-8') as idxf:
+ idxf.write(str(args.start_epoch + idx) + '\n')
+ logger.info("Saving model checkpoint to %s", output_dir)
+ torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
+ torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
+ logger.info("Saving optimizer and scheduler states to %s", output_dir)
+ step_file = os.path.join(output_dir, 'step_file.txt')
+ with open(step_file, 'w', encoding='utf-8') as stepf:
+ stepf.write(str(global_step) + '\n')
+
+ if args.max_steps > 0 and global_step > args.max_steps:
+ train_iterator.close()
+ break
+
+        # Save a checkpoint at the end of every epoch
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
+        # Optionally dump the encoder features for every epoch (disabled below)
+ # logger.info("Saving training feature")
+ # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1, num_workers=4,
+ # pin_memory=True)
+ # code_feature, nl_feature = [], []
+ # for batch in tqdm(train_dataloader_bs1):
+ # code_inputs = batch[0].to(args.device)
+ # nl_inputs = batch[1].to(args.device)
+ # labels = batch[2].to(args.device)
+ # model.eval()
+ # with torch.no_grad():
+ # _, cf, nf = model(code_inputs=code_inputs, nl_inputs=nl_inputs, labels=labels, do_my_test=True)
+ # code_feature.append(cf.cpu().detach().numpy())
+ # nl_feature.append(nf.cpu().detach().numpy())
+ # code_feature_output_path = os.path.join(output_dir, 'code_feature.pkl')
+ # nl_feature_output_path = os.path.join(output_dir, 'nl_feature.pkl')
+ # with open(code_feature_output_path, 'wb') as f1, open(nl_feature_output_path, 'wb') as f2:
+ # pickle.dump(code_feature, f1)
+ # pickle.dump(code_feature, f2)
+
+def evaluate(args, model, tokenizer,eval_when_training=False):
+ eval_output_dir = args.output_dir
+ eval_data_path = os.path.join(args.data_dir, args.dev_file)
+ eval_dataset = TextDataset(tokenizer, args, eval_data_path, type='eval')
+
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+ os.makedirs(eval_output_dir)
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
+
+ # multi-gpu evaluate
+ if args.n_gpu > 1 and eval_when_training is False:
+ model = torch.nn.DataParallel(model)
+
+ # Eval!
+ logger.info("***** Running evaluation *****")
+ logger.info(" Num examples = %d", len(eval_dataset))
+ logger.info(" Batch size = %d", args.eval_batch_size)
+ eval_loss = 0.0
+ nb_eval_steps = 0
+ model.eval()
+ all_predictions = []
+ all_labels = []
+ for batch in eval_dataloader:
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+ labels = batch[2].to(args.device)
+ with torch.no_grad():
+ lm_loss, predictions = model(code_inputs, nl_inputs, labels)
+ # lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)
+ eval_loss += lm_loss.mean().item()
+ all_predictions.append(predictions.cpu())
+ all_labels.append(labels.cpu())
+ nb_eval_steps += 1
+ all_predictions = torch.cat(all_predictions, 0).squeeze().numpy()
+ all_labels = torch.cat(all_labels, 0).squeeze().numpy()
+ eval_loss = torch.tensor(eval_loss / nb_eval_steps)
+
+ results = acc_and_f1(all_predictions, all_labels)
+ results.update({"eval_loss": float(eval_loss)})
+ return results
+
+
+def test(args, model, tokenizer):
+ if not args.prediction_file:
+ args.prediction_file = os.path.join(args.output_dir, 'predictions.txt')
+ if not os.path.exists(os.path.dirname(args.prediction_file)):
+ os.makedirs(os.path.dirname(args.prediction_file))
+ if not args.answer_file:
+ args.answer_file = os.path.join(args.output_dir, 'golds.txt')
+ if not os.path.exists(os.path.dirname(args.answer_file)):
+ os.makedirs(os.path.dirname(args.answer_file))
+
+ test_data_path = os.path.join(args.data_dir, args.test_file)
+ eval_dataset = TextDataset(tokenizer, args, test_data_path) #, type='test')
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+ # multi-gpu evaluate
+ if args.n_gpu > 1:
+ model = torch.nn.DataParallel(model)
+
+ # Eval!
+ logger.info("***** Running Test *****")
+ logger.info(" Num examples = %d", len(eval_dataset))
+ logger.info(" Batch size = %d", args.eval_batch_size)
+
+ nb_eval_steps = 0
+ all_predictions = []
+ all_golds = []
+ for batch in eval_dataloader:
+ code_inputs = batch[0].to(args.device)
+ nl_inputs = batch[1].to(args.device)
+ labels = batch[2].to(args.device)
+ with torch.no_grad():
+ _, predictions = model(code_inputs, nl_inputs, labels)
+ all_predictions.append(predictions.cpu())
+ all_golds.append(labels.cpu())
+ nb_eval_steps += 1
+ all_predictions = torch.cat(all_predictions, 0).squeeze().numpy()
+ all_golds = torch.cat(all_golds, 0).squeeze().numpy()
+
+ logger.info("***** Saving Test Result *****")
+ with open(args.prediction_file,'w') as f:
+ for example, pred in zip(eval_dataset.examples, all_predictions.tolist()):
+ f.write(str(example.idx)+'\t'+str(int(pred))+'\n')
+ with open(args.answer_file,'w') as f:
+ for example, gold in zip(eval_dataset.examples, all_golds.tolist()):
+ f.write(str(example.idx)+'\t'+str(int(gold))+'\n')
+
+
+def check_feature():
+ code_feature = pickle.load(file=open('model_codesearchnet/checkpoint-all/epoch_0/code_feature.pkl', 'rb'))
+ print(len(code_feature))
+ print(code_feature[0].shape)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+
+ ## Required parameters
+ parser.add_argument("--data_dir", default=None, type=str, required=True,
+ help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
+ parser.add_argument("--train_file", default=None, type=str,
+ help="The input training data file (a text file).")
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
+ help="The output directory where the model predictions and checkpoints will be written.")
+
+ ## Other parameters
+    parser.add_argument("--dev_file", default=None, type=str,
+                        help="An optional dev-set data file to evaluate on (a text file).")
+    parser.add_argument("--test_file", default=None, type=str,
+                        help="An optional test-set data file to run prediction on (a text file).")
+
+ parser.add_argument("--model_type", default="roberta", type=str,
+ help="The model architecture to be fine-tuned.")
+ parser.add_argument("--pn_weight", type=float, default=1.0,
+                        help="Ratio of positive examples in the sum of the BCE loss")
+ parser.add_argument("--encoder_name_or_path", default=None, type=str,
+ help="The model checkpoint for weights initialization.")
+ parser.add_argument("--checkpoint_path", default=None, type=str,
+ help="The checkpoint path of model to continue training.")
+
+ parser.add_argument("--mlm", action='store_true',
+ help="Train with masked-language modeling loss instead of language modeling.")
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
+ help="Ratio of tokens to mask for masked language modeling loss")
+
+ parser.add_argument("--config_name", default="", type=str,
+ help="Pretrained config name or path if not the same as model_name")
+ parser.add_argument("--tokenizer_name", default="", type=str,
+ help="Pretrained tokenizer name or path if not the same as model_name")
+ parser.add_argument("--cache_dir", default="", type=str,
+                        help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
+ parser.add_argument("--max_seq_length", default=-1, type=int,
+                        help="Optional input sequence length after tokenization. "
+                             "The training dataset will be truncated into blocks of this size for training. "
+                             "Defaults to the model's max input length for single-sentence inputs (taking special tokens into account).")
+ parser.add_argument("--do_train", action='store_true',
+ help="Whether to run training.")
+ parser.add_argument("--do_eval", action='store_true',
+ help="Whether to run eval on the dev set.")
+ parser.add_argument("--do_predict", action='store_true',
+ help="Whether to run predict on the test set.")
+ parser.add_argument("--evaluate_during_training", action='store_true',
+                        help="Run evaluation during training at each logging step.")
+ parser.add_argument("--do_lower_case", action='store_true',
+ help="Set this flag if you are using an uncased model.")
+
+ parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
+ help="Batch size per GPU/CPU for training.")
+ parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
+ help="Batch size per GPU/CPU for evaluation.")
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
+ help="The initial learning rate for Adam.")
+ parser.add_argument("--weight_decay", default=0.0, type=float,
+                        help="Weight decay to apply, if any.")
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+ help="Epsilon for Adam optimizer.")
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
+ help="Max gradient norm.")
+ parser.add_argument("--num_train_epochs", default=3, type=int,
+ help="Total number of training epochs to perform.")
+ parser.add_argument("--max_steps", default=-1, type=int,
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
+ parser.add_argument("--warmup_steps", default=0, type=int,
+ help="Linear warmup over warmup_steps.")
+
+ parser.add_argument('--logging_steps', type=int, default=50,
+ help="Log every X updates steps.")
+ parser.add_argument('--save_steps', type=int, default=0,
+ help="Save checkpoint every X updates steps.")
+ parser.add_argument('--save_total_limit', type=int, default=None,
+                        help='Limit the total number of checkpoints; older checkpoints in output_dir are deleted when the limit is exceeded. No deletion by default.')
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
+                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
+ parser.add_argument("--no_cuda", action='store_true',
+ help="Avoid using CUDA when available")
+ parser.add_argument('--overwrite_output_dir', action='store_true',
+ help="Overwrite the content of the output directory")
+ parser.add_argument('--overwrite_cache', action='store_true',
+ help="Overwrite the cached training and evaluation sets")
+ parser.add_argument('--seed', type=int, default=42,
+ help="random seed for initialization")
+
+ parser.add_argument('--fp16', action='store_true',
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']. "
+                             "See details at https://nvidia.github.io/apex/amp.html")
+ parser.add_argument("--local_rank", type=int, default=-1,
+ help="For distributed training: local_rank")
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+ parser.add_argument("--pred_model_dir", default=None, type=str,
+ help='model for prediction')
+ parser.add_argument("--test_result_dir", default='test_results.tsv', type=str,
+ help='path to store test result')
+ parser.add_argument("--prediction_file", default=None, type=str,
+                        help='path to save the prediction results; remember to specify the task name')
+ parser.add_argument("--answer_file", default=None, type=str,
+                        help='path to save the gold labels; remember to specify the task name')
+
+ args = parser.parse_args()
+
+ # Setup distant debugging if needed
+ if args.server_ip and args.server_port:
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+ import ptvsd
+ print("Waiting for debugger attach")
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+ ptvsd.wait_for_attach()
+
+ # Setup CUDA, GPU & distributed training
+ if args.local_rank == -1 or args.no_cuda:
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+ args.n_gpu = torch.cuda.device_count()
+    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+ torch.cuda.set_device(args.local_rank)
+ device = torch.device("cuda", args.local_rank)
+ torch.distributed.init_process_group(backend='nccl')
+ args.n_gpu = 1
+ args.device = device
+
+ # Setup logging
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+ datefmt='%m/%d/%Y %H:%M:%S',
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+ # Set seed
+ set_seed(args.seed)
+
+ # Load pretrained model and tokenizer
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
+
+ args.start_epoch = 0
+ args.start_step = 0
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+ if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+ # args.encoder_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+ with open(idx_file, encoding='utf-8') as idxf:
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
+ if os.path.exists(step_file):
+ with open(step_file, encoding='utf-8') as stepf:
+ args.start_step = int(stepf.readlines()[0].strip())
+
+ logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
+
+
+
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.encoder_name_or_path,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ config.num_labels = 2
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.encoder_name_or_path,
+ do_lower_case=args.do_lower_case,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ if args.max_seq_length <= 0:
+ args.max_seq_length = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
+ args.max_seq_length = min(args.max_seq_length, tokenizer.max_len_single_sentence)
+ if args.encoder_name_or_path:
+ model = model_class.from_pretrained(args.encoder_name_or_path,
+ from_tf=bool('.ckpt' in args.encoder_name_or_path),
+ config=config,
+ cache_dir=args.cache_dir if args.cache_dir else None)
+ else:
+ model = model_class(config)
+
+ model = Model(model, config, tokenizer, args)
+
+ if args.checkpoint_path:
+ model.load_state_dict(torch.load(os.path.join(args.checkpoint_path, 'pytorch_model.bin')))
+ if args.local_rank == 0:
+        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab
+
+ logger.info("Training/evaluation parameters %s", args)
+
+ # Training
+ if args.do_train:
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
+ train_data_path = os.path.join(args.data_dir, args.train_file)
+ train_dataset = TextDataset(tokenizer, args, train_data_path, type='train')
+ train(args, train_dataset, model, tokenizer)
+
+ # Evaluation
+ results = {}
+ if args.do_eval and args.local_rank in [-1, 0]:
+ checkpoint_prefix = 'checkpoint-best-aver'
+ output_dir = os.path.join(args.output_dir, checkpoint_prefix)
+ model.load_state_dict(torch.load(os.path.join(output_dir, 'pytorch_model.bin')))
+ tokenizer = tokenizer.from_pretrained(output_dir)
+ model.to(args.device)
+ results = evaluate(args, model, tokenizer)
+ logger.info("***** Eval results *****")
+ for key in results.keys():
+ logger.info(" Eval %s = %s", key, str(results[key]))
+ logger.info("Eval Model From: {}".format(os.path.join(output_dir, 'pytorch_model.bin')))
+ logger.info("***** Eval results *****")
+
+ if args.do_predict and args.local_rank in [-1, 0]:
+ logger.info("***** Testing results *****")
+ checkpoint_prefix = 'checkpoint-best-aver'
+ if checkpoint_prefix not in args.output_dir and \
+ os.path.exists(os.path.join(args.output_dir, checkpoint_prefix)):
+ output_dir = os.path.join(args.output_dir, checkpoint_prefix)
+ else:
+ output_dir = args.output_dir
+ if not args.pred_model_dir:
+ model_path = os.path.join(output_dir, 'pytorch_model.bin')
+ else:
+ model_path = os.path.join(args.pred_model_dir, 'pytorch_model.bin')
+ model.load_state_dict(torch.load(model_path))
+ tokenizer = tokenizer.from_pretrained(output_dir)
+ model.to(args.device)
+ test(args, model, tokenizer)
+ logger.info("Test Model From: {}".format(model_path))
+ return results
+
+
+if __name__ == "__main__":
+ main()
diff --git a/Text-code/NL-code-search-WebQuery/code/train.sh b/Text-code/NL-code-search-WebQuery/code/train.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e2c6216c7072b65d7c5bb57b653c8e84f57a95e4
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/train.sh
@@ -0,0 +1,18 @@
+CUDA_VISIBLE_DEVICES=0,1 python run.py \
+--model_type roberta \
+--do_train \
+--do_eval \
+--eval_all_checkpoints \
+--train_file train_codesearchnet_7.json \
+--dev_file dev_codesearchnet.json \
+--max_seq_length 200 \
+--per_gpu_train_batch_size 16 \
+--per_gpu_eval_batch_size 16 \
+--learning_rate 1e-5 \
+--num_train_epochs 3 \
+--gradient_accumulation_steps 1 \
+--warmup_steps 1000 \
+--evaluate_during_training \
+--data_dir ../data/CodeSearchNet/ \
+--output_dir ../model/model_codesearchnet \
+--encoder_name_or_path microsoft/codebert-base
\ No newline at end of file
diff --git a/Text-code/NL-code-search-WebQuery/code/utils.py b/Text-code/NL-code-search-WebQuery/code/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ef1a905cc574152995712b9ebd2c3b7994027da
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/code/utils.py
@@ -0,0 +1,136 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities for the NL-code-search-WebQuery task, adapted from the BERT/GLUE classification fine-tuning utilities."""
+
+from __future__ import absolute_import, division, print_function
+
+import csv
+import json
+import logging
+import os
+import sys
+from io import open
+from sklearn.metrics import f1_score, precision_score, recall_score
+from torch.utils.data import Dataset
+import torch
+
+csv.field_size_limit(sys.maxsize)
+logger = logging.getLogger(__name__)
+
+
+class InputFeatures(object):
+    """A single set of training/test features for one example."""
+ def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, label, idx):
+ self.code_tokens = code_tokens
+ self.code_ids = code_ids
+ self.nl_tokens = nl_tokens
+ self.nl_ids = nl_ids
+ self.label = label
+ self.idx = idx
+
+
+class InputFeaturesTriplet(InputFeatures):
+    """A single set of training/test features for one example, with the code docstring (ds) kept as separate fields."""
+ def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, ds_tokens, ds_ids, label, idx):
+ super(InputFeaturesTriplet, self).__init__(code_tokens, code_ids, nl_tokens, nl_ids, label, idx)
+ self.ds_tokens = ds_tokens
+ self.ds_ids = ds_ids
+
+
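+# Each record passed to convert_examples_to_features is a dict with at least the
+# keys used below: 'idx', 'label', 'code', and 'doc' (the natural-language query).
+# An illustrative, made-up record:
+#   {"idx": 0, "label": 1, "doc": "read a json file", "code": "def load(p): ..."}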
+def convert_examples_to_features(js, tokenizer, args):
+ # label
+ label = js['label']
+
+ # code
+ code = js['code']
+ code_tokens = tokenizer.tokenize(code)[:args.max_seq_length-2]
+ code_tokens = [tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
+ code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
+ padding_length = args.max_seq_length - len(code_ids)
+ code_ids += [tokenizer.pad_token_id]*padding_length
+
+ nl = js['doc'] # query
+ nl_tokens = tokenizer.tokenize(nl)[:args.max_seq_length-2]
+ nl_tokens = [tokenizer.cls_token]+nl_tokens+[tokenizer.sep_token]
+ nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)
+ padding_length = args.max_seq_length - len(nl_ids)
+ nl_ids += [tokenizer.pad_token_id]*padding_length
+
+ return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids, label, js['idx'])
+
+
+class TextDataset(Dataset):
+ def __init__(self, tokenizer, args, file_path=None, type=None):
+ # json file: dict: idx, query, doc, code
+ self.examples = []
+ self.type = type
+ data=[]
+ with open(file_path, 'r') as f:
+ data = json.load(f)
+ # data = data[:114560]
+ if self.type == 'test':
+ for js in data:
+ js['label'] = 0
+ for js in data:
+ self.examples.append(convert_examples_to_features(js, tokenizer, args))
+ if 'train' in file_path:
+ for idx, example in enumerate(self.examples[:3]):
+ logger.info("*** Example ***")
+ logger.info("idx: {}".format(idx))
+ logger.info("code_tokens: {}".format([x.replace('\u0120','_') for x in example.code_tokens]))
+ logger.info("code_ids: {}".format(' '.join(map(str, example.code_ids))))
+ logger.info("nl_tokens: {}".format([x.replace('\u0120','_') for x in example.nl_tokens]))
+ logger.info("nl_ids: {}".format(' '.join(map(str, example.nl_ids))))
+
+ def __len__(self):
+ return len(self.examples)
+
+ def __getitem__(self, i):
+ """ return both tokenized code ids and nl ids and label"""
+ return torch.tensor(self.examples[i].code_ids), \
+ torch.tensor(self.examples[i].nl_ids),\
+ torch.tensor(self.examples[i].label)
+
+
+
+
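+# The metric helpers below expect `preds` and `labels` to be 1-D numpy arrays of
+# binary (0/1) values; run.py passes arrays obtained via torch.cat(...).numpy().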
+def simple_accuracy(preds, labels):
+ return (preds == labels).mean()
+
+
+def acc_and_f1(preds, labels):
+ acc = simple_accuracy(preds, labels)
+ f1 = f1_score(y_true=labels, y_pred=preds)
+ prec = precision_score(y_true=labels, y_pred=preds)
+ reca = recall_score(y_true=labels, y_pred=preds)
+ return {
+ "acc": acc,
+ "precision": prec,
+ "recall": reca,
+ "f1": f1,
+ "acc_and_f1": (acc + f1) / 2,
+ }
+
+
+def compute_metrics(task_name, preds, labels):
+ assert len(preds) == len(labels)
+ if task_name == "webquery":
+ return acc_and_f1(preds, labels)
+    elif task_name == "staqc":
+ return acc_and_f1(preds, labels)
+ else:
+ raise KeyError(task_name)
+
diff --git a/Text-code/NL-code-search-WebQuery/data.zip b/Text-code/NL-code-search-WebQuery/data.zip
new file mode 100644
index 0000000000000000000000000000000000000000..37a923e4415faae2179ebfd73d12eec517ea1bd8
--- /dev/null
+++ b/Text-code/NL-code-search-WebQuery/data.zip
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24e8ea547211a3830aa3e7ba1246aaf01d6ef6b875f7c2a5e8b4fb0af5bca24b
+size 361434377
diff --git a/Text-code/text-to-code/code/beam.py b/Text-code/text-to-code/code/beam.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b493e55626cd6cb1ab14d099691deb868e32992
--- /dev/null
+++ b/Text-code/text-to-code/code/beam.py
@@ -0,0 +1,118 @@
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+import copy
+
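+# Note: this Beam implementation allocates its score and backpointer tensors via
+# torch.cuda (self.tt), so it assumes decoding runs on a CUDA device.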
+class Beam(object):
+ def __init__(self, size,sos,eos):
+ self.size = size
+ self.tt = torch.cuda
+ # The score for each translation on the beam.
+ self.scores = self.tt.FloatTensor(size).zero_()
+ # The backpointers at each time-step.
+ self.prevKs = []
+ # The outputs at each time-step.
+ self.nextYs = [self.tt.LongTensor(size)
+ .fill_(0)]
+ self.nextYs[0][:] = sos
+ # Has EOS topped the beam yet.
+ self._eos = eos
+ self.eosTop = False
+ # Time and k pair for finished.
+ self.finished = []
+
+ def getCurrentState(self):
+ "Get the outputs for the current timestep."
+ batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
+ return batch
+
+ def getCurrentOrigin(self):
+ "Get the backpointers for the current timestep."
+ return self.prevKs[-1]
+
+ def advance(self, wordLk):
+ """
+        Given the word probabilities `wordLk` for every hypothesis on the
+        beam, compute and update the beam search state.
+
+        Parameters:
+
+        * `wordLk`- probs of advancing from the last step (K x words)
+
+        Whether the search is complete is checked separately via `done()`.
+ """
+ numWords = wordLk.size(1)
+
+ # Sum the previous scores.
+ if len(self.prevKs) > 0:
+ beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
+
+ # Don't let EOS have children.
+ for i in range(self.nextYs[-1].size(0)):
+ if self.nextYs[-1][i] == self._eos:
+ beamLk[i] = -1e20
+ else:
+ beamLk = wordLk[0]
+ flatBeamLk = beamLk.view(-1)
+ bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
+
+ self.scores = bestScores
+
+ # bestScoresId is flattened beam x word array, so calculate which
+ # word and beam each score came from
+ prevK = bestScoresId // numWords
+ self.prevKs.append(prevK)
+ self.nextYs.append((bestScoresId - prevK * numWords))
+
+
+ for i in range(self.nextYs[-1].size(0)):
+ if self.nextYs[-1][i] == self._eos:
+ s = self.scores[i]
+ self.finished.append((s, len(self.nextYs) - 1, i))
+
+ # End condition is when top-of-beam is EOS and no global score.
+ if self.nextYs[-1][0] == self._eos:
+ self.eosTop = True
+
+ def done(self):
+ return self.eosTop and len(self.finished) >=self.size
+
+ def getFinal(self):
+ if len(self.finished) == 0:
+ self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
+ self.finished.sort(key=lambda a: -a[0])
+ if len(self.finished) != self.size:
+ unfinished=[]
+ for i in range(self.nextYs[-1].size(0)):
+ if self.nextYs[-1][i] != self._eos:
+ s = self.scores[i]
+ unfinished.append((s, len(self.nextYs) - 1, i))
+ unfinished.sort(key=lambda a: -a[0])
+ self.finished+=unfinished[:self.size-len(self.finished)]
+ return self.finished[:self.size]
+
+ def getHyp(self, beam_res):
+ """
+ Walk back to construct the full hypothesis.
+ """
+ hyps=[]
+ for _,timestep, k in beam_res:
+ hyp = []
+ for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
+ hyp.append(self.nextYs[j+1][k])
+ k = self.prevKs[j][k]
+ hyps.append(hyp[::-1])
+ return hyps
+
+ def buildTargetTokens(self, preds):
+ sentence=[]
+ for pred in preds:
+ tokens = []
+ for tok in pred:
+ if tok==self._eos:
+ break
+ tokens.append(tok)
+ sentence.append(tokens)
+ return sentence
diff --git a/Text-code/text-to-code/code/bleu.py b/Text-code/text-to-code/code/bleu.py
new file mode 100644
index 0000000000000000000000000000000000000000..47e1335796082b5568089150d7799d37c0527ada
--- /dev/null
+++ b/Text-code/text-to-code/code/bleu.py
@@ -0,0 +1,134 @@
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Python implementation of BLEU and smooth-BLEU.
+
+This module provides a Python implementation of BLEU and smooth-BLEU.
+Smooth BLEU is computed following the method outlined in the paper:
+Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
+evaluation metrics for machine translation. COLING 2004.
+"""
+
+import collections
+import math
+
+
+def _get_ngrams(segment, max_order):
+    """Extracts all n-grams up to a given maximum order from an input segment.
+
+    Args:
+      segment: text segment from which n-grams will be extracted.
+      max_order: maximum length in tokens of the n-grams returned by this
+          method.
+
+    Returns:
+      A Counter containing all n-grams up to max_order in segment,
+      with a count of how many times each n-gram occurred.
+ """
+ ngram_counts = collections.Counter()
+ for order in range(1, max_order + 1):
+ for i in range(0, len(segment) - order + 1):
+ ngram = tuple(segment[i:i+order])
+ ngram_counts[ngram] += 1
+ return ngram_counts
+
+
+def compute_bleu(reference_corpus, translation_corpus, max_order=4,
+ smooth=False):
+ """Computes BLEU score of translated segments against one or more references.
+
+ Args:
+ reference_corpus: list of lists of references for each translation. Each
+ reference should be tokenized into a list of tokens.
+ translation_corpus: list of translations to score. Each translation
+ should be tokenized into a list of tokens.
+ max_order: Maximum n-gram order to use when computing BLEU score.
+ smooth: Whether or not to apply Lin et al. 2004 smoothing.
+
+ Returns:
+      A tuple (bleu, precisions, bp, ratio, translation_length, reference_length):
+      the BLEU score, per-order n-gram precisions, brevity penalty, length ratio,
+      and the total translation and reference lengths.
+ """
+ matches_by_order = [0] * max_order
+ possible_matches_by_order = [0] * max_order
+ reference_length = 0
+ translation_length = 0
+ for (references, translation) in zip(reference_corpus,
+ translation_corpus):
+ reference_length += min(len(r) for r in references)
+ translation_length += len(translation)
+
+ merged_ref_ngram_counts = collections.Counter()
+ for reference in references:
+ merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
+ translation_ngram_counts = _get_ngrams(translation, max_order)
+ overlap = translation_ngram_counts & merged_ref_ngram_counts
+ for ngram in overlap:
+ matches_by_order[len(ngram)-1] += overlap[ngram]
+ for order in range(1, max_order+1):
+ possible_matches = len(translation) - order + 1
+ if possible_matches > 0:
+ possible_matches_by_order[order-1] += possible_matches
+
+ precisions = [0] * max_order
+ for i in range(0, max_order):
+ if smooth:
+ precisions[i] = ((matches_by_order[i] + 1.) /
+ (possible_matches_by_order[i] + 1.))
+ else:
+ if possible_matches_by_order[i] > 0:
+ precisions[i] = (float(matches_by_order[i]) /
+ possible_matches_by_order[i])
+ else:
+ precisions[i] = 0.0
+
+ if min(precisions) > 0:
+ p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
+ geo_mean = math.exp(p_log_sum)
+ else:
+ geo_mean = 0
+
+ ratio = float(translation_length) / reference_length
+
+ if ratio > 1.0:
+ bp = 1.
+ else:
+ bp = math.exp(1 - 1. / ratio)
+
+ bleu = geo_mean * bp
+
+ return (bleu, precisions, bp, ratio, translation_length, reference_length)
+
+
+def _bleu(ref_file, trans_file, subword_option=None):
+ max_order = 4
+ smooth = True
+ ref_files = [ref_file]
+ reference_text = []
+ for reference_filename in ref_files:
+ with open(reference_filename) as fh:
+ reference_text.append(fh.readlines())
+ per_segment_references = []
+ for references in zip(*reference_text):
+ reference_list = []
+ for reference in references:
+ reference_list.append(reference.strip().split())
+ per_segment_references.append(reference_list)
+ translations = []
+ with open(trans_file) as fh:
+ for line in fh:
+ translations.append(line.strip().split())
+ bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
+ return round(100 * bleu_score,2)
\ No newline at end of file
diff --git a/Text-code/text-to-code/code/dataset.py b/Text-code/text-to-code/code/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2b0db54519397e03a5085fe76fcc0e0b7ec9526
--- /dev/null
+++ b/Text-code/text-to-code/code/dataset.py
@@ -0,0 +1,116 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import pickle
+import random
+import re
+import gc
+import shutil
+import json
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+
+try:
+ from torch.utils.tensorboard import SummaryWriter
+except ImportError:
+ from tensorboardX import SummaryWriter
+
+from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+ BertConfig, BertForMaskedLM, BertTokenizer,
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+
+
+class concodeDataset(Dataset):
+ def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'):
+ if args.local_rank==-1:
+ local_rank=0
+ world_size=1
+ else:
+ local_rank=args.local_rank
+ world_size=torch.distributed.get_world_size()
+
+ self.block_size = block_size
+ self.mode = mode
+
+ if not os.path.exists(args.output_dir):
+ os.makedirs(args.output_dir)
+ cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
+ if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache:
+ if file_type == 'train':
+ logger.warning("Loading features from cached file %s", cached_file)
+ with open(cached_file, 'rb') as handle:
+ data = pickle.load(handle)
+ self.inputs = data['inputs']
+ self.token_labels = data['token_labels']
+
+ else:
+ self.inputs = []
+ self.token_labels = []
+
+ datafile = os.path.join(args.data_dir, f"{file_type}.json")
+ if file_type == 'train':
+ logger.warning("Creating features from dataset file at %s", datafile)
+ datas = open(datafile).readlines()
+
+ length = len(datas)
+ logger.info("Data size: %d"%(length))
+ for idx, x in enumerate(datas):
+ if idx % (length//10) == 0:
+ percent = idx / (length//10) * 10
+                    logger.warning("Rank %d, loaded %d%%"%(local_rank, percent))
+ if idx % world_size != local_rank:
+ continue
+ x = json.loads(x)
+ code = tokenizer.encode(x["code"])
+ nl = tokenizer.encode(x["nl"])
+
+ input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer)
+ self.inputs.append(input_ids)
+ self.token_labels.append(input_labels)
+
+ if file_type == 'train':
+                logger.warning("Rank %d: read %d examples, kept %d samples"%(local_rank, length, len(self.inputs)))
+ logger.warning("Saving features into cached file %s", cached_file)
+ if mode != 'test':
+ with open(cached_file, 'wb') as handle:
+ pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL)
+
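+    # pad_and_get_mask builds GPT-2 style inputs: <nl tokens> <bos> <code tokens> <eos>,
+    # padded to block_size. The parallel `labels` list marks token roles rather than
+    # prediction targets: 1 = natural-language prefix, 2 = the <bos>+code span the LM
+    # loss is computed over (run.py keeps positions where the label == 2), and 0 =
+    # positions masked out of attention (padding and the trailing <eos>). In test mode
+    # only the unpadded NL prefix plus <bos> is returned, to serve as the prompt.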
+ def pad_and_get_mask(self, code, nl, tokenizer):
+ if self.mode == 'test':
+ code = []
+ while (len(code) + len(nl) + 2 > self.block_size):
+ if (len(code) > len(nl)):
+ code = code[:-1]
+ else:
+ nl = nl[:-1]
+ if self.mode == 'train':
+ inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id]
+ labels = [1] * len(nl) + [2] * (len(code)+1) + [0]
+ else:
+ inputs = nl + [tokenizer.bos_token_id]
+ labels = [1] * len(nl) + [2]
+ return inputs, labels
+ assert len(inputs) <= self.block_size
+ pad_len = self.block_size - len(inputs)
+ inputs += [tokenizer.pad_token_id] * pad_len
+ labels += [0] * pad_len
+ assert len(inputs) == len(labels)
+ return inputs, labels
+
+
+ def __len__(self):
+ return len(self.inputs)
+
+ def __getitem__(self, item):
+ return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item])
diff --git a/Text-code/text-to-code/code/eval.sh b/Text-code/text-to-code/code/eval.sh
new file mode 100644
index 0000000000000000000000000000000000000000..acefab7a7a72205451a31043313e6638d5c58652
--- /dev/null
+++ b/Text-code/text-to-code/code/eval.sh
@@ -0,0 +1,17 @@
+LANG=java
+DATADIR=../dataset
+OUTPUTDIR=../model
+PRETRAINDIR=../model/checkpoint-last
+LOGFILE=text2code_concode_eval.log
+
+CUDA_VISIBLE_DEVICES=0 python run.py \
+ --data_dir=$DATADIR \
+ --langs=$LANG \
+ --output_dir=$OUTPUTDIR \
+ --pretrain_dir=$PRETRAINDIR \
+ --log_file=$LOGFILE \
+ --model_type=gpt2 \
+ --block_size=512 \
+ --do_eval \
+ --logging_steps=100 \
+ --seed=42
\ No newline at end of file
diff --git a/Text-code/text-to-code/code/evaluate.sh b/Text-code/text-to-code/code/evaluate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fae379e830801a3aca3c56ee3f9fd2b259bc157d
--- /dev/null
+++ b/Text-code/text-to-code/code/evaluate.sh
@@ -0,0 +1,3 @@
+python evaluator.py \
+-a=../dataset/dev.json \
+-p=../model/dev.output
\ No newline at end of file
diff --git a/Text-code/text-to-code/code/evaluator.py b/Text-code/text-to-code/code/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed57ee70de10077b73d206683505dc9ba3e6ea18
--- /dev/null
+++ b/Text-code/text-to-code/code/evaluator.py
@@ -0,0 +1,42 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT license.
+import os
+import logging
+import argparse
+from bleu import _bleu
+import json
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO)
+
+def main():
+    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for the text-to-code generation task.')
+ parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in json format.")
+ parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
+ args = parser.parse_args()
+
+ preds = open(args.predictions, "r").readlines()
+ gts = open(args.answers, "r").readlines()
+
+ assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
+
+ total = len(gts)
+ EM = 0.0
+ with open("ground_truth.txt", "w") as wf:
+ for pred, gt in zip(preds, gts):
+ pred = pred.strip()
+ gt = json.loads(gt)["code"]
+ wf.write(gt+"\n")
+ if pred.split() == gt.split():
+ EM += 1
+
+ bleu_score = round(_bleu("ground_truth.txt", args.predictions), 2)
+ logger.info(f"BLEU: {bleu_score}, EM: {round(EM/total*100, 2)}")
+
+ try:
+ os.remove("ground_truth.txt")
+ except Exception:
+ pass
+
+if __name__ == "__main__":
+ main()
diff --git a/Text-code/text-to-code/code/run.py b/Text-code/text-to-code/code/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..98901742228fe36492689af682f13932298c9016
--- /dev/null
+++ b/Text-code/text-to-code/code/run.py
@@ -0,0 +1,673 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Text to code generation pipeline in CodeXGLUE
+"""
+
+from __future__ import absolute_import, division, print_function
+
+import argparse
+import glob
+import logging
+import os
+import pickle
+import random
+import re
+import shutil
+import json
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
+from torch.utils.data.distributed import DistributedSampler
+from dataset import concodeDataset
+from beam import Beam
+
+try:
+ from torch.utils.tensorboard import SummaryWriter
+except ImportError:
+ from tensorboardX import SummaryWriter
+
+from torch.nn import CrossEntropyLoss
+
+from bleu import _bleu
+from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+ BertConfig, BertForMaskedLM, BertTokenizer,
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+
+logger = logging.getLogger(__name__)
+
+MODEL_CLASSES = {
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
+ 'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
+ 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
+ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
+}
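+# The provided shell scripts (e.g. eval.sh) pass --model_type=gpt2, which selects
+# the (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer) entry above.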
+
+
+
+def load_and_cache_examples(args, tokenizer, evaluate=False):
+ dataset = concodeDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
+ block_size=args.block_size)
+ return dataset
+
+
+def set_seed(args):
+ random.seed(args.seed)
+ np.random.seed(args.seed)
+ torch.manual_seed(args.seed)
+ if args.n_gpu > 0:
+ torch.cuda.manual_seed_all(args.seed)
+
+
+def update_config(model, tokenizer):
+ model.config.bos_token_id = tokenizer.bos_token_id
+ model.config.eos_token_id = tokenizer.eos_token_id
+ model.config.pad_token_id = tokenizer.pad_token_id
+
+
+def train(args, train_dataset, model, tokenizer, fh, pool):
+ """ Train the model """
+ if args.local_rank in [-1, 0]:
+ args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
+ if not os.path.exists(args.tensorboard_dir):
+ os.makedirs(args.tensorboard_dir)
+ tb_writer = SummaryWriter(args.tensorboard_dir)
+
+ args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+ train_sampler = RandomSampler(train_dataset)
+
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
+ total_examples = len(train_dataset) * (
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+ batch_size = args.batch_size * args.gradient_accumulation_steps * (
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1)
+ # if args.max_steps > 0:
+ # t_total = args.max_steps
+ # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
+ if args.num_train_epochs > 0:
+ t_total = total_examples // batch_size * args.num_train_epochs
+ args.max_steps = t_total
+ model.to(args.device)
+ if args.local_rank not in [-1, 0]:
+ torch.distributed.barrier()
+ # Prepare optimizer and schedule (linear warmup and decay)
+ no_decay = ['bias', 'LayerNorm.weight']
+ optimizer_grouped_parameters = [
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+ 'weight_decay': args.weight_decay},
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+ ]
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
+ num_training_steps=t_total)
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
+ if os.path.exists(scheduler_last):
+ scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
+ if os.path.exists(optimizer_last):
+ optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
+ if args.local_rank == 0:
+ torch.distributed.barrier()
+ if args.fp16:
+ try:
+ from apex import amp
+ except ImportError:
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+ # multi-gpu training (should be after apex fp16 initialization)
+ if args.n_gpu > 1:
+ model = torch.nn.DataParallel(model)
+
+ # Distributed training (should be after apex fp16 initialization)
+ if args.local_rank != -1:
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
+ output_device=args.local_rank%args.gpu_per_node,
+ find_unused_parameters=True)
+
+ # Train!
+ logger.info("***** Running training *****")
+ logger.info(" Num examples = %d", total_examples )
+ logger.info(" Num epoch = %d", t_total*batch_size//total_examples)
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+ logger.info(" Total optimization steps = %d", t_total)
+
+ global_step = args.start_step
+ tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0,0.0,0
+ # model.resize_token_embeddings(len(tokenizer))
+ model.zero_grad()
+ set_seed(args) # Added here for reproducibility (even between python 2 and 3)
+
+ best_bleu = 0.0
+
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
+ for step, (batch, token_labels) in enumerate(train_dataloader):
+ inputs = batch.to(args.device)
+ attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
+ loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
+ model.train()
+ # outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
+ # loss = outputs[0]
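+            # Compute the LM loss manually: shift logits and labels by one position for
+            # next-token prediction, then keep only positions whose token label is 2
+            # (the code span produced by pad_and_get_mask), so the NL prefix and padding
+            # do not contribute to the loss.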
+ outputs = model(inputs, attention_mask=attn_mask)
+ logits = outputs[0]
+ labels = inputs
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
+ ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])
+
+ if args.n_gpu > 1:
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
+ if args.gradient_accumulation_steps > 1:
+ loss = loss / args.gradient_accumulation_steps
+
+ if args.fp16:
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+ else:
+ loss.backward()
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+ tr_loss += loss.item()
+
+ if (step + 1) % args.gradient_accumulation_steps == 0:
+ optimizer.step()
+ optimizer.zero_grad()
+ scheduler.step()
+ global_step += 1
+ output_flag=True
+ avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
+ if global_step % args.logging_steps == 0:
+ logger.info(" steps: %s ppl: %s", global_step, round(avg_loss,5))
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+ # Log metrics
+ tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
+ tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
+ logging_loss = tr_loss
+ tr_nb=global_step
+
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+ checkpoint_prefix = "checkpoint"
+ # Save model checkpoint
+ if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
+ results = evaluate(args, model, tokenizer, eval_when_training=True)
+ for key, value in results.items():
+ tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
+ logger.info(" %s = %s", key, round(value,4))
+ output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4)))
+ # dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=100)
+ # logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")
+ # output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(dev_bleu,2)))
+ # if dev_bleu > best_bleu:
+ # best_bleu = dev_bleu
+ # logger.info(f"best bleu updated. saved in {output_dir}")
+ # logger.info(f"best bleu: {best_bleu}")
+ else:
+ output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = (
+ model.module if hasattr(model, "module") else model
+ ) # Take care of distributed/parallel training
+ model_to_save.save_pretrained(output_dir)
+ tokenizer.save_pretrained(output_dir)
+
+ torch.save(args, os.path.join(output_dir, "training_args.bin"))
+ logger.info("Saving model checkpoint to %s", output_dir)
+
+ # _rotate_checkpoints(args, checkpoint_prefix)
+ last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
+ if not os.path.exists(last_output_dir):
+ os.makedirs(last_output_dir)
+ model_to_save.save_pretrained(last_output_dir)
+ tokenizer.save_pretrained(last_output_dir)
+ idx_file = os.path.join(last_output_dir, 'idx_file.txt')
+ with open(idx_file, 'w', encoding='utf-8') as idxf:
+ idxf.write(str(0) + '\n')
+
+ torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
+ torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
+ logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
+
+ step_file = os.path.join(last_output_dir, 'step_file.txt')
+ with open(step_file, 'w', encoding='utf-8') as stepf:
+ stepf.write(str(global_step) + '\n')
+
+ # torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
+ # torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
+ # logger.info("Saving optimizer and scheduler states to %s", output_dir)
+
+
+ if args.max_steps > 0 and global_step > args.max_steps:
+ break
+ if args.max_steps > 0 and global_step > args.max_steps:
+ break
+
+        # Save a checkpoint at the end of every epoch
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ model_to_save = model.module if hasattr(model, 'module') else model
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
+
+ if args.local_rank in [-1, 0]:
+ tb_writer.close()
+
+ return global_step, tr_loss / global_step
+
+
+def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
+ eval_output_dir = args.output_dir
+
+ eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
+
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+ os.makedirs(eval_output_dir)
+
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+ # Note that DistributedSampler samples randomly
+ eval_sampler = SequentialSampler(eval_dataset)
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+ # multi-gpu evaluate
+ if args.n_gpu > 1 and eval_when_training is False:
+ model = torch.nn.DataParallel(model)
+
+ # Eval!
+ #logger.info("***** Running evaluation {} *****".format(prefix))
+ #logger.info(" Num examples = %d", len(eval_dataset))
+ #logger.info(" Batch size = %d", args.eval_batch_size)
+ eval_loss = 0.0
+ nb_eval_steps = 0
+ model.eval()
+
+ for step, (batch, token_labels) in enumerate(eval_dataloader):
+
+ inputs = batch.to(args.device)
+ attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
+ loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
+ with torch.no_grad():
+ outputs = model(inputs, attention_mask=attn_mask)
+ logits = outputs[0]
+ labels = inputs
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
+ ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])
+ eval_loss += loss.mean().item()
+ nb_eval_steps += 1
+
+ # inputs = batch.to(args.device)
+ # attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
+ # loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
+ # with torch.no_grad():
+ # outputs = model(inputs, attention_mask=attn_mask, labels=inputs, loss_mask=loss_mask)
+ # loss = outputs[0]
+ # eval_loss += loss.mean().item()
+ # nb_eval_steps += 1
+
+ eval_loss = eval_loss / nb_eval_steps
+ perplexity = torch.exp(torch.tensor(eval_loss))
+
+ result = {
+ "perplexity": float(perplexity)
+ }
+
+ output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
+ with open(output_eval_file, "w") as writer:
+ #logger.info("***** Eval results {} *****".format(prefix))
+ for key in sorted(result.keys()):
+ #logger.info(" %s = %s", key, str(result[key]))
+ writer.write("%s = %s\n" % (key, str(result[key])))
+
+ return result
+
+def eval_bleu(args, model, tokenizer, file_type='test', num=2000):
+ dataset = concodeDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size, mode='test')
+ test_sampler = SequentialSampler(dataset)
+ test_dataloader = DataLoader(dataset, sampler=test_sampler, batch_size=1)
+ model.to(args.device)
+ model.zero_grad()
+ model.eval()
+
+ preds = []
+ max_gen_len = 100
+ for step, (batch, token_labels) in enumerate(test_dataloader):
+ if step >= num:
+ break
+ inputs = batch.to(args.device)
+ # with torch.no_grad():
+ # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70, \
+ # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id)
+ # # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95, \
+ # # bos_token_id=tokenizer.bos_token_id, eos_token_id=tokenizer.pad_token_id, pad_token_id=tokenizer.pad_token_id)
+ # # outputs = model.generate(inputs, max_length=args.block_size, num_beams=10, temperature=0.7, early_stopping=False, top_k=70)
+ # # outputs = model.generate(inputs, max_length=args.block_size, do_sample=True, temperature=0.7, top_k=70, top_p=0.95)
+ # generation = tokenizer.decode(outputs[0])[len(tokenizer.decode(inputs[0])):]
+ # preds.append(generation.rstrip("