RRFRRF committed on
Commit
dee113c
0 Parent(s):

init commit without .pth

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +55 -0
  2. Code-Code/Clone-detection-BigCloneBench/code/eval.sh +19 -0
  3. Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh +3 -0
  4. Code-Code/Clone-detection-BigCloneBench/code/evaluator.py +53 -0
  5. Code-Code/Clone-detection-BigCloneBench/code/model.py +62 -0
  6. Code-Code/Clone-detection-BigCloneBench/code/run.py +649 -0
  7. Code-Code/Clone-detection-BigCloneBench/code/train.log +0 -0
  8. Code-Code/Clone-detection-BigCloneBench/code/train.sh +18 -0
  9. Code-Code/Clone-detection-BigCloneBench/dataset.zip +3 -0
  10. Code-Code/Clone-detection-POJ-104/code/eval.sh +17 -0
  11. Code-Code/Clone-detection-POJ-104/code/evaluate.sh +6 -0
  12. Code-Code/Clone-detection-POJ-104/code/evaluator.py +64 -0
  13. Code-Code/Clone-detection-POJ-104/code/extract_answers.py +39 -0
  14. Code-Code/Clone-detection-POJ-104/code/model.py +48 -0
  15. Code-Code/Clone-detection-POJ-104/code/run.py +632 -0
  16. Code-Code/Clone-detection-POJ-104/code/test.sh +17 -0
  17. Code-Code/Clone-detection-POJ-104/code/train.sh +18 -0
  18. Code-Code/Clone-detection-POJ-104/dataset.zip +3 -0
  19. Code-Code/CodeCompletion-token/code/beam.py +118 -0
  20. Code-Code/CodeCompletion-token/code/dataset.py +261 -0
  21. Code-Code/CodeCompletion-token/code/eval.sh +20 -0
  22. Code-Code/CodeCompletion-token/code/evaluate.sh +3 -0
  23. Code-Code/CodeCompletion-token/code/evaluator.py +36 -0
  24. Code-Code/CodeCompletion-token/code/model.py +68 -0
  25. Code-Code/CodeCompletion-token/code/run_lm.py +728 -0
  26. Code-Code/CodeCompletion-token/code/train.sh +31 -0
  27. Code-Code/CodeCompletion-token/data.zip +3 -0
  28. Code-Code/Defect-detection/code/eval.sh +18 -0
  29. Code-Code/Defect-detection/code/evaluate.sh +1 -0
  30. Code-Code/Defect-detection/code/evaluator.py +52 -0
  31. Code-Code/Defect-detection/code/model.py +45 -0
  32. Code-Code/Defect-detection/code/run.py +598 -0
  33. Code-Code/Defect-detection/code/train.sh +17 -0
  34. Code-Code/Defect-detection/dataset.zip +3 -0
  35. Code-Code/code-refinement/code/bleu.py +134 -0
  36. Code-Code/code-refinement/code/eval.sh +17 -0
  37. Code-Code/code-refinement/code/evaluate.sh +3 -0
  38. Code-Code/code-refinement/code/evaluator.py +35 -0
  39. Code-Code/code-refinement/code/model.py +223 -0
  40. Code-Code/code-refinement/code/run.py +575 -0
  41. Code-Code/code-refinement/code/train.sh +22 -0
  42. Code-Code/code-refinement/dataset.zip +3 -0
  43. Code-Text/code-to-text/code/bleu.py +200 -0
  44. Code-Text/code-to-text/code/evaluate.sh +6 -0
  45. Code-Text/code-to-text/code/evaluator.py +200 -0
  46. Code-Text/code-to-text/code/model.py +222 -0
  47. Code-Text/code-to-text/code/run.py +544 -0
  48. Code-Text/code-to-text/code/test.sh +22 -0
  49. Code-Text/code-to-text/code/train.sh +28 -0
  50. Code-Text/code-to-text/data.zip +3 -0
.gitattributes ADDED
@@ -0,0 +1,55 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
Code-Code/Clone-detection-BigCloneBench/code/eval.sh ADDED
@@ -0,0 +1,19 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+ --output_dir=../model \
+ --model_type=roberta \
+ --config_name=microsoft/codebert-base \
+ --model_name_or_path=microsoft/codebert-base \
+ --tokenizer_name=roberta-base \
+ --do_eval \
+ --do_test \
+ --train_data_file=../dataset/train.txt \
+ --eval_data_file=../dataset/valid.txt \
+ --test_data_file=../dataset/valid.txt \
+ --epoch 2 \
+ --block_size 400 \
+ --train_batch_size 16 \
+ --eval_batch_size 32 \
+ --learning_rate 5e-5 \
+ --max_grad_norm 1.0 \
+ --evaluate_during_training \
+ --seed 123456
Code-Code/Clone-detection-BigCloneBench/code/evaluate.sh ADDED
@@ -0,0 +1,3 @@
+ python evaluator.py \
+ -a ../dataset/valid.txt \
+ -p ../model/predictions.txt
Code-Code/Clone-detection-BigCloneBench/code/evaluator.py ADDED
@@ -0,0 +1,53 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import logging
+ import sys
+ from sklearn.metrics import recall_score,precision_score,f1_score
+
+ def read_answers(filename):
+     answers={}
+     with open(filename) as f:
+         for line in f:
+             line=line.strip()
+             idx1,idx2,label=line.split()
+             answers[(idx1,idx2)]=int(label)
+     return answers
+
+ def read_predictions(filename):
+     predictions={}
+     with open(filename) as f:
+         for line in f:
+             line=line.strip()
+             idx1,idx2,label=line.split()
+             predictions[(idx1,idx2)]=int(label)
+     return predictions
+
+ def calculate_scores(answers,predictions):
+     y_trues,y_preds=[],[]
+     for key in answers:
+         if key not in predictions:
+             logging.error("Missing prediction for ({},{}) pair.".format(key[0],key[1]))
+             sys.exit()
+         y_trues.append(answers[key])
+         y_preds.append(predictions[key])
+     scores={}
+     scores['Recall']=recall_score(y_trues, y_preds)
+     scores['Precision']=precision_score(y_trues, y_preds)
+     scores['F1']=f1_score(y_trues, y_preds)
+     return scores
+
+ def main():
+     import argparse
+     parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for BigCloneBench dataset.')
+     parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.")
+     parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.")
+
+     args = parser.parse_args()
+     answers=read_answers(args.answers)
+     predictions=read_predictions(args.predictions)
+     scores=calculate_scores(answers,predictions)
+     print(scores)
+
+ if __name__ == '__main__':
+     main()
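As a quick illustration of what this evaluator consumes, the snippet below is a toy, in-memory sketch: the answer and prediction files hold whitespace-separated `idx1 idx2 label` triples, and the metrics are the same sklearn calls used in calculate_scores above. The index values here are made up for the example.

```python
# Toy sketch of the BigCloneBench evaluator's inputs and metrics (indices are hypothetical).
from sklearn.metrics import recall_score, precision_score, f1_score

answers     = {("13", "27"): 1, ("13", "42"): 0, ("27", "42"): 1}   # gold labels per pair
predictions = {("13", "27"): 1, ("13", "42"): 1, ("27", "42"): 1}   # model output per pair

y_trues = [answers[k] for k in answers]
y_preds = [predictions[k] for k in answers]   # same keyed lookup calculate_scores performs
print({"Recall": recall_score(y_trues, y_preds),
       "Precision": precision_score(y_trues, y_preds),
       "F1": f1_score(y_trues, y_preds)})
```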
Code-Code/Clone-detection-BigCloneBench/code/model.py ADDED
@@ -0,0 +1,62 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import torch
+ import torch.nn as nn
+ from torch.autograd import Variable
+ import copy
+ import torch.nn.functional as F
+ from torch.nn import CrossEntropyLoss, MSELoss
+
+ class RobertaClassificationHead(nn.Module):
+     """Head for sentence-level classification tasks."""
+
+     def __init__(self, config):
+         super().__init__()
+         self.dense = nn.Linear(config.hidden_size*2, config.hidden_size)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.out_proj = nn.Linear(config.hidden_size, 2)
+
+     def forward(self, features, **kwargs):
+         x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
+         x = x.reshape(-1, x.size(-1)*2)
+         x = self.dropout(x)
+         x = self.dense(x)
+         x = torch.tanh(x)
+         x = self.dropout(x)
+         x = self.out_proj(x)
+         return x
+
+ class Model(nn.Module):
+     def __init__(self, encoder, config, tokenizer, args):
+         super(Model, self).__init__()
+         self.encoder = encoder
+         self.config = config
+         self.tokenizer = tokenizer
+         self.classifier = RobertaClassificationHead(config)
+         self.args = args
+
+     def forward(self, input_ids=None, labels=None, return_vec=None):
+         input_ids = input_ids.view(-1, self.args.block_size)
+         outputs = self.encoder(input_ids=input_ids, attention_mask=input_ids.ne(1))
+
+         if return_vec:
+             return outputs.pooler_output
+
+         outputs = outputs[0]
+         logits = self.classifier(outputs)
+         prob = F.softmax(logits, dim=-1)
+
+         if labels is not None:
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(logits, labels)
+             return loss, prob
+         else:
+             return prob
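The head above takes `hidden_size*2` inputs because each training example packs two code snippets: forward() views the ids as (2*batch, block_size) rows, and the reshape in the head pairs consecutive <s> vectors back into one row per example. The sketch below is only a shape walkthrough with random tensors standing in for the encoder output; the toy batch size, block size, and hidden size are assumptions.

```python
# Shape sketch of the paired-code classification head (toy numbers, random stand-in features).
import torch

batch, block_size, hidden = 2, 400, 768
input_ids = torch.randint(0, 50000, (batch, 2 * block_size))
flat = input_ids.view(-1, block_size)                      # (2*batch, block_size)
features = torch.randn(flat.size(0), block_size, hidden)   # stand-in for encoder output
cls = features[:, 0, :]                                    # <s> vector per snippet, (2*batch, hidden)
paired = cls.reshape(-1, hidden * 2)                       # one row per pair -> nn.Linear(hidden*2, hidden)
print(paired.shape)                                        # torch.Size([2, 1536])
```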
Code-Code/Clone-detection-BigCloneBench/code/run.py ADDED
@@ -0,0 +1,649 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
18
+ GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
19
+ using a masked language modeling (MLM) loss.
20
+ """
21
+
22
+ from __future__ import absolute_import, division, print_function
23
+
24
+ import argparse
25
+ import glob
26
+ import logging
27
+ import os
28
+ import pickle
29
+ import random
30
+ import re
31
+ import shutil
32
+ import json
33
+ import numpy as np
34
+ import torch
35
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
36
+ from torch.utils.data.distributed import DistributedSampler
37
+
38
+ try:
39
+ from torch.utils.tensorboard import SummaryWriter
40
+ except:
41
+ from tensorboardX import SummaryWriter
42
+
43
+ from tqdm import tqdm, trange
44
+ import multiprocessing
45
+ from model import Model
46
+
47
+ cpu_cont = 16
48
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
49
+ BertConfig, BertForMaskedLM, BertTokenizer,
50
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
51
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
52
+ RobertaConfig, RobertaModel, RobertaTokenizer,
53
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
54
+
55
+ logger = logging.getLogger(__name__)
56
+
57
+ MODEL_CLASSES = {
58
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
59
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
60
+ 'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
61
+ 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
62
+ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
63
+ }
64
+
65
+ def get_example(item):
66
+ url1,url2,label,tokenizer,args,cache,url_to_code=item
67
+ if url1 in cache:
68
+ code1=cache[url1].copy()
69
+ else:
70
+ try:
71
+ code=' '.join(url_to_code[url1].split())
72
+ except:
73
+ code=""
74
+ code1=tokenizer.tokenize(code)
75
+ if url2 in cache:
76
+ code2=cache[url2].copy()
77
+ else:
78
+ try:
79
+ code=' '.join(url_to_code[url2].split())
80
+ except:
81
+ code=""
82
+ code2=tokenizer.tokenize(code)
83
+
84
+ return convert_examples_to_features(code1,code2,label,url1,url2,tokenizer,args,cache)
85
+
86
+
87
+ class InputFeatures(object):
88
+ """A single training/test features for a example."""
89
+ def __init__(self,
90
+ input_tokens,
91
+ input_ids,
92
+ label,
93
+ url1,
94
+ url2
95
+
96
+ ):
97
+ self.input_tokens = input_tokens
98
+ self.input_ids = input_ids
99
+ self.label=label
100
+ self.url1=url1
101
+ self.url2=url2
102
+
103
+ def convert_examples_to_features(code1_tokens,code2_tokens,label,url1,url2,tokenizer,args,cache):
104
+ #source
105
+ code1_tokens=code1_tokens[:args.block_size-2]
106
+ code1_tokens =[tokenizer.cls_token]+code1_tokens+[tokenizer.sep_token]
107
+ code2_tokens=code2_tokens[:args.block_size-2]
108
+ code2_tokens =[tokenizer.cls_token]+code2_tokens+[tokenizer.sep_token]
109
+
110
+ code1_ids=tokenizer.convert_tokens_to_ids(code1_tokens)
111
+ padding_length = args.block_size - len(code1_ids)
112
+ code1_ids+=[tokenizer.pad_token_id]*padding_length
113
+
114
+ code2_ids=tokenizer.convert_tokens_to_ids(code2_tokens)
115
+ padding_length = args.block_size - len(code2_ids)
116
+ code2_ids+=[tokenizer.pad_token_id]*padding_length
117
+
118
+ source_tokens=code1_tokens+code2_tokens
119
+ source_ids=code1_ids+code2_ids
120
+ return InputFeatures(source_tokens,source_ids,label,url1,url2)
121
+
122
+ class TextDataset(Dataset):
123
+ def __init__(self, tokenizer, args, file_path='train', block_size=512,pool=None):
124
+ postfix=file_path.split('/')[-1].split('.txt')[0]
125
+ self.examples = []
126
+ index_filename=file_path
127
+ logger.info("Creating features from index file at %s ", index_filename)
128
+ url_to_code={}
129
+ with open('/'.join(index_filename.split('/')[:-1])+'/data.jsonl') as f:
130
+ for line in f:
131
+ line=line.strip()
132
+ js=json.loads(line)
133
+ url_to_code[js['idx']]=js['func']
134
+
135
+ data=[]
136
+ cache={}
137
+ f=open(index_filename)
138
+ with open(index_filename) as f:
139
+ for line in f:
140
+ line=line.strip()
141
+ url1,url2,label=line.split('\t')
142
+ if url1 not in url_to_code or url2 not in url_to_code:
143
+ continue
144
+ if label=='0':
145
+ label=0
146
+ else:
147
+ label=1
148
+ data.append((url1,url2,label,tokenizer, args,cache,url_to_code))
149
+ if 'test' not in postfix:
150
+ data=random.sample(data,int(len(data)*0.1))
151
+
152
+ self.examples=pool.map(get_example,tqdm(data,total=len(data)))
153
+ if 'train' in postfix:
154
+ for idx, example in enumerate(self.examples[:3]):
155
+ logger.info("*** Example ***")
156
+ logger.info("idx: {}".format(idx))
157
+ logger.info("label: {}".format(example.label))
158
+ logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
159
+ logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
160
+
161
+
162
+
163
+ def __len__(self):
164
+ return len(self.examples)
165
+
166
+ def __getitem__(self, item):
167
+
168
+ return torch.tensor(self.examples[item].input_ids),torch.tensor(self.examples[item].label)
169
+
170
+
171
+ def load_and_cache_examples(args, tokenizer, evaluate=False,test=False,pool=None):
172
+ dataset = TextDataset(tokenizer, args, file_path=args.test_data_file if test else (args.eval_data_file if evaluate else args.train_data_file),block_size=args.block_size,pool=pool)
173
+ return dataset
174
+
175
+ def set_seed(seed=42):
176
+ random.seed(seed)
177
+ os.environ['PYTHONHASHSEED'] = str(seed)
178
+ np.random.seed(seed)
179
+ torch.manual_seed(seed)
180
+ torch.cuda.manual_seed(seed)
181
+ torch.backends.cudnn.deterministic = True
182
+
183
+ def train(args, train_dataset, model, tokenizer,pool):
184
+ """ Train the model """
185
+
186
+ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
187
+ train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
188
+
189
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
190
+ args.max_steps=args.epoch*len( train_dataloader)
191
+ args.save_steps=len( train_dataloader)
192
+ args.warmup_steps=len( train_dataloader)
193
+ args.logging_steps=len( train_dataloader)
194
+ args.num_train_epochs=args.epoch
195
+ model.to(args.device)
196
+ # Prepare optimizer and schedule (linear warmup and decay)
197
+ no_decay = ['bias', 'LayerNorm.weight']
198
+ optimizer_grouped_parameters = [
199
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
200
+ 'weight_decay': args.weight_decay},
201
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
202
+ ]
203
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
204
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
205
+ num_training_steps=args.max_steps)
206
+ if args.fp16:
207
+ try:
208
+ from apex import amp
209
+ except ImportError:
210
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
211
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
212
+
213
+ # multi-gpu training (should be after apex fp16 initialization)
214
+ if args.n_gpu > 1:
215
+ model = torch.nn.DataParallel(model)
216
+
217
+ # Distributed training (should be after apex fp16 initialization)
218
+ if args.local_rank != -1:
219
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
220
+ output_device=args.local_rank,
221
+ find_unused_parameters=True)
222
+
223
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
224
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
225
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
226
+ if os.path.exists(scheduler_last):
227
+ scheduler.load_state_dict(torch.load(scheduler_last))
228
+ if os.path.exists(optimizer_last):
229
+ optimizer.load_state_dict(torch.load(optimizer_last))
230
+ # Train!
231
+ logger.info("***** Running training *****")
232
+ logger.info(" Num examples = %d", len(train_dataset))
233
+ logger.info(" Num Epochs = %d", args.num_train_epochs)
234
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
235
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
236
+ args.train_batch_size * args.gradient_accumulation_steps * (
237
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1))
238
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
239
+ logger.info(" Total optimization steps = %d", args.max_steps)
240
+
241
+ global_step = args.start_step
242
+ tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
243
+ best_mrr=0.0
244
+ best_f1=0
245
+ # model.resize_token_embeddings(len(tokenizer))
246
+ model.zero_grad()
247
+ set_seed(args.seed) # Added here for reproducibility (even between python 2 and 3)
248
+
249
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
250
+ bar = tqdm(train_dataloader,total=len(train_dataloader))
251
+ tr_num=0
252
+ train_loss=0
253
+ for step, batch in enumerate(bar):
254
+ inputs = batch[0].to(args.device)
255
+ labels=batch[1].to(args.device)
256
+ model.train()
257
+ loss,logits = model(inputs,labels)
258
+
259
+
260
+ if args.n_gpu > 1:
261
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
262
+ if args.gradient_accumulation_steps > 1:
263
+ loss = loss / args.gradient_accumulation_steps
264
+
265
+ if args.fp16:
266
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
267
+ scaled_loss.backward()
268
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
269
+ else:
270
+ loss.backward()
271
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
272
+
273
+ tr_loss += loss.item()
274
+ tr_num+=1
275
+ train_loss+=loss.item()
276
+ if avg_loss==0:
277
+ avg_loss=tr_loss
278
+ avg_loss=round(train_loss/tr_num,5)
279
+ bar.set_description("epoch {} loss {}".format(idx,avg_loss))
280
+
281
+
282
+ if (step + 1) % args.gradient_accumulation_steps == 0:
283
+ optimizer.step()
284
+ optimizer.zero_grad()
285
+ scheduler.step()
286
+ global_step += 1
287
+ output_flag=True
288
+ avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
289
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
290
+ logging_loss = tr_loss
291
+ tr_nb=global_step
292
+
293
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
294
+
295
+ if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
296
+ results = evaluate(args, model, tokenizer,pool=pool,eval_when_training=True)
297
+ # Save model checkpoint
298
+
299
+ if results['eval_f1']>best_f1:
300
+ best_f1=results['eval_f1']
301
+ logger.info(" "+"*"*20)
302
+ logger.info(" Best f1:%s",round(best_f1,4))
303
+ logger.info(" "+"*"*20)
304
+
305
+ checkpoint_prefix = 'checkpoint-best-f1'
306
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
307
+ if not os.path.exists(output_dir):
308
+ os.makedirs(output_dir)
309
+ model_to_save = model.module if hasattr(model,'module') else model
310
+ output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
311
+ torch.save(model_to_save.state_dict(), output_dir)
312
+ logger.info("Saving model checkpoint to %s", output_dir)
313
+
314
+ # Save a checkpoint at the end of every epoch
315
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
316
+ if not os.path.exists(output_dir):
317
+ os.makedirs(output_dir)
318
+ model_to_save = model.module if hasattr(model, 'module') else model
319
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
320
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
321
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
322
+
323
+ if args.max_steps > 0 and global_step > args.max_steps:
324
+ bar.close()
325
+ break
326
+ return global_step, tr_loss / global_step
327
+
328
+
329
+ def evaluate(args, model, tokenizer, prefix="",pool=None,eval_when_training=False):
330
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
331
+ eval_output_dir = args.output_dir
332
+ eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True,pool=pool)
333
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
334
+ os.makedirs(eval_output_dir)
335
+
336
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
337
+ # Note that DistributedSampler samples randomly
338
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
339
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
340
+
341
+ # multi-gpu evaluate
342
+ if args.n_gpu > 1 and eval_when_training is False:
343
+ model = torch.nn.DataParallel(model)
344
+
345
+ # Eval!
346
+ logger.info("***** Running evaluation {} *****".format(prefix))
347
+ logger.info(" Num examples = %d", len(eval_dataset))
348
+ logger.info(" Batch size = %d", args.eval_batch_size)
349
+ eval_loss = 0.0
350
+ nb_eval_steps = 0
351
+ model.eval()
352
+ logits=[]
353
+ y_trues=[]
354
+ for batch in eval_dataloader:
355
+ inputs = batch[0].to(args.device)
356
+ labels=batch[1].to(args.device)
357
+ with torch.no_grad():
358
+ lm_loss,logit = model(inputs,labels)
359
+ eval_loss += lm_loss.mean().item()
360
+ logits.append(logit.cpu().numpy())
361
+ y_trues.append(labels.cpu().numpy())
362
+ nb_eval_steps += 1
363
+ logits=np.concatenate(logits,0)
364
+ y_trues=np.concatenate(y_trues,0)
365
+ best_threshold=0
366
+ best_f1=0
367
+ for i in range(1,100):
368
+ threshold=i/100
369
+ y_preds=logits[:,1]>threshold
370
+ from sklearn.metrics import recall_score
371
+ recall=recall_score(y_trues, y_preds)
372
+ from sklearn.metrics import precision_score
373
+ precision=precision_score(y_trues, y_preds)
374
+ from sklearn.metrics import f1_score
375
+ f1=f1_score(y_trues, y_preds)
376
+ if f1>best_f1:
377
+ best_f1=f1
378
+ best_threshold=threshold
379
+
380
+ y_preds=logits[:,1]>best_threshold
381
+ from sklearn.metrics import recall_score
382
+ recall=recall_score(y_trues, y_preds)
383
+ from sklearn.metrics import precision_score
384
+ precision=precision_score(y_trues, y_preds)
385
+ from sklearn.metrics import f1_score
386
+ f1=f1_score(y_trues, y_preds)
387
+ result = {
388
+ "eval_recall": float(recall),
389
+ "eval_precision": float(precision),
390
+ "eval_f1": float(f1),
391
+ "eval_threshold":best_threshold,
392
+
393
+ }
394
+
395
+ logger.info("***** Eval results {} *****".format(prefix))
396
+ for key in sorted(result.keys()):
397
+ logger.info(" %s = %s", key, str(round(result[key],4)))
398
+
399
+ return result
400
+
401
+ def test(args, model, tokenizer, prefix="",pool=None,best_threshold=0):
402
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
403
+ eval_dataset = load_and_cache_examples(args, tokenizer, test=True,pool=pool)
404
+
405
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
406
+ # Note that DistributedSampler samples randomly
407
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
408
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
409
+
410
+ # multi-gpu evaluate
411
+ if args.n_gpu > 1:
412
+ model = torch.nn.DataParallel(model)
413
+
414
+ # Eval!
415
+ logger.info("***** Running Test {} *****".format(prefix))
416
+ logger.info(" Num examples = %d", len(eval_dataset))
417
+ logger.info(" Batch size = %d", args.eval_batch_size)
418
+ eval_loss = 0.0
419
+ nb_eval_steps = 0
420
+ model.eval()
421
+ logits=[]
422
+ y_trues=[]
423
+ for batch in eval_dataloader:
424
+ inputs = batch[0].to(args.device)
425
+ labels=batch[1].to(args.device)
426
+ with torch.no_grad():
427
+ lm_loss,logit = model(inputs,labels)
428
+ eval_loss += lm_loss.mean().item()
429
+ logits.append(logit.cpu().numpy())
430
+ y_trues.append(labels.cpu().numpy())
431
+ nb_eval_steps += 1
432
+ logits=np.concatenate(logits,0)
433
+ y_preds=logits[:,1]>best_threshold
434
+ with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
435
+ for example,pred in zip(eval_dataset.examples,y_preds):
436
+ if pred:
437
+ f.write(example.url1+'\t'+example.url2+'\t'+'1'+'\n')
438
+ else:
439
+ f.write(example.url1+'\t'+example.url2+'\t'+'0'+'\n')
440
+
441
+ def main():
442
+ parser = argparse.ArgumentParser()
443
+
444
+ ## Required parameters
445
+ parser.add_argument("--train_data_file", default=None, type=str, required=True,
446
+ help="The input training data file (a text file).")
447
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
448
+ help="The output directory where the model predictions and checkpoints will be written.")
449
+
450
+ ## Other parameters
451
+ parser.add_argument("--eval_data_file", default=None, type=str,
452
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
453
+ parser.add_argument("--test_data_file", default=None, type=str,
454
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
455
+
456
+ parser.add_argument("--model_type", default="bert", type=str,
457
+ help="The model architecture to be fine-tuned.")
458
+ parser.add_argument("--model_name_or_path", default=None, type=str,
459
+ help="The model checkpoint for weights initialization.")
460
+
461
+ parser.add_argument("--mlm", action='store_true',
462
+ help="Train with masked-language modeling loss instead of language modeling.")
463
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
464
+ help="Ratio of tokens to mask for masked language modeling loss")
465
+
466
+ parser.add_argument("--config_name", default="", type=str,
467
+ help="Optional pretrained config name or path if not the same as model_name_or_path")
468
+ parser.add_argument("--tokenizer_name", default="", type=str,
469
+ help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
470
+ parser.add_argument("--cache_dir", default="", type=str,
471
+ help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
472
+ parser.add_argument("--block_size", default=-1, type=int,
473
+ help="Optional input sequence length after tokenization."
474
+ "The training dataset will be truncated in block of this size for training."
475
+ "Default to the model max input length for single sentence inputs (take into account special tokens).")
476
+ parser.add_argument("--do_train", action='store_true',
477
+ help="Whether to run training.")
478
+ parser.add_argument("--do_eval", action='store_true',
479
+ help="Whether to run eval on the dev set.")
480
+ parser.add_argument("--do_test", action='store_true',
481
+ help="Whether to run eval on the dev set.")
482
+ parser.add_argument("--evaluate_during_training", action='store_true',
483
+ help="Run evaluation during training at each logging step.")
484
+ parser.add_argument("--do_lower_case", action='store_true',
485
+ help="Set this flag if you are using an uncased model.")
486
+
487
+ parser.add_argument("--train_batch_size", default=4, type=int,
488
+ help="Batch size per GPU/CPU for training.")
489
+ parser.add_argument("--eval_batch_size", default=4, type=int,
490
+ help="Batch size per GPU/CPU for evaluation.")
491
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
492
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
493
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
494
+ help="The initial learning rate for Adam.")
495
+ parser.add_argument("--weight_decay", default=0.0, type=float,
496
+ help="Weight deay if we apply some.")
497
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
498
+ help="Epsilon for Adam optimizer.")
499
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
500
+ help="Max gradient norm.")
501
+ parser.add_argument("--num_train_epochs", default=1.0, type=float,
502
+ help="Total number of training epochs to perform.")
503
+ parser.add_argument("--max_steps", default=-1, type=int,
504
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
505
+ parser.add_argument("--warmup_steps", default=0, type=int,
506
+ help="Linear warmup over warmup_steps.")
507
+
508
+ parser.add_argument('--logging_steps', type=int, default=50,
509
+ help="Log every X updates steps.")
510
+ parser.add_argument('--save_steps', type=int, default=50,
511
+ help="Save checkpoint every X updates steps.")
512
+ parser.add_argument('--save_total_limit', type=int, default=None,
513
+ help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
514
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
515
+ help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
516
+ parser.add_argument("--no_cuda", action='store_true',
517
+ help="Avoid using CUDA when available")
518
+ parser.add_argument('--overwrite_output_dir', action='store_true',
519
+ help="Overwrite the content of the output directory")
520
+ parser.add_argument('--overwrite_cache', action='store_true',
521
+ help="Overwrite the cached training and evaluation sets")
522
+ parser.add_argument('--seed', type=int, default=42,
523
+ help="random seed for initialization")
524
+ parser.add_argument('--epoch', type=int, default=42,
525
+ help="random seed for initialization")
526
+ parser.add_argument('--fp16', action='store_true',
527
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
528
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
529
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
530
+ "See details at https://nvidia.github.io/apex/amp.html")
531
+ parser.add_argument("--local_rank", type=int, default=-1,
532
+ help="For distributed training: local_rank")
533
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
534
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
535
+
536
+
537
+ pool = multiprocessing.Pool(cpu_cont)
538
+ args = parser.parse_args()
539
+
540
+ # Setup distant debugging if needed
541
+ if args.server_ip and args.server_port:
542
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
543
+ import ptvsd
544
+ print("Waiting for debugger attach")
545
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
546
+ ptvsd.wait_for_attach()
547
+
548
+ # Setup CUDA, GPU & distributed training
549
+ if args.local_rank == -1 or args.no_cuda:
550
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
551
+ args.n_gpu = torch.cuda.device_count()
552
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
553
+ torch.cuda.set_device(args.local_rank)
554
+ device = torch.device("cuda", args.local_rank)
555
+ torch.distributed.init_process_group(backend='nccl')
556
+ args.n_gpu = 1
557
+ args.device = device
558
+ args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
559
+ args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
560
+ # Setup logging
561
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
562
+ datefmt='%m/%d/%Y %H:%M:%S',
563
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
564
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
565
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
566
+
567
+ # Set seed
568
+ set_seed(args.seed)
569
+
570
+ # Load pretrained model and tokenizer
571
+ if args.local_rank not in [-1, 0]:
572
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
573
+
574
+ args.start_epoch = 0
575
+ args.start_step = 0
576
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
577
+ if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
578
+ args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
579
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
580
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
581
+ with open(idx_file, encoding='utf-8') as idxf:
582
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
583
+
584
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
585
+ if os.path.exists(step_file):
586
+ with open(step_file, encoding='utf-8') as stepf:
587
+ args.start_step = int(stepf.readlines()[0].strip())
588
+
589
+ logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
590
+
591
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
592
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
593
+ cache_dir=args.cache_dir if args.cache_dir else None)
594
+ config.num_labels=2
595
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
596
+ do_lower_case=args.do_lower_case,
597
+ cache_dir=args.cache_dir if args.cache_dir else None)
598
+ if args.block_size <= 0:
599
+ args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
600
+ args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
601
+ if args.model_name_or_path:
602
+ model = model_class.from_pretrained(args.model_name_or_path,
603
+ from_tf=bool('.ckpt' in args.model_name_or_path),
604
+ config=config,
605
+ cache_dir=args.cache_dir if args.cache_dir else None)
606
+ else:
607
+ model = model_class(config)
608
+
609
+ model=Model(model,config,tokenizer,args)
610
+ if args.local_rank == 0:
611
+ torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
612
+
613
+ logger.info("Training/evaluation parameters %s", args)
614
+
615
+ # Training
616
+ if args.do_train:
617
+ if args.local_rank not in [-1, 0]:
618
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
619
+
620
+ train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False,pool=pool)
621
+
622
+ if args.local_rank == 0:
623
+ torch.distributed.barrier()
624
+
625
+ global_step, tr_loss = train(args, train_dataset, model, tokenizer,pool)
626
+
627
+
628
+ # Evaluation
629
+ results = {}
630
+ if args.do_eval and args.local_rank in [-1, 0]:
631
+ checkpoint_prefix = 'epoch_2/subject_model.pth'
632
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
633
+ model.load_state_dict(torch.load(output_dir))
634
+ model.to(args.device)
635
+ result=evaluate(args, model, tokenizer,pool=pool)
636
+
637
+ if args.do_test and args.local_rank in [-1, 0]:
638
+ checkpoint_prefix = 'epoch_2/subject_model.pth'
639
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
640
+ model.load_state_dict(torch.load(output_dir))
641
+ model.to(args.device)
642
+ test(args, model, tokenizer,pool=pool,best_threshold=0.5)
643
+
644
+ return results
645
+
646
+
647
+ if __name__ == "__main__":
648
+ main()
649
+
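One detail of run.py worth calling out is how evaluate() picks its decision threshold: it sweeps thresholds 0.01–0.99 over the clone-class probability (column 1 of the softmax output) and keeps the one with the best F1, which test() then reuses. The snippet below reproduces just that sweep on synthetic scores; the random labels and scores are fabricated for illustration and are not model output.

```python
# Standalone sketch of the F1 threshold sweep used in evaluate(), on synthetic scores.
import numpy as np
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
y_trues = rng.integers(0, 2, size=200)                          # fake gold labels
scores = np.clip(y_trues * 0.6 + rng.random(200) * 0.5, 0, 1)   # fake clone-class probabilities

best_f1, best_threshold = 0.0, 0.0
for i in range(1, 100):
    threshold = i / 100
    f1 = f1_score(y_trues, scores > threshold)
    if f1 > best_f1:
        best_f1, best_threshold = f1, threshold
print(round(best_f1, 4), best_threshold)
```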
Code-Code/Clone-detection-BigCloneBench/code/train.log ADDED
The diff for this file is too large to render.
Code-Code/Clone-detection-BigCloneBench/code/train.sh ADDED
@@ -0,0 +1,18 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+ --output_dir=../model \
+ --model_type=roberta \
+ --config_name=microsoft/codebert-base \
+ --model_name_or_path=microsoft/codebert-base \
+ --tokenizer_name=roberta-base \
+ --do_train \
+ --train_data_file=../dataset/train.txt \
+ --eval_data_file=../dataset/valid.txt \
+ --test_data_file=../dataset/test.txt \
+ --epoch 2 \
+ --block_size 400 \
+ --train_batch_size 16 \
+ --eval_batch_size 32 \
+ --learning_rate 5e-5 \
+ --max_grad_norm 1.0 \
+ --evaluate_during_training \
+ --seed 123456
Code-Code/Clone-detection-BigCloneBench/dataset.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:922ad328fff2df059476a791c55ce23f2444af7d4ec72da93bc33ed81d456572
+ size 13203888
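The zip itself is stored via Git LFS, so the repository only records this pointer (a sha256 oid and a byte size). A minimal sketch for checking a locally fetched copy against the pointer is shown below; the local path is hypothetical, while the oid and size come from the pointer above.

```python
# Hedged sketch: verify a downloaded dataset.zip against the Git LFS pointer recorded above.
import hashlib

EXPECTED_OID = "922ad328fff2df059476a791c55ce23f2444af7d4ec72da93bc33ed81d456572"
EXPECTED_SIZE = 13203888

with open("Code-Code/Clone-detection-BigCloneBench/dataset.zip", "rb") as f:  # hypothetical local path
    data = f.read()
print(len(data) == EXPECTED_SIZE, hashlib.sha256(data).hexdigest() == EXPECTED_OID)
```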
Code-Code/Clone-detection-POJ-104/code/eval.sh ADDED
@@ -0,0 +1,17 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+ --output_dir=../model \
+ --model_type=roberta \
+ --config_name=microsoft/codebert-base \
+ --model_name_or_path=microsoft/codebert-base \
+ --tokenizer_name=roberta-base \
+ --do_eval \
+ --train_data_file=../dataset/train.jsonl \
+ --eval_data_file=../dataset/valid.jsonl \
+ --epoch 2 \
+ --block_size 400 \
+ --train_batch_size 8 \
+ --eval_batch_size 16 \
+ --learning_rate 2e-5 \
+ --max_grad_norm 1.0 \
+ --evaluate_during_training \
+ --seed 123456
Code-Code/Clone-detection-POJ-104/code/evaluate.sh ADDED
@@ -0,0 +1,6 @@
+ python extract_answers.py \
+ -c ../dataset/valid.jsonl \
+ -o ../model/answers.jsonl
+ python evaluator.py \
+ -a ../model/answers.jsonl \
+ -p ../model/predictions.jsonl
Code-Code/Clone-detection-POJ-104/code/evaluator.py ADDED
@@ -0,0 +1,64 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import logging
+ import sys,json
+ import numpy as np
+ from tqdm import tqdm
+
+ def read_answers(filename):
+     answers={}
+     with open(filename) as f:
+         for line in f:
+             line=line.strip()
+             js=json.loads(line)
+             answers[js['index']]=js['answers']
+     return answers
+
+ def read_predictions(filename):
+     predictions={}
+     with open(filename) as f:
+         for line in f:
+             line=line.strip()
+             js=json.loads(line)
+             predictions[js['index']]=js['answers']
+     return predictions
+
+ def calculate_scores(answers,predictions):
+     scores=[]
+     for key in answers:
+         if key not in predictions:
+             logging.error("Missing prediction for index {}.".format(key))
+             sys.exit()
+
+         if len(answers[key])!=len(predictions[key]):
+             logging.error("Mismatched number of answers for index {}.".format(key))
+             sys.exit()
+
+         answer = set(answers[key])
+
+         Avep = []
+         for k, p in enumerate(predictions[key]):
+             if p in answer:
+                 Avep.append((len(Avep)+1)/(k+1))
+
+         scores.append(sum(Avep)/len(answer))
+
+     result={}
+     result['MAP@R']= round(np.mean(scores),4)
+     return result
+
+ def main():
+     import argparse
+     parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for POJ-104 dataset.')
+     parser.add_argument('--answers', '-a', help="filename of the labels, in jsonl format.")
+     parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in jsonl format.")
+
+     args = parser.parse_args()
+     answers=read_answers(args.answers)
+     predictions=read_predictions(args.predictions)
+     scores=calculate_scores(answers,predictions)
+     print(scores)
+
+ if __name__ == '__main__':
+     main()
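The MAP@R loop above accumulates precision at every rank where a gold answer is retrieved and divides by the number of gold answers. A worked toy case, with made-up indices and the same loop body as calculate_scores, is shown below.

```python
# Worked toy example of the MAP@R computation (indices are hypothetical):
# two gold answers, two retrieved indices, only the first retrieval is correct.
answers = {"0": ["2", "5"]}
predictions = {"0": ["2", "9"]}

key = "0"
answer = set(answers[key])
Avep = []
for k, p in enumerate(predictions[key]):
    if p in answer:
        Avep.append((len(Avep) + 1) / (k + 1))   # precision at each hit rank: 1/1 here
print(sum(Avep) / len(answer))                   # (1.0) / 2 = 0.5
```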
Code-Code/Clone-detection-POJ-104/code/extract_answers.py ADDED
@@ -0,0 +1,39 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import json
+
+ def extract_answers(filename):
+     cluster={}
+     with open(filename) as f:
+         for line in f:
+             line=line.strip()
+             js=json.loads(line)
+             if js['label'] not in cluster:
+                 cluster[js['label']]=set()
+             cluster[js['label']].add(js['index'])
+     answers=[]
+     for key in cluster:
+         for idx1 in cluster[key]:
+             temp={}
+             temp['index']=idx1
+             temp['answers']=[]
+             for idx2 in cluster[key]:
+                 if idx1!=idx2:
+                     temp['answers'].append(idx2)
+             answers.append(temp)
+     return answers
+
+ def main():
+     import argparse
+     parser = argparse.ArgumentParser(description='Extract answers from code files.')
+     parser.add_argument('--codefile', '-c', help="filename of the code examples.")
+     parser.add_argument('--outfile', '-o', help="filename of output.")
+     args = parser.parse_args()
+     answers=extract_answers(args.codefile)
+     with open(args.outfile,'w') as f:
+         for line in answers:
+             f.write(json.dumps(line)+'\n')
+
+ if __name__ == '__main__':
+     main()
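In short, extract_answers treats every other example with the same POJ-104 label as a gold answer for a given index. The snippet below is an in-memory illustration of that output with hypothetical indices and labels, not a run of the script itself.

```python
# Toy illustration of the answers.jsonl lines extract_answers produces (hypothetical data).
import json

lines = [{"index": "0", "label": "1"}, {"index": "1", "label": "1"}, {"index": "2", "label": "2"}]
cluster = {}
for js in lines:
    cluster.setdefault(js["label"], set()).add(js["index"])
for label, idxs in cluster.items():
    for idx1 in idxs:
        print(json.dumps({"index": idx1, "answers": [i for i in idxs if i != idx1]}))
```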
Code-Code/Clone-detection-POJ-104/code/model.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT License.
+ import torch
+ import torch.nn as nn
+ from torch.autograd import Variable
+ import copy
+ import torch.nn.functional as F
+ from torch.nn import CrossEntropyLoss, MSELoss
+
+
+ class Model(nn.Module):
+     def __init__(self, encoder, config, tokenizer, args):
+         super(Model, self).__init__()
+         self.encoder = encoder
+         self.config = config
+         self.tokenizer = tokenizer
+         self.args = args
+
+     def forward(self, input_ids=None, p_input_ids=None, n_input_ids=None, labels=None):
+         bs,_ = input_ids.size()
+         input_ids = torch.cat((input_ids, p_input_ids, n_input_ids), 0)
+
+         outputs = self.encoder(input_ids, attention_mask=input_ids.ne(1))
+         if len(outputs) > 1:
+             outputs = outputs[1]
+         else:
+             outputs = outputs[0][:, 0, :]
+         outputs = outputs.split(bs, 0)
+
+         prob_1 = (outputs[0]*outputs[1]).sum(-1)
+         prob_2 = (outputs[0]*outputs[2]).sum(-1)
+         temp = torch.cat((outputs[0], outputs[1]), 0)
+         temp_labels = torch.cat((labels, labels), 0)
+         prob_3 = torch.mm(outputs[0], temp.t())
+         mask = labels[:,None] == temp_labels[None,:]
+         prob_3 = prob_3*(1-mask.float()) - 1e9*mask.float()
+
+         prob = torch.softmax(torch.cat((prob_1[:,None], prob_2[:,None], prob_3), -1), -1)
+         loss = torch.log(prob[:,0] + 1e-10)
+         loss = -loss.mean()
+         return loss, outputs[0]
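The forward pass above scores the anchor against its positive (prob_1), a sampled negative (prob_2), and all in-batch candidates with a different label (prob_3, with same-label entries masked to -1e9), then maximizes the softmax probability of the positive. The sketch below replays that logic with random vectors purely to show the shapes and masking; the batch size and hidden size are assumptions, and in the real model the vectors come from the RoBERTa encoder.

```python
# Shape/logic sketch of the retrieval loss above, with random stand-in embeddings.
import torch

bs, hidden = 4, 8
anchor, pos, neg = torch.randn(3, bs, hidden).unbind(0)
labels = torch.tensor([0, 1, 2, 3])

prob_1 = (anchor * pos).sum(-1)                      # anchor . positive, shape (bs,)
prob_2 = (anchor * neg).sum(-1)                      # anchor . sampled negative
temp = torch.cat((anchor, pos), 0)                   # in-batch candidates, (2*bs, hidden)
temp_labels = torch.cat((labels, labels), 0)
prob_3 = anchor @ temp.t()                           # (bs, 2*bs) similarity matrix
mask = labels[:, None] == temp_labels[None, :]
prob_3 = prob_3 * (1 - mask.float()) - 1e9 * mask.float()  # drop same-label candidates

prob = torch.softmax(torch.cat((prob_1[:, None], prob_2[:, None], prob_3), -1), -1)
loss = -torch.log(prob[:, 0] + 1e-10).mean()         # push probability mass onto the true positive
print(loss.item())
```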
Code-Code/Clone-detection-POJ-104/code/run.py ADDED
@@ -0,0 +1,632 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
18
+ GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
19
+ using a masked language modeling (MLM) loss.
20
+ """
21
+
22
+ from __future__ import absolute_import, division, print_function
23
+
24
+ import argparse
25
+ import glob
26
+ import logging
27
+ import os
28
+ import pickle
29
+ import random
30
+ import re
31
+ import shutil
32
+
33
+ import numpy as np
34
+ import torch
35
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
36
+ from torch.utils.data.distributed import DistributedSampler
37
+ import json
38
+ try:
39
+ from torch.utils.tensorboard import SummaryWriter
40
+ except:
41
+ from tensorboardX import SummaryWriter
42
+
43
+ from tqdm import tqdm, trange
44
+ import multiprocessing
45
+ from model import Model
46
+ cpu_cont = multiprocessing.cpu_count()
47
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
48
+ BertConfig, BertModel, BertTokenizer,
49
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
50
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
51
+ RobertaConfig, RobertaModel, RobertaTokenizer,
52
+ DistilBertConfig, DistilBertModel, DistilBertTokenizer)
53
+
54
+ logger = logging.getLogger(__name__)
55
+
56
+ MODEL_CLASSES = {
57
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
58
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
59
+ 'bert': (BertConfig, BertModel, BertTokenizer),
60
+ 'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
61
+ 'distilbert': (DistilBertConfig, DistilBertModel, DistilBertTokenizer)
62
+ }
63
+
64
+
65
+ class InputFeatures(object):
66
+ """A single training/test features for a example."""
67
+ def __init__(self,
68
+ input_tokens,
69
+ input_ids,
70
+ index,
71
+ label,
72
+
73
+ ):
74
+ self.input_tokens = input_tokens
75
+ self.input_ids = input_ids
76
+ self.index=index
77
+ self.label=label
78
+
79
+
80
+ def convert_examples_to_features(js,tokenizer,args):
81
+ #source
82
+ code=' '.join(js['code'].split())
83
+ code_tokens=tokenizer.tokenize(code)[:args.block_size-2]
84
+ source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
85
+ source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
86
+ padding_length = args.block_size - len(source_ids)
87
+ source_ids+=[tokenizer.pad_token_id]*padding_length
88
+ return InputFeatures(source_tokens,source_ids,js['index'],int(js['label']))
89
+
90
+ class TextDataset(Dataset):
91
+ def __init__(self, tokenizer, args, file_path=None):
92
+ self.examples = []
93
+ data=[]
94
+ with open(file_path) as f:
95
+ for line in f:
96
+ line=line.strip()
97
+ js=json.loads(line)
98
+ data.append(js)
99
+ for js in data:
100
+ self.examples.append(convert_examples_to_features(js,tokenizer,args))
101
+ if 'train' in file_path:
102
+ for idx, example in enumerate(self.examples[:3]):
103
+ logger.info("*** Example ***")
104
+ logger.info("idx: {}".format(idx))
105
+ logger.info("label: {}".format(example.label))
106
+ logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
107
+ logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
108
+ self.label_examples={}
109
+ for e in self.examples:
110
+ if e.label not in self.label_examples:
111
+ self.label_examples[e.label]=[]
112
+ self.label_examples[e.label].append(e)
113
+
114
+ def __len__(self):
115
+ return len(self.examples)
116
+
117
+ def __getitem__(self, i):
118
+ label=self.examples[i].label
119
+ index=self.examples[i].index
120
+ labels=list(self.label_examples)
121
+ labels.remove(label)
122
+ while True:
123
+ shuffle_example=random.sample(self.label_examples[label],1)[0]
124
+ if shuffle_example.index!=index:
125
+ p_example=shuffle_example
126
+ break
127
+ n_example=random.sample(self.label_examples[random.sample(labels,1)[0]],1)[0]
128
+
129
+ return (torch.tensor(self.examples[i].input_ids),torch.tensor(p_example.input_ids),
130
+ torch.tensor(n_example.input_ids),torch.tensor(label))
131
+
132
+
133
+ def set_seed(seed=42):
134
+ random.seed(seed)
135
+ os.environ['PYTHONHASHSEED'] = str(seed)
136
+ np.random.seed(seed)
137
+ torch.manual_seed(seed)
138
+ torch.cuda.manual_seed(seed)
139
+ torch.backends.cudnn.deterministic = True
140
+
141
+
142
+ def train(args, train_dataset, model, tokenizer):
143
+ """ Train the model """
144
+
145
+ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
146
+ train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
147
+
148
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
149
+ batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
150
+ args.max_steps=args.epoch*len( train_dataloader)
151
+ args.save_steps=len( train_dataloader)
152
+ args.warmup_steps=len( train_dataloader)
153
+ args.logging_steps=len( train_dataloader)
154
+ args.num_train_epochs=args.epoch
155
+ model.to(args.device)
156
+ # Prepare optimizer and schedule (linear warmup and decay)
157
+ no_decay = ['bias', 'LayerNorm.weight']
158
+ optimizer_grouped_parameters = [
159
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
160
+ 'weight_decay': args.weight_decay},
161
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
162
+ ]
163
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
164
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
165
+ num_training_steps=args.max_steps)
166
+ if args.fp16:
167
+ try:
168
+ from apex import amp
169
+ except ImportError:
170
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
171
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
172
+
173
+ # multi-gpu training (should be after apex fp16 initialization)
174
+ if args.n_gpu > 1:
175
+ model = torch.nn.DataParallel(model)
176
+
177
+ # Distributed training (should be after apex fp16 initialization)
178
+ if args.local_rank != -1:
179
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
180
+ output_device=args.local_rank,
181
+ find_unused_parameters=True)
182
+
183
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
184
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
185
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
186
+ if os.path.exists(scheduler_last):
187
+ scheduler.load_state_dict(torch.load(scheduler_last))
188
+ if os.path.exists(optimizer_last):
189
+ optimizer.load_state_dict(torch.load(optimizer_last))
190
+ # Train!
191
+ logger.info("***** Running training *****")
192
+ logger.info(" Num examples = %d", len(train_dataset))
193
+ logger.info(" Num Epochs = %d", args.num_train_epochs)
194
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
195
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
196
+ args.train_batch_size * args.gradient_accumulation_steps * (
197
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1))
198
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
199
+ logger.info(" Total optimization steps = %d", args.max_steps)
200
+
201
+ global_step = args.start_step
202
+ tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
203
+ best_acc=0.0
204
+ # model.resize_token_embeddings(len(tokenizer))
205
+ model.zero_grad()
206
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
207
+ bar = train_dataloader
208
+ tr_num=0
209
+ train_loss=0
210
+ for step, batch in enumerate(bar):
211
+ inputs = batch[0].to(args.device)
212
+ p_inputs = batch[1].to(args.device)
213
+ n_inputs = batch[2].to(args.device)
214
+ labels = batch[3].to(args.device)
215
+ model.train()
216
+ loss,vec = model(inputs,p_inputs,n_inputs,labels)
217
+
218
+
219
+ if args.n_gpu > 1:
220
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
221
+ if args.gradient_accumulation_steps > 1:
222
+ loss = loss / args.gradient_accumulation_steps
223
+
224
+ if args.fp16:
225
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
226
+ scaled_loss.backward()
227
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
228
+ else:
229
+ loss.backward()
230
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
231
+
232
+ tr_loss += loss.item()
233
+ tr_num+=1
234
+ train_loss+=loss.item()
235
+ if avg_loss==0:
236
+ avg_loss=tr_loss
237
+ avg_loss=round(train_loss/tr_num,5)
238
+ if (step+1)% 100==0:
239
+ logger.info("epoch {} step {} loss {}".format(idx,step+1,avg_loss))
240
+ #bar.set_description("epoch {} loss {}".format(idx,avg_loss))
241
+
242
+
243
+ if (step + 1) % args.gradient_accumulation_steps == 0:
244
+ optimizer.step()
245
+ optimizer.zero_grad()
246
+ scheduler.step()
247
+ global_step += 1
248
+ output_flag=True
249
+ avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
250
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
251
+ logging_loss = tr_loss
252
+ tr_nb=global_step
253
+
254
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
255
+
256
+ if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
257
+ results = evaluate(args, model, tokenizer,eval_when_training=True)
258
+ for key, value in results.items():
259
+ logger.info(" %s = %s", key, round(value,4))
260
+ # Save model checkpoint
261
+ tr_num=0
262
+ train_loss=0
263
+
264
+ if results['eval_map']>best_acc:
265
+ best_acc=results['eval_map']
266
+ logger.info(" "+"*"*20)
267
+ logger.info(" Best map:%s",round(best_acc,4))
268
+ logger.info(" "+"*"*20)
269
+
270
+ checkpoint_prefix = 'checkpoint-best-map'
271
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
272
+ if not os.path.exists(output_dir):
273
+ os.makedirs(output_dir)
274
+ model_to_save = model.module if hasattr(model,'module') else model
275
+ output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
276
+ torch.save(model_to_save.state_dict(), output_dir)
277
+ logger.info("Saving model checkpoint to %s", output_dir)
278
+
279
+ # save a checkpoint at the end of every epoch
280
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx))
281
+ if not os.path.exists(output_dir):
282
+ os.makedirs(output_dir)
283
+ model_to_save = model.module if hasattr(model, 'module') else model
284
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
285
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
286
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
287
+
288
+
289
+ eval_dataset=None
290
+ def evaluate(args, model, tokenizer,eval_when_training=False):
291
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
292
+ eval_output_dir = args.output_dir
293
+ global eval_dataset
294
+ if eval_dataset is None:
295
+ eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)
296
+
297
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
298
+ os.makedirs(eval_output_dir)
299
+
300
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
301
+ # Note that DistributedSampler samples randomly
302
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
303
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
304
+
305
+ # multi-gpu evaluate
306
+ if args.n_gpu > 1 and eval_when_training is False:
307
+ model = torch.nn.DataParallel(model)
308
+
309
+ # Eval!
310
+ logger.info("***** Running evaluation *****")
311
+ logger.info(" Num examples = %d", len(eval_dataset))
312
+ logger.info(" Batch size = %d", args.eval_batch_size)
313
+ eval_loss = 0.0
314
+ nb_eval_steps = 0
315
+ model.eval()
316
+ vecs=[]
317
+ labels=[]
318
+ for batch in eval_dataloader:
319
+ inputs = batch[0].to(args.device)
320
+ p_inputs = batch[1].to(args.device)
321
+ n_inputs = batch[2].to(args.device)
322
+ label = batch[3].to(args.device)
323
+ with torch.no_grad():
324
+ lm_loss,vec = model(inputs,p_inputs,n_inputs,label)
325
+ eval_loss += lm_loss.mean().item()
326
+ vecs.append(vec.cpu().numpy())
327
+ labels.append(label.cpu().numpy())
328
+ nb_eval_steps += 1
329
+ vecs=np.concatenate(vecs,0)
330
+ labels=np.concatenate(labels,0)
331
+ eval_loss = eval_loss / nb_eval_steps
332
+ perplexity = torch.tensor(eval_loss)
333
+
334
+ scores=np.matmul(vecs,vecs.T)
335
+ dic={}
336
+ for i in range(scores.shape[0]):
337
+ scores[i,i]=-1000000
338
+ if int(labels[i]) not in dic:
339
+ dic[int(labels[i])]=-1
340
+ dic[int(labels[i])]+=1
341
+ sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
342
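+ # MAP: for each query, accumulate precision at every rank (within the top R, R = number of same-label examples) where a same-label example is retrieved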
+ MAP=[]
343
+ for i in range(scores.shape[0]):
344
+ cont=0
345
+ label=int(labels[i])
346
+ Avep = []
347
+ for j in range(dic[label]):
348
+ index=sort_ids[i,j]
349
+ if int(labels[index])==label:
350
+ Avep.append((len(Avep)+1)/(j+1))
351
+ MAP.append(sum(Avep)/dic[label])
352
+
353
+ result = {
354
+ "eval_loss": float(perplexity),
355
+ "eval_map":float(np.mean(MAP))
356
+ }
357
+
358
+
359
+ return result
360
+
361
+ def test(args, model, tokenizer):
362
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
363
+ eval_dataset = TextDataset(tokenizer, args,args.test_data_file)
364
+
365
+
366
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
367
+ # Note that DistributedSampler samples randomly
368
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
369
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
370
+
371
+ # multi-gpu evaluate
372
+ if args.n_gpu > 1:
373
+ model = torch.nn.DataParallel(model)
374
+
375
+ # Eval!
376
+ logger.info("***** Running Test *****")
377
+ logger.info(" Num examples = %d", len(eval_dataset))
378
+ logger.info(" Batch size = %d", args.eval_batch_size)
379
+ eval_loss = 0.0
380
+ nb_eval_steps = 0
381
+ model.eval()
382
+ vecs=[]
383
+ labels=[]
384
+ for batch in eval_dataloader:
385
+ inputs = batch[0].to(args.device)
386
+ p_inputs = batch[1].to(args.device)
387
+ n_inputs = batch[2].to(args.device)
388
+ label = batch[3].to(args.device)
389
+ with torch.no_grad():
390
+ lm_loss,vec = model(inputs,p_inputs,n_inputs,label)
391
+ eval_loss += lm_loss.mean().item()
392
+ vecs.append(vec.cpu().numpy())
393
+ labels.append(label.cpu().numpy())
394
+ nb_eval_steps += 1
395
+ vecs=np.concatenate(vecs,0)
396
+ labels=np.concatenate(labels,0)
397
+ eval_loss = eval_loss / nb_eval_steps
398
+ perplexity = torch.tensor(eval_loss)
399
+
400
+ scores=np.matmul(vecs,vecs.T)
401
+ for i in range(scores.shape[0]):
402
+ scores[i,i]=-1000000
403
+ sort_ids=np.argsort(scores, axis=-1, kind='quicksort', order=None)[:,::-1]
404
+ indexs=[]
405
+ for example in eval_dataset.examples:
406
+ indexs.append(example.index)
407
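+ # for every query program, write the indices of its most similar programs (itself is masked out) as the retrieval answers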
+ with open(os.path.join(args.output_dir,"predictions.jsonl"),'w') as f:
408
+ for index,sort_id in zip(indexs,sort_ids):
409
+ js={}
410
+ js['index']=index
411
+ js['answers']=[]
412
+ for idx in sort_id[:499]:
413
+ js['answers'].append(indexs[int(idx)])
414
+ f.write(json.dumps(js)+'\n')
415
+
416
+
417
+
418
+ def main():
419
+ parser = argparse.ArgumentParser()
420
+
421
+ ## Required parameters
422
+ parser.add_argument("--train_data_file", default=None, type=str, required=True,
423
+ help="The input training data file (a text file).")
424
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
425
+ help="The output directory where the model predictions and checkpoints will be written.")
426
+
427
+ ## Other parameters
428
+ parser.add_argument("--eval_data_file", default=None, type=str,
429
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
430
+ parser.add_argument("--test_data_file", default=None, type=str,
431
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
432
+
433
+ parser.add_argument("--model_type", default="bert", type=str,
434
+ help="The model architecture to be fine-tuned.")
435
+ parser.add_argument("--model_name_or_path", default=None, type=str,
436
+ help="The model checkpoint for weights initialization.")
437
+
438
+ parser.add_argument("--mlm", action='store_true',
439
+ help="Train with masked-language modeling loss instead of language modeling.")
440
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
441
+ help="Ratio of tokens to mask for masked language modeling loss")
442
+
443
+ parser.add_argument("--config_name", default="", type=str,
444
+ help="Optional pretrained config name or path if not the same as model_name_or_path")
445
+ parser.add_argument("--tokenizer_name", default="", type=str,
446
+ help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
447
+ parser.add_argument("--cache_dir", default="", type=str,
448
+ help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
449
+ parser.add_argument("--block_size", default=-1, type=int,
450
+ help="Optional input sequence length after tokenization."
451
+ "The training dataset will be truncated in block of this size for training."
452
+ "Default to the model max input length for single sentence inputs (take into account special tokens).")
453
+ parser.add_argument("--do_train", action='store_true',
454
+ help="Whether to run training.")
455
+ parser.add_argument("--do_eval", action='store_true',
456
+ help="Whether to run eval on the dev set.")
457
+ parser.add_argument("--do_test", action='store_true',
458
+ help="Whether to run eval on the dev set.")
459
+ parser.add_argument("--evaluate_during_training", action='store_true',
460
+ help="Run evaluation during training at each logging step.")
461
+ parser.add_argument("--do_lower_case", action='store_true',
462
+ help="Set this flag if you are using an uncased model.")
463
+
464
+ parser.add_argument("--train_batch_size", default=4, type=int,
465
+ help="Batch size per GPU/CPU for training.")
466
+ parser.add_argument("--eval_batch_size", default=4, type=int,
467
+ help="Batch size per GPU/CPU for evaluation.")
468
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
469
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
470
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
471
+ help="The initial learning rate for Adam.")
472
+ parser.add_argument("--weight_decay", default=0.0, type=float,
473
+ help="Weight deay if we apply some.")
474
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
475
+ help="Epsilon for Adam optimizer.")
476
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
477
+ help="Max gradient norm.")
478
+ parser.add_argument("--num_train_epochs", default=1.0, type=float,
479
+ help="Total number of training epochs to perform.")
480
+ parser.add_argument("--max_steps", default=-1, type=int,
481
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
482
+ parser.add_argument("--warmup_steps", default=0, type=int,
483
+ help="Linear warmup over warmup_steps.")
484
+
485
+ parser.add_argument('--logging_steps', type=int, default=50,
486
+ help="Log every X updates steps.")
487
+ parser.add_argument('--save_steps', type=int, default=50,
488
+ help="Save checkpoint every X updates steps.")
489
+ parser.add_argument('--save_total_limit', type=int, default=None,
490
+ help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
491
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
492
+ help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
493
+ parser.add_argument("--no_cuda", action='store_true',
494
+ help="Avoid using CUDA when available")
495
+ parser.add_argument('--overwrite_output_dir', action='store_true',
496
+ help="Overwrite the content of the output directory")
497
+ parser.add_argument('--overwrite_cache', action='store_true',
498
+ help="Overwrite the cached training and evaluation sets")
499
+ parser.add_argument('--seed', type=int, default=42,
500
+ help="random seed for initialization")
501
+ parser.add_argument('--epoch', type=int, default=42,
502
+ help="random seed for initialization")
503
+ parser.add_argument('--fp16', action='store_true',
504
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
505
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
506
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
507
+ "See details at https://nvidia.github.io/apex/amp.html")
508
+ parser.add_argument("--local_rank", type=int, default=-1,
509
+ help="For distributed training: local_rank")
510
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
511
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
512
+
513
+
514
+ args = parser.parse_args()
515
+
516
+
517
+ # Setup distant debugging if needed
518
+ if args.server_ip and args.server_port:
519
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
520
+ import ptvsd
521
+ print("Waiting for debugger attach")
522
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
523
+ ptvsd.wait_for_attach()
524
+
525
+ # Setup CUDA, GPU & distributed training
526
+ if args.local_rank == -1 or args.no_cuda:
527
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
528
+ args.n_gpu = torch.cuda.device_count()
529
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
530
+ torch.cuda.set_device(args.local_rank)
531
+ device = torch.device("cuda", args.local_rank)
532
+ torch.distributed.init_process_group(backend='nccl')
533
+ args.n_gpu = 1
534
+ args.device = device
535
+ args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
536
+ args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
537
+ # Setup logging
538
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
539
+ datefmt='%m/%d/%Y %H:%M:%S',
540
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
541
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
542
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
543
+
544
+
545
+ # Set seed
546
+ set_seed(args.seed)
547
+
548
+ # Load pretrained model and tokenizer
549
+ if args.local_rank not in [-1, 0]:
550
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
551
+
552
+ args.start_epoch = 0
553
+ args.start_step = 0
554
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
555
+ if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
556
+ args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
557
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
558
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
559
+ with open(idx_file, encoding='utf-8') as idxf:
560
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
561
+
562
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
563
+ if os.path.exists(step_file):
564
+ with open(step_file, encoding='utf-8') as stepf:
565
+ args.start_step = int(stepf.readlines()[0].strip())
566
+
567
+ logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
568
+
569
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
570
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
571
+ cache_dir=args.cache_dir if args.cache_dir else None)
572
+ config.num_labels=1
573
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
574
+ do_lower_case=args.do_lower_case,
575
+ cache_dir=args.cache_dir if args.cache_dir else None)
576
+ if args.block_size <= 0:
577
+ args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
578
+ args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
579
+ if args.model_name_or_path:
580
+ model = model_class.from_pretrained(args.model_name_or_path,
581
+ from_tf=bool('.ckpt' in args.model_name_or_path),
582
+ config=config,
583
+ cache_dir=args.cache_dir if args.cache_dir else None)
584
+ else:
585
+ model = model_class(config)
586
+
587
+ model=Model(model,config,tokenizer,args)
588
+ if args.local_rank == 0:
589
+ torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
590
+
591
+ logger.info("Training/evaluation parameters %s", args)
592
+
593
+ # Training
594
+ if args.do_train:
595
+ if args.local_rank not in [-1, 0]:
596
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
597
+
598
+ train_dataset = TextDataset(tokenizer, args,args.train_data_file)
599
+ if args.local_rank == 0:
600
+ torch.distributed.barrier()
601
+
602
+ train(args, train_dataset, model, tokenizer)
603
+
604
+
605
+
606
+ # Evaluation
607
+ results = {}
608
+ if args.do_eval and args.local_rank in [-1, 0]:
609
+ checkpoint_prefix = 'epoch_1/subject_model.pth' #'checkpoint-best-map/model.bin'
610
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
611
+ model.load_state_dict(torch.load(output_dir),strict=False)
612
+ model.to(args.device)
613
+ result=evaluate(args, model, tokenizer)
614
+ logger.info("***** Eval results *****")
615
+ for key in sorted(result.keys()):
616
+ logger.info(" %s = %s", key, str(round(result[key],4)))
617
+
618
+ if args.do_test and args.local_rank in [-1, 0]:
619
+ checkpoint_prefix = 'epoch_1/subject_model.pth' #'checkpoint-best-map/model.bin'
620
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
621
+ model.load_state_dict(torch.load(output_dir),strict=False)
622
+ model.to(args.device)
623
+ test(args, model, tokenizer)
624
+
625
+ return results
626
+
627
+
628
+ if __name__ == "__main__":
629
+ main()
630
+
631
+
632
+
Code-Code/Clone-detection-POJ-104/code/test.sh ADDED
@@ -0,0 +1,17 @@
1
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
2
+ --output_dir=../model \
3
+ --model_type=roberta \
4
+ --config_name=microsoft/codebert-base \
5
+ --model_name_or_path=microsoft/codebert-base \
6
+ --tokenizer_name=roberta-base \
7
+ --do_test \
8
+ --train_data_file=../dataset/train.jsonl \
9
+ --test_data_file=../dataset/valid.jsonl \
10
+ --epoch 2 \
11
+ --block_size 400 \
12
+ --train_batch_size 8 \
13
+ --eval_batch_size 16 \
14
+ --learning_rate 2e-5 \
15
+ --max_grad_norm 1.0 \
16
+ --evaluate_during_training \
17
+ --seed 123456
Code-Code/Clone-detection-POJ-104/code/train.sh ADDED
@@ -0,0 +1,18 @@
1
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
2
+ --output_dir=../model \
3
+ --model_type=roberta \
4
+ --config_name=microsoft/codebert-base \
5
+ --model_name_or_path=microsoft/codebert-base \
6
+ --tokenizer_name=roberta-base \
7
+ --do_train \
8
+ --train_data_file=../dataset/train.jsonl \
9
+ --eval_data_file=../dataset/valid.jsonl \
10
+ --test_data_file=../dataset/test.jsonl \
11
+ --epoch 2 \
12
+ --block_size 400 \
13
+ --train_batch_size 8 \
14
+ --eval_batch_size 16 \
15
+ --learning_rate 2e-5 \
16
+ --max_grad_norm 1.0 \
17
+ --evaluate_during_training \
18
+ --seed 123456
Code-Code/Clone-detection-POJ-104/dataset.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c13009574c8c3c85c4ec26f6e33e53765479f41fa20239578b473fd11df4d01
3
+ size 7269797
Code-Code/CodeCompletion-token/code/beam.py ADDED
@@ -0,0 +1,118 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch
4
+ from torch.autograd import Variable
5
+ import copy
6
+
7
+ class Beam(object):
8
+ def __init__(self, size, sos, eos):
9
+ self.size = size
10
+ self.tt = torch.cuda
11
+ # The score for each translation on the beam.
12
+ self.scores = self.tt.FloatTensor(size).zero_()
13
+ # The backpointers at each time-step.
14
+ self.prevKs = []
15
+ # The outputs at each time-step.
16
+ self.nextYs = [self.tt.LongTensor(size)
17
+ .fill_(0)]
18
+ self.nextYs[0][:] = sos
19
+ # Has EOS topped the beam yet.
20
+ self._eos = eos
21
+ self.eosTop = False
22
+ # Time and k pair for finished.
23
+ self.finished = []
24
+
25
+ def getCurrentState(self):
26
+ "Get the outputs for the current timestep."
27
+ batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
28
+ return batch
29
+
30
+ def getCurrentOrigin(self):
31
+ "Get the backpointers for the current timestep."
32
+ return self.prevKs[-1]
33
+
34
+ def advance(self, wordLk):
35
+ """
36
+ Given prob over words for every last beam `wordLk` and attention
37
+ `attnOut`: Compute and update the beam search.
38
+
39
+ Parameters:
40
+
41
+ * `wordLk`- probs of advancing from the last step (K x words)
42
+ * `attnOut`- attention at the last step
43
+
44
+ Returns: True if beam search is complete.
45
+ """
46
+ numWords = wordLk.size(1)
47
+
48
+ # Sum the previous scores.
49
+ if len(self.prevKs) > 0:
50
+ beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
51
+
52
+ # Don't let EOS have children.
53
+ for i in range(self.nextYs[-1].size(0)):
54
+ if self.nextYs[-1][i] in self._eos:
55
+ beamLk[i] = -1e20
56
+ else:
57
+ beamLk = wordLk[0]
58
+ flatBeamLk = beamLk.view(-1)
59
+ bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
60
+
61
+ self.scores = bestScores
62
+
63
+ # bestScoresId is flattened beam x word array, so calculate which
64
+ # word and beam each score came from
65
+ prevK = bestScoresId // numWords
66
+ self.prevKs.append(prevK)
67
+ self.nextYs.append((bestScoresId - prevK * numWords))
68
+
69
+
70
+ for i in range(self.nextYs[-1].size(0)):
71
+ if self.nextYs[-1][i] in self._eos:
72
+ s = self.scores[i]
73
+ self.finished.append((s, len(self.nextYs) - 1, i))
74
+
75
+ # End condition is when top-of-beam is EOS and no global score.
76
+ if self.nextYs[-1][0] in self._eos:
77
+ self.eosTop = True
78
+
79
+ def done(self):
80
+ return self.eosTop and len(self.finished) >=self.size
81
+
82
+ def getFinal(self):
83
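+ # return the best `size` hypotheses; if fewer than `size` beams have finished, pad with the highest-scoring unfinished beams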
+ if len(self.finished) == 0:
84
+ self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
85
+ self.finished.sort(key=lambda a: -a[0])
86
+ if len(self.finished) != self.size:
87
+ unfinished=[]
88
+ for i in range(self.nextYs[-1].size(0)):
89
+ if self.nextYs[-1][i] not in self._eos:
90
+ s = self.scores[i]
91
+ unfinished.append((s, len(self.nextYs) - 1, i))
92
+ unfinished.sort(key=lambda a: -a[0])
93
+ self.finished+=unfinished[:self.size-len(self.finished)]
94
+ return self.finished[:self.size]
95
+
96
+ def getHyp(self, beam_res):
97
+ """
98
+ Walk back to construct the full hypothesis.
99
+ """
100
+ hyps=[]
101
+ for _,timestep, k in beam_res:
102
+ hyp = []
103
+ for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
104
+ hyp.append(self.nextYs[j+1][k])
105
+ k = self.prevKs[j][k]
106
+ hyps.append(hyp[::-1])
107
+ return hyps
108
+
109
+ def buildTargetTokens(self, preds):
110
+ sentence=[]
111
+ for pred in preds:
112
+ tokens = []
113
+ for tok in pred:
114
+ tokens.append(tok)
115
+ if tok in self._eos:
116
+ break
117
+ sentence.append(tokens)
118
+ return sentence
Code-Code/CodeCompletion-token/code/dataset.py ADDED
@@ -0,0 +1,261 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import argparse
6
+ import glob
7
+ import logging
8
+ import os
9
+ import pickle
10
+ import random
11
+ import re
12
+ import gc
13
+ import shutil
14
+ import json
15
+
16
+ import numpy as np
17
+ import torch
18
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
19
+ from torch.utils.data.distributed import DistributedSampler
20
+
21
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
22
+ BertConfig, BertForMaskedLM, BertTokenizer,
23
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
24
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
25
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
26
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
27
+
28
+ class TextDataset(Dataset):
29
+ def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
30
+ if args.local_rank==-1:
31
+ local_rank=0
32
+ world_size=1
33
+ else:
34
+ local_rank=args.local_rank
35
+ world_size=torch.distributed.get_world_size()
36
+
37
+ if not os.path.exists(args.output_dir):
38
+ os.makedirs(args.output_dir)
39
+ cached_file = os.path.join(args.output_dir, file_type+"_langs_%s"%(args.langs)+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
40
+ if os.path.exists(cached_file) and not args.overwrite_cache:
41
+ if file_type == 'train':
42
+ logger.warning("Loading features from cached file %s", cached_file)
43
+ with open(cached_file, 'rb') as handle:
44
+ self.inputs = pickle.load(handle)
45
+
46
+ else:
47
+ self.inputs = []
48
+ if args.langs == 'all':
49
+ langs = os.listdir(args.data_dir)
50
+ else:
51
+ langs = [args.langs]
52
+
53
+ data=[]
54
+ for lang in langs:
55
+ datafile = os.path.join(args.data_dir, lang, file_type+'.pkl')
56
+ if file_type == 'train':
57
+ logger.warning("Creating features from dataset file at %s", datafile)
58
+ # with open(datafile) as f:
59
+ # data.extend([json.loads(x)['code'] for idx,x in enumerate(f.readlines()) if idx%world_size==local_rank])
60
+ dataset = pickle.load(open(datafile, 'rb'))
61
+ data.extend(['<s> '+' '.join(x['function'].split())+' </s>' for idx,x in enumerate(dataset) if idx%world_size==local_rank])
62
+
63
+ # random.shuffle(data)
64
+ data = data
65
+ length = len(data)
66
+ logger.warning("Data size: %d"%(length))
67
+ input_ids = []
68
+ for idx,x in enumerate(data):
69
+ try:
70
+ input_ids.extend(tokenizer.encode(x))
71
+ except Exception:
72
+ pass
73
+ if idx % (length//10) == 0:
74
+ percent = idx / (length//10) * 10
75
+ logger.warning("Rank %d, load %d"%(local_rank, percent))
76
+ del data
77
+ gc.collect()
78
+
79
+ length = len(input_ids)
80
+ for i in range(0, length-block_size, block_size):
81
+ self.inputs.append(input_ids[i : i + block_size])
82
+ del input_ids
83
+ gc.collect()
84
+
85
+ if file_type == 'train':
86
+ logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
87
+ logger.warning("Saving features into cached file %s", cached_file)
88
+ with open(cached_file, 'wb') as handle:
89
+ pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
90
+
91
+ def __len__(self):
92
+ return len(self.inputs)
93
+
94
+ def __getitem__(self, item):
95
+ return torch.tensor(self.inputs[item])
96
+
97
+ class finetuneDataset(Dataset):
98
+ def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
99
+ if args.local_rank==-1:
100
+ local_rank=0
101
+ world_size=1
102
+ else:
103
+ local_rank=args.local_rank
104
+ world_size=torch.distributed.get_world_size()
105
+
106
+ if not os.path.exists(args.output_dir):
107
+ os.makedirs(args.output_dir)
108
+ cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
109
+ if os.path.exists(cached_file) and not args.overwrite_cache:
110
+ if file_type == 'train':
111
+ logger.warning("Loading features from cached file %s", cached_file)
112
+ with open(cached_file, 'rb') as handle:
113
+ self.inputs = pickle.load(handle)
114
+
115
+ else:
116
+ self.inputs = []
117
+
118
+ datafile = os.path.join(args.data_dir, f"{file_type}.txt")
119
+ if file_type == 'train':
120
+ logger.warning("Creating features from dataset file at %s", datafile)
121
+ with open(datafile) as f:
122
+ data = f.readlines()
123
+
124
+ length = len(data)
125
+ logger.info("Data size: %d"%(length))
126
+ input_ids = []
127
+ for idx,x in enumerate(data):
128
+ x = x.strip()
129
+ if x.startswith("<s>") and x.endswith("</s>"):
130
+ pass
131
+ else:
132
+ x = "<s> " + x + " </s>"
133
+ try:
134
+ input_ids.extend(tokenizer.encode(x))
135
+ except Exception:
136
+ pass
137
+ if idx % (length//10) == 0:
138
+ percent = idx / (length//10) * 10
139
+ logger.warning("Rank %d, load %d"%(local_rank, percent))
140
+ del data
141
+ gc.collect()
142
+
143
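+ # shard the flat token stream evenly across distributed ranks, then cut each shard into fixed-size blocks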
+ length = len(input_ids) // world_size
144
+ logger.info(f"tokens: {length*world_size}")
145
+ input_ids = input_ids[local_rank*length: (local_rank+1)*length]
146
+
147
+ for i in range(0, length-block_size, block_size):
148
+ self.inputs.append(input_ids[i : i + block_size])
149
+ del input_ids
150
+ gc.collect()
151
+
152
+ if file_type == 'train':
153
+ logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
154
+ logger.warning("Saving features into cached file %s", cached_file)
155
+ with open(cached_file, 'wb') as handle:
156
+ pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
157
+
158
+ def __len__(self):
159
+ return len(self.inputs)
160
+
161
+ def __getitem__(self, item):
162
+ return torch.tensor(self.inputs[item])
163
+
164
+ class EvalDataset(Dataset):
165
+ def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
166
+ if not os.path.exists(args.output_dir):
167
+ os.makedirs(args.output_dir)
168
+ cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size))
169
+ if os.path.exists(cached_file) and not args.overwrite_cache:
170
+ with open(cached_file, 'rb') as handle:
171
+ self.inputs = pickle.load(handle)
172
+
173
+ else:
174
+ self.inputs = []
175
+
176
+ datafile = os.path.join(args.data_dir, f"{file_type}.txt")
177
+ with open(datafile) as f:
178
+ data = f.readlines()
179
+
180
+ length = len(data)
181
+ logger.info("Data size: %d"%(length))
182
+ input_ids = []
183
+ for idx,x in enumerate(data):
184
+ x = x.strip()
185
+ if x.startswith("<s>") and x.endswith("</s>"):
186
+ pass
187
+ else:
188
+ x = "<s> " + x + " </s>"
189
+ try:
190
+ input_ids.extend(tokenizer.encode(x))
191
+ except Exception:
192
+ pass
193
+ if idx % (length//10) == 0:
194
+ percent = idx / (length//10) * 10
195
+ logger.warning("load %d"%(percent))
196
+ del data
197
+ gc.collect()
198
+
199
+ logger.info(f"tokens: {len(input_ids)}")
200
+ self.split(input_ids, tokenizer, logger, block_size=block_size)
201
+ del input_ids
202
+ gc.collect()
203
+
204
+ with open(cached_file, 'wb') as handle:
205
+ pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)
206
+
207
+ def split(self, input_ids, tokenizer, logger, block_size=1024):
208
+ sample = []
209
+ i = 0
210
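+ # cut the stream into block_size windows, backing off to the last complete-token boundary so no token is split across samples; pad each sample back to block_size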
+ while i < len(input_ids):
211
+ sample = input_ids[i: i+block_size]
212
+ if len(sample) == block_size:
213
+ for j in range(block_size):
214
+ if tokenizer.convert_ids_to_tokens(sample[block_size-1-j])[0] == '\u0120' or tokenizer.convert_ids_to_tokens(sample[block_size-1-j]).startswith("<NUM_LIT"):
215
+ break
216
+ if sample[block_size-1-j] in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id]:
217
+ if sample[block_size-1-j] != tokenizer.bos_token_id:
218
+ j -= 1
219
+ break
220
+ if j == block_size-1:
221
+ print(tokenizer.decode(sample))
222
+ exit()
223
+ sample = sample[: block_size-1-j]
224
+ # print(len(sample))
225
+ i += len(sample)
226
+ pad_len = block_size-len(sample)
227
+ sample += [tokenizer.pad_token_id]*pad_len
228
+ self.inputs.append(sample)
229
+
230
+ if len(self.inputs) % 10000 == 0:
231
+ logger.info(f"{len(self.inputs)} samples")
232
+
233
+
234
+ def __len__(self):
235
+ return len(self.inputs)
236
+
237
+ def __getitem__(self, item):
238
+ return torch.tensor(self.inputs[item])
239
+
240
+
241
+
242
+ class lineDataset(Dataset):
243
+ def __init__(self, tokenizer, args, logger, file_type='test', block_size=924):
244
+ datafile = os.path.join(args.data_dir, f"{file_type}.json")
245
+ with open(datafile) as f:
246
+ datas = f.readlines()
247
+
248
+ length = len(datas)
249
+ logger.info("Data size: %d"%(length))
250
+ self.inputs = []
251
+ self.gts = []
252
+ for data in datas:
253
+ data = json.loads(data.strip())
254
+ self.inputs.append(tokenizer.encode(data["input"])[-block_size:])
255
+ self.gts.append(data["gt"])
256
+
257
+ def __len__(self):
258
+ return len(self.inputs)
259
+
260
+ def __getitem__(self, item):
261
+ return torch.tensor(self.inputs[item]), self.gts[item]
Code-Code/CodeCompletion-token/code/eval.sh ADDED
@@ -0,0 +1,20 @@
1
+ LANG=java # set python for py150
2
+ DATADIR=../dataset/javaCorpus/token_completion
3
+ LITFILE=../dataset/javaCorpus/literals.json
4
+ OUTPUTDIR=../model/javaCorpus
5
+ PRETRAINDIR=microsoft/CodeGPT-small-java # microsoft/CodeGPT-small-py for py150
6
+ LOGFILE=eval_javaCorpus.log
7
+
8
+ CUDA_VISIBLE_DEVICES=0 python run_lm.py \
9
+ --data_dir=$DATADIR \
10
+ --lit_file=$LITFILE \
11
+ --langs=$LANG \
12
+ --output_dir=$OUTPUTDIR \
13
+ --pretrain_dir=$OUTPUTDIR \
14
+ --log_file=$LOGFILE \
15
+ --model_type=gpt2 \
16
+ --block_size=512 \
17
+ --do_eval \
18
+ --per_gpu_eval_batch_size=16 \
19
+ --logging_steps=100 \
20
+ --seed=42
Code-Code/CodeCompletion-token/code/evaluate.sh ADDED
@@ -0,0 +1,3 @@
1
+ python evaluator.py \
2
+ -a=../dataset/javaCorpus/token_completion/test.txt \
3
+ -p=../model/javaCorpus/predictions.txt
Code-Code/CodeCompletion-token/code/evaluator.py ADDED
@@ -0,0 +1,36 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+ import os
4
+ import logging
5
+ import argparse
6
+
7
+ logger = logging.getLogger(__name__)
8
+ logging.basicConfig(level=logging.INFO)
9
+
10
+ def main():
11
+ parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (token level).')
12
+ parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in txt format.")
13
+ parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
14
+ args = parser.parse_args()
15
+
16
+ preds = open(args.predictions, "r").readlines()
17
+ gts = open(args.answers, "r").readlines()
18
+
19
+ assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
20
+
21
+ total = 0
22
+ correct = 0.0
23
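+ # token-level accuracy: special tokens (<s>, </s>, <EOL>, <pad>) in the ground truth are excluded from the count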
+ for pred, gt in zip(preds, gts):
24
+ pred = pred.split()
25
+ gt = gt.split()
26
+ assert len(pred) == len(gt), f"Sequence length of prediction and answer are not equal, {len(pred)}: {len(gt)}"
27
+ for x, y in zip(pred, gt):
28
+ if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
29
+ total += 1
30
+ if x == y:
31
+ correct += 1
32
+
33
+ logger.info(f"Total {total} tokens, accuracy: {round(correct/total*100, 2)}")
34
+
35
+ if __name__ == "__main__":
36
+ main()
Code-Code/CodeCompletion-token/code/model.py ADDED
@@ -0,0 +1,68 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ import math
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
+ class RNNModel(nn.Module):
9
+ """Container module with an encoder, a recurrent module, and a decoder."""
10
+
11
+ def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
12
+ super(RNNModel, self).__init__()
13
+ self.ntoken = ntoken
14
+ self.drop = nn.Dropout(dropout)
15
+ self.encoder = nn.Embedding(ntoken, ninp)
16
+ self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
17
+ self.decoder = nn.Linear(nhid, ntoken)
18
+ self.criterion = nn.CrossEntropyLoss()
19
+
20
+ # Optionally tie weights as in:
21
+ # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
22
+ # https://arxiv.org/abs/1608.05859
23
+ # and
24
+ # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
25
+ # https://arxiv.org/abs/1611.01462
26
+ if tie_weights:
27
+ if nhid != ninp:
28
+ raise ValueError('When using the tied flag, nhid must be equal to emsize')
29
+ self.decoder.weight = self.encoder.weight
30
+
31
+ self.init_weights()
32
+
33
+ self.nhid = nhid
34
+ self.nlayers = nlayers
35
+
36
+ def init_weights(self):
37
+ initrange = 0.1
38
+ nn.init.uniform_(self.encoder.weight, -initrange, initrange)
39
+ nn.init.zeros_(self.decoder.weight)
40
+ nn.init.uniform_(self.decoder.weight, -initrange, initrange)
41
+
42
+ def forward(self, input, hidden=None, labels=None):
43
+ emb = self.encoder(input)
44
+ if hidden is not None:
45
+ output, hidden = self.rnn(emb, hidden)
46
+ else:
47
+ output, hidden = self.rnn(emb)
48
+ output = self.drop(output)
49
+ output = self.decoder(output)
50
+ # decoded = decoded.view(-1, self.ntoken)
51
+ # output = F.log_softmax(decoded, dim=1)
52
+ if labels is not None:
53
+ shift_logits = output[..., :-1, :].contiguous()
54
+ shift_labels = labels[..., 1:].contiguous()
55
+ loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
56
+ return loss, output, hidden
57
+ else:
58
+ return output, hidden
59
+
60
+ def init_hidden(self, bsz):
61
+ weight = next(self.parameters())
62
+ if self.rnn_type == 'LSTM':
63
+ return (weight.new_zeros(self.nlayers, bsz, self.nhid),
64
+ weight.new_zeros(self.nlayers, bsz, self.nhid))
65
+ else:
66
+ return weight.new_zeros(self.nlayers, bsz, self.nhid)
67
+
68
+
Code-Code/CodeCompletion-token/code/run_lm.py ADDED
@@ -0,0 +1,728 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Code completion (both token level and line level) pipeline in CodeXGLUE
18
+ """
19
+
20
+ from __future__ import absolute_import, division, print_function
21
+
22
+ import argparse
23
+ import glob
24
+ import logging
25
+ import os
26
+ import pickle
27
+ import random
28
+ import re
29
+ import shutil
30
+ import json
31
+
32
+ import numpy as np
33
+ import torch
34
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
35
+ from torch.utils.data.distributed import DistributedSampler
36
+ from dataset import TextDataset, finetuneDataset, EvalDataset, lineDataset
37
+ from beam import Beam
38
+
39
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
40
+ BertConfig, BertForMaskedLM, BertTokenizer,
41
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
42
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
43
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
44
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
45
+ from model import RNNModel
46
+
47
+ # logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
48
+ # datefmt='%m/%d/%Y %H:%M:%S',
49
+ # level=logging.INFO)
50
+ logger = logging.getLogger(__name__)
51
+
52
+ MODEL_CLASSES = {
53
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
54
+ 'rnn': (GPT2Config, RNNModel, GPT2Tokenizer),
55
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
56
+ 'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
57
+ 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
58
+ 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
59
+ }
60
+
61
+
62
+
63
+ def load_and_cache_examples(args, tokenizer, evaluate=False):
64
+ if args.not_pretrain:
65
+ dataset = finetuneDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
66
+ block_size=args.block_size)
67
+ else:
68
+ dataset = TextDataset(tokenizer, args, logger, file_type='dev' if evaluate else 'train',
69
+ block_size=args.block_size)
70
+ return dataset
71
+
72
+ def set_seed(args):
73
+ random.seed(args.seed)
74
+ np.random.seed(args.seed)
75
+ torch.manual_seed(args.seed)
76
+ if args.n_gpu > 0:
77
+ torch.cuda.manual_seed_all(args.seed)
78
+
79
+ def update_config(args, config):
80
+ # config.n_positions = config.n_ctx = args.block_size
81
+ config.vocab_size = args.vocab_size
82
+
83
+ def get_special_tokens(path):
84
+ lits = json.load(open(path))
85
+ tokens = ["<STR_LIT>", "<NUM_LIT>", "<CHAR_LIT>"]
86
+ for lit in lits["str"]:
87
+ tokens.append(f"<STR_LIT:{lit}>")
88
+ for lit in lits["num"]:
89
+ tokens.append(f"<NUM_LIT:{lit}>")
90
+ for lit in lits["char"]:
91
+ tokens.append(f"<CHAR_LIT:{lit}>")
92
+ return tokens
93
+
94
+
95
+
96
+ def train(args, train_dataset, model, tokenizer, fh, pool):
97
+ """ Train the model """
98
+ if args.local_rank in [-1, 0]:
99
+ args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
100
+ if not os.path.exists(args.tensorboard_dir):
101
+ os.makedirs(args.tensorboard_dir)
102
+
103
+ args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
104
+ train_sampler = RandomSampler(train_dataset)
105
+
106
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
107
+ total_examples = len(train_dataset) * (
108
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1)
109
+ batch_size = args.batch_size * args.gradient_accumulation_steps * (
110
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1)
111
+ # if args.max_steps > 0:
112
+ # t_total = args.max_steps
113
+ # args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
114
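+ # total optimization steps = dataset passes divided by the effective batch size, which accounts for gradient accumulation and the number of distributed workers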
+ if args.num_train_epochs > 0:
115
+ t_total = total_examples // batch_size * args.num_train_epochs
116
+ args.max_steps = t_total
117
+ model.to(args.device)
118
+ if args.local_rank not in [-1, 0]:
119
+ torch.distributed.barrier()
120
+ # Prepare optimizer and schedule (linear warmup and decay)
121
+ no_decay = ['bias', 'LayerNorm.weight']
122
+ optimizer_grouped_parameters = [
123
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
124
+ 'weight_decay': args.weight_decay},
125
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
126
+ ]
127
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
128
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
129
+ num_training_steps=t_total)
130
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
131
+ # scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
132
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
133
+ # if os.path.exists(scheduler_last):
134
+ # scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
135
+ if os.path.exists(optimizer_last):
136
+ logger.warning(f"Loading optimizer from {optimizer_last}")
137
+ optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
138
+ if args.local_rank == 0:
139
+ torch.distributed.barrier()
140
+ if args.fp16:
141
+ try:
142
+ from apex import amp
143
+ except ImportError:
144
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
145
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
146
+
147
+ # multi-gpu training (should be after apex fp16 initialization)
148
+ if args.n_gpu > 1:
149
+ model = torch.nn.DataParallel(model)
150
+
151
+ # Distributed training (should be after apex fp16 initialization)
152
+ if args.local_rank != -1:
153
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
154
+ output_device=args.local_rank%args.gpu_per_node)
155
+
156
+ # Train!
157
+ logger.info("***** Running training *****")
158
+ logger.info(" Num examples = %d", total_examples )
159
+ logger.info(" Num epoch = %d", t_total*batch_size//total_examples)
160
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
161
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
162
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
163
+ logger.info(" Total optimization steps = %d", t_total)
164
+
165
+ global_step = args.start_step
166
+ tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0, 0.0, global_step
167
+ # model.resize_token_embeddings(len(tokenizer))
168
+ model.zero_grad()
169
+ set_seed(args) # Added here for reproducibility (even between python 2 and 3)
170
+
171
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
172
+ for step, batch in enumerate(train_dataloader):
173
+ inputs, labels = (batch, batch)
174
+ inputs = inputs.to(args.device)
175
+ labels = labels.to(args.device)
176
+ model.train()
177
+ outputs = model(inputs, labels=labels)
178
+ loss = outputs[0]
179
+
180
+ if args.n_gpu > 1:
181
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
182
+ if args.gradient_accumulation_steps > 1:
183
+ loss = loss / args.gradient_accumulation_steps
184
+
185
+ if args.fp16:
186
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
187
+ scaled_loss.backward()
188
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
189
+ else:
190
+ loss.backward()
191
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
192
+
193
+ tr_loss += loss.item()
194
+
195
+ if (step + 1) % args.gradient_accumulation_steps == 0:
196
+ optimizer.step()
197
+ optimizer.zero_grad()
198
+ scheduler.step()
199
+ global_step += 1
200
+ output_flag=True
201
+ avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
202
+ if global_step % args.logging_steps == 0:
203
+ logger.info(" steps: %s ppl: %s lr: %s", global_step, round(avg_loss,5), scheduler.get_last_lr()[0])
204
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
205
+ # Log metrics
206
+ logging_loss = tr_loss
207
+ tr_nb=global_step
208
+
209
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
210
+ checkpoint_prefix = "checkpoint"
211
+ # Save model checkpoint
212
+ if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
213
+ results = evaluate(args, model, tokenizer, eval_when_training=True)
214
+ for key, value in results.items():
215
+ logger.info(" %s = %s", key, round(value,4))
216
+ output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4)))
217
+ else:
218
+ output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
219
+ if not os.path.exists(output_dir):
220
+ os.makedirs(output_dir)
221
+ model_to_save = (
222
+ model.module if hasattr(model, "module") else model
223
+ ) # Take care of distributed/parallel training
224
+ if args.model_type == "rnn":
225
+ torch.save(model_to_save.state_dict(), os.path.join(output_dir, "model.pt"))
226
+ else:
227
+ model_to_save.save_pretrained(output_dir)
228
+ tokenizer.save_pretrained(output_dir)
229
+
230
+ torch.save(args, os.path.join(output_dir, "training_args.bin"))
231
+ logger.info("Saving model checkpoint to %s", output_dir)
232
+
233
+ # _rotate_checkpoints(args, checkpoint_prefix)
234
+ last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
235
+ if not os.path.exists(last_output_dir):
236
+ os.makedirs(last_output_dir)
237
+ if args.model_type == "rnn":
238
+ torch.save(model_to_save.state_dict(), os.path.join(last_output_dir, "model.pt"))
239
+ else:
240
+ model_to_save.save_pretrained(last_output_dir)
241
+ tokenizer.save_pretrained(last_output_dir)
242
+ idx_file = os.path.join(last_output_dir, 'idx_file.txt')
243
+ with open(idx_file, 'w', encoding='utf-8') as idxf:
244
+ idxf.write(str(0) + '\n')
245
+
246
+ torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
247
+ # torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
248
+ logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
249
+
250
+ step_file = os.path.join(last_output_dir, 'step_file.txt')
251
+ with open(step_file, 'w', encoding='utf-8') as stepf:
252
+ stepf.write(str(global_step) + '\n')
253
+
254
+
255
+ if args.max_steps > 0 and global_step > args.max_steps:
256
+ break
257
+
258
+ # save a checkpoint at the end of every epoch
259
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
260
+ if not os.path.exists(output_dir):
261
+ os.makedirs(output_dir)
262
+ model_to_save = model.module if hasattr(model, 'module') else model
263
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
264
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
265
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
266
+
267
+ if args.max_steps > 0 and global_step > args.max_steps:
268
+ break
269
+
270
+ return global_step, tr_loss / global_step
271
+
272
+
273
+ def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
274
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
275
+ eval_output_dir = args.output_dir
276
+
277
+ eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
278
+
279
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
280
+ os.makedirs(eval_output_dir)
281
+
282
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
283
+ # Note that DistributedSampler samples randomly
284
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
285
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last=True)
286
+
287
+ # multi-gpu evaluate
288
+ if args.n_gpu > 1 and eval_when_training is False:
289
+ model = torch.nn.DataParallel(model)
290
+
291
+ # Eval!
292
+ #logger.info("***** Running evaluation {} *****".format(prefix))
293
+ #logger.info(" Num examples = %d", len(eval_dataset))
294
+ #logger.info(" Batch size = %d", args.eval_batch_size)
295
+ eval_loss = 0.0
296
+ nb_eval_steps = 0
297
+ model.eval()
298
+
299
+ for batch in eval_dataloader:
300
+ inputs, labels = (batch, batch)
301
+ inputs = inputs.to(args.device)
302
+ labels = labels.to(args.device)
303
+
304
+ with torch.no_grad():
305
+ outputs = model(inputs, labels=labels)
306
+ lm_loss = outputs[0]
307
+ eval_loss += lm_loss.mean().item()
308
+ nb_eval_steps += 1
309
+
310
+ eval_loss = eval_loss / nb_eval_steps
311
+ perplexity = torch.exp(torch.tensor(eval_loss))
312
+
313
+ result = {
314
+ "perplexity": float(perplexity)
315
+ }
316
+
317
+ output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
318
+ with open(output_eval_file, "w") as writer:
319
+ #logger.info("***** Eval results {} *****".format(prefix))
320
+ for key in sorted(result.keys()):
321
+ #logger.info(" %s = %s", key, str(result[key]))
322
+ writer.write("%s = %s\n" % (key, str(result[key])))
323
+
324
+ return result
325
+
326
+ def eval_acc(args, model, tokenizer, file_type='test'):
327
+ """
328
+ Evaluate token level code completion on accuracy.
329
+
330
+ This function can only be used to evaluate accuracy, not to run inference, because the inputs are the previous ground-truth sub-tokens rather than the model's own outputs.
+ Even so, the accuracy it reports is guaranteed to match real token-level completion.
+ The reason is:
+ Assume the inputs are "context_len = 100 <EOL> masks = np . zeros (" and the ground truth is "context_len".
+ Due to our BPE encoding, the model has to output "context", "_" and "len" over 3 time steps, i.e. gt0="context", gt1="_", gt2="len".
+ In a real inference scenario:
+ time step 0, inputs "context_len = 100 <EOL> masks = np . zeros ( ", model outputs: out0;
+ time step 1, inputs: in1=out0, outputs: out1
+ ... until the model outputs a complete token.
+ But in this function, no matter what out0 is, in1=gt0="context".
+ That is to say, this function feeds the ground-truth sub-token, not the model output, when predicting the next piece of a token that BPE has split.
+ So we would obviously get different predictions from the real token completion scenario.
+ However, for token-level accuracy,
+ a complete token counts as correct if and only if the model predicts every one of its sub-tokens correctly.
+ In that situation out0==gt0 and out1==gt1, so it does not matter whether we feed the ground truth or the model output.
+ In summary, this function makes the model output the same complete token whenever that token equals the ground truth;
+ if not, the model may predict a different token than in the real completion scenario, but it is wrong either way.
+ So it does not affect token-level accuracy.
348
+
349
+ I use this trick to speed up evaluation due to the large test set.
350
+ """
351
+ eval_dataset = EvalDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size)
352
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
353
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
354
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
355
+ model.to(args.device)
356
+ # multi-gpu training (should be after apex fp16 initialization)
357
+ if args.n_gpu > 1:
358
+ model = torch.nn.DataParallel(model)
359
+
360
+ # Distributed training (should be after apex fp16 initialization)
361
+ if args.local_rank != -1:
362
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
363
+ output_device=args.local_rank%args.gpu_per_node)
364
+
365
+ def DecodeIds(idxs):
366
+ codes = ""
367
+ for idx in idxs:
368
+ to_add = tokenizer.convert_ids_to_tokens(idx)
369
+ if tokenizer.convert_ids_to_tokens(idx)[0] == '\u0120':
370
+ if not codes.endswith(" "):
371
+ codes += " " + to_add[1:]
372
+ else:
373
+ codes += to_add[1:]
374
+ elif (
375
+ idx in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or
376
+ tokenizer.convert_ids_to_tokens(idx).startswith("<NUM_LIT")
377
+ ):
378
+ codes += " " + to_add + " "
379
+ else:
380
+ codes += to_add
381
+ return codes.strip(" ")
382
+
383
+ model.eval()
384
+
385
+ correct = 0.0
386
+ total = 0
387
+
388
+ total_pred = []
389
+ total_gt = []
390
+
391
+ for step, batch in enumerate(eval_dataloader):
392
+ inputs = batch.to(args.device)
393
+
394
+ with torch.no_grad():
395
+ outputs = model(inputs)
396
+ pred_scores = outputs[0]
397
+ pred_ids = pred_scores.argmax(-1)
398
+
399
+ all_pred = []
400
+ all_gt = []
401
+ prev_pred = None
402
+ for pred, gt in zip(pred_ids, inputs):
403
+ pred = pred.cpu().tolist()
404
+ gt = gt.cpu().tolist()
405
+
406
+ for i, y in enumerate(gt):
407
+ if i == 0:
408
+ if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id]:
409
+ now_gt = [y]
410
+ now_pred = [0] if prev_pred is None else [prev_pred]
411
+ all_pred.append(DecodeIds(now_pred).strip().split()[0])
412
+ all_gt.append(DecodeIds(now_gt).strip())
413
+ now_gt = []
414
+ now_pred = []
415
+ else:
416
+ now_gt = [y]
417
+ now_pred = [0] if prev_pred is None else [prev_pred]
418
+ else:
419
+ if tokenizer.convert_ids_to_tokens(y)[0] == '\u0120':
420
+ if len(now_gt) > 0:
421
+ try:
422
+ all_pred.append(DecodeIds(now_pred).strip().split()[0])
423
+ except IndexError:
424
+ all_pred.append("<SPACE>")
425
+ all_gt.append(DecodeIds(now_gt).strip())
426
+ now_gt = []
427
+ now_pred = []
428
+ if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or tokenizer.convert_ids_to_tokens(y).startswith("<NUM_LIT"):
429
+ if len(now_gt) > 0:
430
+ try:
431
+ all_pred.append(DecodeIds(now_pred).strip().split()[0])
432
+ except IndexError:
433
+ all_pred.append("<SPACE>")
434
+ all_gt.append(DecodeIds(now_gt).strip())
435
+ now_gt = [y]
436
+ now_pred = [pred[i-1]]
437
+ try:
438
+ all_pred.append(DecodeIds(now_pred).strip().split()[0])
439
+ except IndexError:
440
+ all_pred.append("<SPACE>")
441
+ all_gt.append(DecodeIds(now_gt).strip())
442
+ now_gt = []
443
+ now_pred = []
444
+ continue
445
+ now_gt.append(y)
446
+ now_pred.append(pred[i-1])
447
+ assert len(all_pred) == len(all_gt)
448
+
449
+ total_pred.extend(all_pred)
450
+ total_gt.extend(all_gt)
451
+
452
+
453
+ for x, y in zip(all_pred, all_gt):
454
+ if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
455
+ total += 1
456
+ if x == y:
457
+ correct += 1
458
+
459
+ if step % args.logging_steps == 0:
460
+ logger.info(f"{step} are done!")
461
+ logger.info(f"{total}, {correct/total}")
462
+
463
+ # pickle.dump(total_pred, open(os.path.join(args.output_dir, "preds.pkl"), "wb"))
464
+ # pickle.dump(total_gt, open(os.path.join(args.output_dir, "gts.pkl"), "wb"))
465
+
466
+ saved_file = os.path.join(args.output_dir, "predictions.txt")
467
+ total_samples = post_process(args, total_pred, total_gt, open(os.path.join(args.data_dir, f"{file_type}.txt")).readlines(), saved_file)
468
+ logger.info(f"Eval on {total_samples}, saved at {saved_file}")
469
+
470
+ return total, correct
471
+
472
+ def post_process(args, preds, gts, true_gts, saved_file):
473
+ wf = open(saved_file, "w")
474
+
475
+ cnt = 0
476
+ new_gt = []
477
+ new_pred = []
478
+ for i, (pred,gt) in enumerate(zip(preds,gts)):
479
+ if gt in ["", "<pad>"]:
480
+ continue
481
+ new_gt.append(gt)
482
+ new_pred.append(pred.replace(" ", ""))
483
+ if gt == "</s>":
484
+ gt_str = " ".join(new_gt)
485
+ pred_str = " ".join(new_pred)
486
+ assert gt_str == true_gts[cnt].strip(), f"{cnt} sample gt_str != true_gt"
487
+ wf.write(pred_str+"\n")
488
+ cnt += 1
489
+ new_gt = []
490
+ new_pred = []
491
+
492
+ return cnt
493
+
494
+
495
+ def main():
496
+ parser = argparse.ArgumentParser()
497
+
498
+ ## Required parameters
499
+ parser.add_argument("--data_dir", default=None, type=str, required=True,
500
+ help="The input data path.")
501
+ parser.add_argument("--langs", default=None, type=str, required=True,
502
+ help="Languages to train, if all, train all languages in data_dir")
503
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
504
+ help="The output directory where the model predictions and checkpoints will be written.")
505
+
506
+ ## Other parameters
507
+ parser.add_argument("--model_type", default="gpt2", type=str,
508
+ help="The model architecture to be fine-tuned.")
509
+ parser.add_argument("--pretrain_dir", default="", type=str,
510
+ help="Directory of the pre-trained model to load and fine-tune from.")
511
+ parser.add_argument("--config_dir", type=str,
512
+ help="config name. Required when training from scratch")
513
+ parser.add_argument("--tokenizer_dir", type=str,
514
+ help="Pre-trained tokenizer dir. Required when training from scratch")
515
+ parser.add_argument("--lit_file", type=str,
516
+ help="literals json file")
517
+ parser.add_argument("--load_name", type=str, default="pretrained",
518
+ help="Load pretrained model name")
519
+
520
+ parser.add_argument("--mlm", action='store_true',
521
+ help="Train with masked-language modeling loss instead of language modeling.")
522
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
523
+ help="Ratio of tokens to mask for masked language modeling loss")
524
+
525
+ parser.add_argument("--cache_dir", default="", type=str,
526
+ help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
527
+ parser.add_argument("--block_size", default=1024, type=int,
528
+ help="Optional input sequence length after tokenization."
529
+ "The training dataset will be truncated into blocks of this size for training."
530
+ "Defaults to the model max input length for single sentence inputs (taking into account special tokens).")
531
+ parser.add_argument("--do_train", action='store_true',
532
+ help="Whether to run training.")
533
+ parser.add_argument("--do_eval", action='store_true',
534
+ help="Whether to run eval on the dev set.")
535
+ parser.add_argument("--evaluate_during_training", action='store_true',
536
+ help="Run evaluation during training at each logging step.")
537
+ parser.add_argument("--do_lower_case", action='store_true',
538
+ help="Set this flag if you are using an uncased model.")
539
+
540
+ parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
541
+ help="Batch size per GPU/CPU for training.")
542
+ parser.add_argument("--per_gpu_eval_batch_size", default=12, type=int,
543
+ help="Batch size per GPU/CPU for evaluation.")
544
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
545
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
546
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
547
+ help="The initial learning rate for Adam.")
548
+ parser.add_argument("--weight_decay", default=0.0, type=float,
549
+ help="Weight decay if we apply some.")
550
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
551
+ help="Epsilon for Adam optimizer.")
552
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
553
+ help="Max gradient norm.")
554
+ parser.add_argument("--num_train_epochs", default=1.0, type=float,
555
+ help="Total number of training epochs to perform.")
556
+ parser.add_argument("--max_steps", default=-1, type=int,
557
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
558
+ parser.add_argument("--warmup_steps", default=0, type=int,
559
+ help="Linear warmup over warmup_steps.")
560
+
561
+ parser.add_argument('--logging_steps', type=int, default=1000,
562
+ help="Log every X updates steps.")
563
+ parser.add_argument('--save_steps', type=int, default=5000,
564
+ help="Save checkpoint every X updates steps.")
565
+ parser.add_argument('--save_total_limit', type=int, default=None,
566
+ help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
567
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
568
+ help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
569
+ parser.add_argument("--no_cuda", action='store_true',
570
+ help="Avoid using CUDA when available")
571
+ parser.add_argument('--overwrite_output_dir', action='store_true',
572
+ help="Overwrite the content of the output directory")
573
+ parser.add_argument('--overwrite_cache', action='store_true',
574
+ help="Overwrite the cached training and evaluation sets")
575
+ parser.add_argument('--seed', type=int, default=42,
576
+ help="random seed for initialization")
577
+ parser.add_argument('--not_pretrain', action='store_true',
578
+ help="use different dataset")
579
+
580
+ parser.add_argument('--fp16', action='store_true',
581
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
582
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
583
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
584
+ "See details at https://nvidia.github.io/apex/amp.html")
585
+ parser.add_argument("--local_rank", type=int, default=-1,
586
+ help="For distributed training: local_rank")
587
+ parser.add_argument("--node_index", type=int, default=-1,
588
+ help="node index if multi-node running")
589
+ parser.add_argument("--gpu_per_node", type=int, default=-1,
590
+ help="num of gpus per node")
591
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
592
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
593
+
594
+ parser.add_argument('--log_file', type=str, default='')
595
+ parser.add_argument('--tensorboard_dir', type=str)
596
+
597
+ pool = None
598
+ args = parser.parse_args()
599
+
600
+ # args.output_dir = os.path.join(args.output_dir, args.dataset)
601
+
602
+ if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
603
+ raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
604
+ "flag (masked language modeling).")
605
+
606
+ if os.path.exists(args.output_dir) and os.listdir(
607
+ args.output_dir) and args.do_train and not args.overwrite_output_dir:
608
+ raise ValueError(
609
+ "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
610
+ args.output_dir))
611
+
612
+ # Setup distant debugging if needed
613
+ if args.server_ip and args.server_port:
614
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
615
+ import ptvsd
616
+ print("Waiting for debugger attach")
617
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
618
+ ptvsd.wait_for_attach()
619
+
620
+ logger.info("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node))
621
+ # Setup CUDA, GPU & distributed training
622
+ if args.local_rank == -1 or args.no_cuda:
623
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
624
+ args.n_gpu = torch.cuda.device_count()
625
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
626
+ torch.cuda.set_device(args.local_rank)
627
+ device = torch.device("cuda", args.local_rank)
628
+ torch.distributed.init_process_group(backend='nccl')
629
+ args.local_rank += args.node_index * args.gpu_per_node
630
+ args.n_gpu = 1
631
+ args.device = device
632
+ # args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
633
+
634
+ # Setup logging
635
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
636
+ datefmt='%m/%d/%Y %H:%M:%S',
637
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
638
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
639
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
640
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1)
641
+
642
+ # Use a FileHandler to also write the logs to a file
643
+ fh = logging.FileHandler(args.log_file)
644
+ logger.addHandler(fh)
645
+
646
+ # Set seed
647
+ set_seed(args)
648
+
649
+ # Load pretrained model and tokenizer
650
+ if args.local_rank not in [-1, 0]:
651
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
652
+
653
+ args.start_epoch = 0
654
+ args.start_step = 0
655
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
656
+ if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
657
+ args.pretrain_dir = os.path.join(checkpoint_last)
658
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
659
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
660
+ with open(idx_file, encoding='utf-8') as idxf:
661
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
662
+
663
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
664
+ if os.path.exists(step_file):
665
+ with open(step_file, encoding='utf-8') as stepf:
666
+ args.start_step = int(stepf.readlines()[0].strip())
667
+
668
+ logger.info("reload model from {}, resume from {} steps".format(checkpoint_last, args.start_step))
669
+
670
+ # get special tokens
671
+ special_tokens = get_special_tokens(args.lit_file)
672
+
673
+ # Load pre-trained model
674
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
675
+ pretrained = checkpoint_last #args.pretrain_dir
676
+ if pretrained:
677
+ tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
678
+ if args.model_type == "rnn":
679
+ model = model_class(len(tokenizer), 768, 768, 1)
680
+ model_last = os.path.join(pretrained, 'model.pt')
681
+ if os.path.exists(model_last):
682
+ logger.warning(f"Loading model from {model_last}")
683
+ model.load_state_dict(torch.load(model_last, map_location="cpu"))
684
+ else:
685
+ model = model_class.from_pretrained(pretrained)
686
+ model.resize_token_embeddings(len(tokenizer))
687
+ else:
688
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
689
+ args.vocab_size = len(tokenizer)
690
+ if args.model_type == "rnn":
691
+ model = model_class(len(tokenizer), 768, 768, 1)
692
+ else:
693
+ config = config_class.from_pretrained(args.config_dir)
694
+ model = model_class(config)
695
+ model.resize_token_embeddings(len(tokenizer))
696
+
697
+
698
+ model_parameters = model.parameters()
699
+ num_params = sum([np.prod(p.size()) for p in model_parameters])
700
+ logger.info(f"Model has a total of {num_params} trainable parameters")
701
+
702
+ if args.local_rank == 0:
703
+ torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
704
+
705
+ logger.info("Training/evaluation parameters %s", args)
706
+
707
+ # Training
708
+ if args.do_train:
709
+ train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
710
+
711
+ global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
712
+ logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
713
+
714
+ # Only works on single GPU
715
+ if args.do_eval:
716
+ checkpoint_prefix = 'epoch_5/subject_model.pth'
717
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
718
+ model.load_state_dict(torch.load(output_dir))
719
+ model.to(args.device)
720
+ # Do not use the dev file here, otherwise the check in EvalDataset's __init__ fails and the process exits
721
+ # dev_total, dev_cr = eval_acc(args, model, tokenizer, 'dev')
722
+ # logger.info(f"Dev total tokens: {dev_total}, accuracy: {dev_cr/dev_total}")
723
+ test_total, test_cr = eval_acc(args, model, tokenizer, 'test')
724
+ logger.info(f"Test total tokens: {test_total}, accuracy: {test_cr/test_total}")
725
+
726
+
727
+ if __name__ == "__main__":
728
+ main()
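Aside on the docstring of eval_acc above: it argues that feeding ground-truth sub-tokens (teacher forcing) cannot change token-level accuracy, because a completed token only counts as correct when every one of its BPE sub-tokens is predicted correctly. A minimal standalone sketch of that argument (illustrative only, not part of run_lm.py; the BPE split is assumed):

    def token_correct(pred_subtokens, gt_subtokens):
        # A completed token is correct only if every sub-token matches the ground truth.
        return list(pred_subtokens) == list(gt_subtokens)

    gt = ["context", "_", "len"]                        # "context_len" after BPE
    assert token_correct(["context", "_", "len"], gt)   # all sub-tokens right -> token counted correct
    assert not token_correct(["ctx", "_", "len"], gt)   # first sub-token wrong -> token counted wrong,
                                                        # regardless of what is fed at the later steps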
Code-Code/CodeCompletion-token/code/train.sh ADDED
@@ -0,0 +1,31 @@
1
+ LANG=java # set python for py150
2
+ DATADIR=../dataset/javaCorpus/token_completion
3
+ LITFILE=../dataset/javaCorpus/literals.json
4
+ OUTPUTDIR=../model/javaCorpus
5
+ PRETRAINDIR=microsoft/CodeGPT-small-java # microsoft/CodeGPT-small-py for py150
6
+ LOGFILE=train_javaCorpus.log
7
+ PER_NODE_GPU=4 # modify YOUR_GPU_NUM
8
+
9
+ CUDA_VISIBLE_DEVICES=0,1,2,3 python run_lm.py \
10
+ --data_dir=$DATADIR \
11
+ --lit_file=$LITFILE \
12
+ --langs=$LANG \
13
+ --output_dir=$OUTPUTDIR \
14
+ --pretrain_dir=$PRETRAINDIR \
15
+ --log_file=$LOGFILE \
16
+ --model_type=gpt2 \
17
+ --block_size=512 \
18
+ --do_train \
19
+ --gpu_per_node $PER_NODE_GPU \
20
+ --learning_rate=8e-5 \
21
+ --weight_decay=0.01 \
22
+ --evaluate_during_training \
23
+ --per_gpu_train_batch_size=1 \
24
+ --per_gpu_eval_batch_size=4 \
25
+ --gradient_accumulation_steps=4 \
26
+ --num_train_epochs=5 \
27
+ --logging_steps=100 \
28
+ --save_steps=1000 \
29
+ --seed=42 \
30
+ --overwrite_output_dir \
31
+ --not_pretrain
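For orientation, the flags above imply the following number of sequences per optimizer step (simple arithmetic, assuming all four visible GPUs are actually used):

    # Effective training batch size implied by train.sh (illustration only).
    per_gpu_train_batch_size = 1
    n_gpu = 4                        # CUDA_VISIBLE_DEVICES=0,1,2,3
    gradient_accumulation_steps = 4
    print(per_gpu_train_batch_size * n_gpu * gradient_accumulation_steps)  # 16 blocks of 512 tokens per update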
Code-Code/CodeCompletion-token/data.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fe81ae13261569dcb0147143f6be01900bdea8fc19394b931a2f6be720dac03
3
+ size 16149700
Code-Code/Defect-detection/code/eval.sh ADDED
@@ -0,0 +1,18 @@
1
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
2
+ --output_dir=../model \
3
+ --model_type=roberta \
4
+ --tokenizer_name=microsoft/codebert-base \
5
+ --model_name_or_path=microsoft/codebert-base \
6
+ --do_eval \
7
+ --do_test \
8
+ --train_data_file=../dataset/train.jsonl \
9
+ --eval_data_file=../dataset/valid.jsonl \
10
+ --test_data_file=../dataset/valid.jsonl \
11
+ --epoch 5 \
12
+ --block_size 400 \
13
+ --train_batch_size 32 \
14
+ --eval_batch_size 64 \
15
+ --learning_rate 2e-5 \
16
+ --max_grad_norm 1.0 \
17
+ --evaluate_during_training \
18
+ --seed 123456
Code-Code/Defect-detection/code/evaluate.sh ADDED
@@ -0,0 +1 @@
1
+ python evaluator.py -a ../dataset/valid.jsonl -p ../model/predictions.txt
Code-Code/Defect-detection/code/evaluator.py ADDED
@@ -0,0 +1,52 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+ import logging
4
+ import sys
5
+ import json
6
+ import numpy as np
7
+
8
+ def read_answers(filename):
9
+ answers={}
10
+ with open(filename) as f:
11
+ for line in f:
12
+ line=line.strip()
13
+ js=json.loads(line)
14
+ answers[js['idx']]=js['target']
15
+ return answers
16
+
17
+ def read_predictions(filename):
18
+ predictions={}
19
+ with open(filename) as f:
20
+ for line in f:
21
+ line=line.strip()
22
+ idx,label=line.split()
23
+ predictions[int(idx)]=int(label)
24
+ return predictions
25
+
26
+ def calculate_scores(answers,predictions):
27
+ Acc=[]
28
+ for key in answers:
29
+ if key not in predictions:
30
+ logging.error("Missing prediction for index {}.".format(key))
31
+ sys.exit()
32
+ Acc.append(answers[key]==predictions[key])
33
+
34
+ scores={}
35
+ scores['Acc']=np.mean(Acc)
36
+ return scores
37
+
38
+ def main():
39
+ import argparse
40
+ parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for Defect Detection dataset.')
41
+ parser.add_argument('--answers', '-a',help="filename of the labels, in jsonl format.")
42
+ parser.add_argument('--predictions', '-p',help="filename of the leaderboard predictions, in txt format.")
43
+
44
+
45
+ args = parser.parse_args()
46
+ answers=read_answers(args.answers)
47
+ predictions=read_predictions(args.predictions)
48
+ scores=calculate_scores(answers,predictions)
49
+ print(scores)
50
+
51
+ if __name__ == '__main__':
52
+ main()
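To make the expected I/O concrete: read_answers consumes the original jsonl file (one object with "idx" and "target" per line), while read_predictions expects one whitespace-separated "index label" pair per line, which is the format written by run.py's test(). A tiny made-up example of the resulting accuracy computation (values are illustrative only):

    # answers: from valid.jsonl, e.g. {"idx": 0, "target": 1, ...} per line
    # predictions: from predictions.txt, e.g. "0\t1" per line
    answers = {0: 1, 1: 0, 2: 1}
    predictions = {0: 1, 1: 1, 2: 1}
    acc = sum(answers[k] == predictions[k] for k in answers) / len(answers)
    print({'Acc': acc})  # {'Acc': 0.666...}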
Code-Code/Defect-detection/code/model.py ADDED
@@ -0,0 +1,45 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch
6
+ from torch.autograd import Variable
7
+ import copy
8
+ from torch.nn import CrossEntropyLoss, MSELoss
9
+
10
+
11
+
12
+ class Model(nn.Module):
13
+ def __init__(self, encoder,config,tokenizer,args):
14
+ super(Model, self).__init__()
15
+ self.encoder = encoder
16
+ self.config=config
17
+ self.tokenizer=tokenizer
18
+ self.args=args
19
+
20
+ # Define dropout layer, dropout_probability is taken from args.
21
+ self.dropout = nn.Dropout(args.dropout_probability)
22
+
23
+
24
+ def forward(self, input_ids=None,labels=None, return_vec=None):
25
+ outputs=self.encoder(input_ids,attention_mask=input_ids.ne(1))
26
+
27
+ if return_vec:
28
+ return outputs.pooler_output
29
+ outputs = outputs[0]
30
+
31
+ # Apply dropout
32
+ outputs = self.dropout(outputs)
33
+
34
+ logits=outputs
35
+ prob=torch.sigmoid(logits)
36
+ if labels is not None:
37
+ labels=labels.float()
38
+ loss=torch.log(prob[:,0]+1e-10)*labels+torch.log((1-prob)[:,0]+1e-10)*(1-labels)
39
+ loss=-loss.mean()
40
+ return loss,prob
41
+ else:
42
+ return prob
43
+
44
+
45
+
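The hand-written loss in Model.forward above is plain binary cross-entropy on the first column of the sigmoid output, with an explicit 1e-10 added for numerical safety. A quick standalone check of that equivalence (not part of the repository code):

    import torch
    import torch.nn.functional as F

    logits = torch.randn(4, 1)                      # one logit per example, as with num_labels=1
    labels = torch.tensor([1., 0., 1., 0.])
    prob = torch.sigmoid(logits)
    manual = -(torch.log(prob[:, 0] + 1e-10) * labels +
               torch.log((1 - prob)[:, 0] + 1e-10) * (1 - labels)).mean()
    reference = F.binary_cross_entropy(prob[:, 0], labels)
    print(torch.allclose(manual, reference, atol=1e-5))  # True, up to the 1e-10 epsilon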
Code-Code/Defect-detection/code/run.py ADDED
@@ -0,0 +1,598 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
18
+ GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
19
+ using a masked language modeling (MLM) loss.
20
+ """
21
+
22
+ from __future__ import absolute_import, division, print_function
23
+
24
+ import argparse
25
+ import glob
26
+ import logging
27
+ import os
28
+ import pickle
29
+ import random
30
+ import re
31
+ import shutil
32
+
33
+ import numpy as np
34
+ import torch
35
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
36
+ from torch.utils.data.distributed import DistributedSampler
37
+ import json
38
+ try:
39
+ from torch.utils.tensorboard import SummaryWriter
40
+ except:
41
+ from tensorboardX import SummaryWriter
42
+
43
+ from tqdm import tqdm, trange
44
+ import multiprocessing
45
+ from model import Model
46
+ cpu_cont = multiprocessing.cpu_count()
47
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
48
+ BertConfig, BertForMaskedLM, BertTokenizer, BertForSequenceClassification,
49
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
50
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
51
+ RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
52
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertForSequenceClassification, DistilBertTokenizer)
53
+
54
+ logger = logging.getLogger(__name__)
55
+
56
+ MODEL_CLASSES = {
57
+ 'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
58
+ 'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
59
+ 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
60
+ 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
61
+ 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
62
+ }
63
+
64
+
65
+
66
+ class InputFeatures(object):
67
+ """A single set of training/test features for an example."""
68
+ def __init__(self,
69
+ input_tokens,
70
+ input_ids,
71
+ idx,
72
+ label,
73
+
74
+ ):
75
+ self.input_tokens = input_tokens
76
+ self.input_ids = input_ids
77
+ self.idx=str(idx)
78
+ self.label=label
79
+
80
+
81
+ def convert_examples_to_features(js,tokenizer,args):
82
+ #source
83
+ code=' '.join(js['func'].split())
84
+ code_tokens=tokenizer.tokenize(code)[:args.block_size-2]
85
+ source_tokens =[tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
86
+ source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
87
+ padding_length = args.block_size - len(source_ids)
88
+ source_ids+=[tokenizer.pad_token_id]*padding_length
89
+ return InputFeatures(source_tokens,source_ids,js['idx'],js['target'])
90
+
91
+ class TextDataset(Dataset):
92
+ def __init__(self, tokenizer, args, file_path=None):
93
+ self.examples = []
94
+ with open(file_path) as f:
95
+ for line in f:
96
+ js=json.loads(line.strip())
97
+ self.examples.append(convert_examples_to_features(js,tokenizer,args))
98
+ if 'train' in file_path:
99
+ for idx, example in enumerate(self.examples[:3]):
100
+ logger.info("*** Example ***")
101
+ logger.info("idx: {}".format(idx))
102
+ logger.info("label: {}".format(example.label))
103
+ logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
104
+ logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
105
+
106
+ def __len__(self):
107
+ return len(self.examples)
108
+
109
+ def __getitem__(self, i):
110
+ return torch.tensor(self.examples[i].input_ids),torch.tensor(self.examples[i].label)
111
+
112
+
113
+ def set_seed(seed=42):
114
+ random.seed(seed)
115
+ os.environ['PYTHONHASHSEED'] = str(seed)
116
+ np.random.seed(seed)
117
+ torch.manual_seed(seed)
118
+ torch.cuda.manual_seed(seed)
119
+ torch.backends.cudnn.deterministic = True
120
+
121
+
122
+ def train(args, train_dataset, model, tokenizer):
123
+ """ Train the model """
124
+ args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
125
+ train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
126
+
127
+ train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
128
+ batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
129
+ args.max_steps=args.epoch*len( train_dataloader)
130
+ args.save_steps=len( train_dataloader)
131
+ args.warmup_steps=len( train_dataloader)
132
+ args.logging_steps=len( train_dataloader)
133
+ args.num_train_epochs=args.epoch
134
+ model.to(args.device)
135
+ # Prepare optimizer and schedule (linear warmup and decay)
136
+ no_decay = ['bias', 'LayerNorm.weight']
137
+ optimizer_grouped_parameters = [
138
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
139
+ 'weight_decay': args.weight_decay},
140
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
141
+ ]
142
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
143
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
144
+ num_training_steps=args.max_steps)
145
+ if args.fp16:
146
+ try:
147
+ from apex import amp
148
+ except ImportError:
149
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
150
+ model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
151
+
152
+ # multi-gpu training (should be after apex fp16 initialization)
153
+ if args.n_gpu > 1:
154
+ model = torch.nn.DataParallel(model)
155
+
156
+ # Distributed training (should be after apex fp16 initialization)
157
+ if args.local_rank != -1:
158
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
159
+ output_device=args.local_rank,
160
+ find_unused_parameters=True)
161
+
162
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
163
+ scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
164
+ optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
165
+ if os.path.exists(scheduler_last):
166
+ scheduler.load_state_dict(torch.load(scheduler_last))
167
+ if os.path.exists(optimizer_last):
168
+ optimizer.load_state_dict(torch.load(optimizer_last))
169
+ # Train!
170
+ logger.info("***** Running training *****")
171
+ logger.info(" Num examples = %d", len(train_dataset))
172
+ logger.info(" Num Epochs = %d", args.num_train_epochs)
173
+ logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
174
+ logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
175
+ args.train_batch_size * args.gradient_accumulation_steps * (
176
+ torch.distributed.get_world_size() if args.local_rank != -1 else 1))
177
+ logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
178
+ logger.info(" Total optimization steps = %d", args.max_steps)
179
+
180
+ global_step = args.start_step
181
+ tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
182
+ best_mrr=0.0
183
+ best_acc=0.0
184
+ # model.resize_token_embeddings(len(tokenizer))
185
+ model.zero_grad()
186
+
187
+ # Initialize early stopping parameters at the start of training
188
+ early_stopping_counter = 0
189
+ best_loss = None
190
+
191
+ for idx in range(args.start_epoch, int(args.num_train_epochs)):
192
+ bar = tqdm(train_dataloader,total=len(train_dataloader))
193
+ tr_num=0
194
+ train_loss=0
195
+ for step, batch in enumerate(bar):
196
+ inputs = batch[0].to(args.device)
197
+ labels=batch[1].to(args.device)
198
+ model.train()
199
+ loss,logits = model(inputs,labels)
200
+
201
+
202
+ if args.n_gpu > 1:
203
+ loss = loss.mean() # mean() to average on multi-gpu parallel training
204
+ if args.gradient_accumulation_steps > 1:
205
+ loss = loss / args.gradient_accumulation_steps
206
+
207
+ if args.fp16:
208
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
209
+ scaled_loss.backward()
210
+ torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
211
+ else:
212
+ loss.backward()
213
+ torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
214
+
215
+ tr_loss += loss.item()
216
+ tr_num+=1
217
+ train_loss+=loss.item()
218
+ if avg_loss==0:
219
+ avg_loss=tr_loss
220
+ avg_loss=round(train_loss/tr_num,5)
221
+ bar.set_description("epoch {} loss {}".format(idx,avg_loss))
222
+
223
+
224
+ if (step + 1) % args.gradient_accumulation_steps == 0:
225
+ optimizer.step()
226
+ optimizer.zero_grad()
227
+ scheduler.step()
228
+ global_step += 1
229
+ output_flag=True
230
+ avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
231
+ if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
232
+ logging_loss = tr_loss
233
+ tr_nb=global_step
234
+
235
+ if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
236
+
237
+ if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
238
+ results = evaluate(args, model, tokenizer,eval_when_training=True)
239
+ for key, value in results.items():
240
+ logger.info(" %s = %s", key, round(value,4))
241
+ # Save model checkpoint
242
+
243
+ if results['eval_acc']>best_acc:
244
+ best_acc=results['eval_acc']
245
+ logger.info(" "+"*"*20)
246
+ logger.info(" Best acc:%s",round(best_acc,4))
247
+ logger.info(" "+"*"*20)
248
+
249
+ checkpoint_prefix = 'checkpoint-best-acc'
250
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
251
+ if not os.path.exists(output_dir):
252
+ os.makedirs(output_dir)
253
+ model_to_save = model.module if hasattr(model,'module') else model
254
+ output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
255
+ torch.save(model_to_save.state_dict(), output_dir)
256
+ logger.info("Saving model checkpoint to %s", output_dir)
257
+
258
+ # Save a checkpoint at the end of every epoch
259
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
260
+ if not os.path.exists(output_dir):
261
+ os.makedirs(output_dir)
262
+ model_to_save = model.module if hasattr(model, 'module') else model
263
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
264
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
265
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
266
+
267
+ # Calculate average loss for the epoch
268
+ avg_loss = train_loss / tr_num
269
+
270
+ # Check for early stopping condition
271
+ if args.early_stopping_patience is not None:
272
+ if best_loss is None or avg_loss < best_loss - args.min_loss_delta:
273
+ best_loss = avg_loss
274
+ early_stopping_counter = 0
275
+ else:
276
+ early_stopping_counter += 1
277
+ if early_stopping_counter >= args.early_stopping_patience:
278
+ logger.info("Early stopping")
279
+ break # Exit the loop early
280
+
281
+
282
+
283
+
284
+ def evaluate(args, model, tokenizer,eval_when_training=False):
285
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
286
+ eval_output_dir = args.output_dir
287
+
288
+ eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)
289
+
290
+ if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
291
+ os.makedirs(eval_output_dir)
292
+
293
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
294
+ # Note that DistributedSampler samples randomly
295
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
296
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)
297
+
298
+ # multi-gpu evaluate
299
+ if args.n_gpu > 1 and eval_when_training is False:
300
+ model = torch.nn.DataParallel(model)
301
+
302
+ # Eval!
303
+ logger.info("***** Running evaluation *****")
304
+ logger.info(" Num examples = %d", len(eval_dataset))
305
+ logger.info(" Batch size = %d", args.eval_batch_size)
306
+ eval_loss = 0.0
307
+ nb_eval_steps = 0
308
+ model.eval()
309
+ logits=[]
310
+ labels=[]
311
+ for batch in eval_dataloader:
312
+ inputs = batch[0].to(args.device)
313
+ label=batch[1].to(args.device)
314
+ with torch.no_grad():
315
+ lm_loss,logit = model(inputs,label)
316
+ eval_loss += lm_loss.mean().item()
317
+ logits.append(logit.cpu().numpy())
318
+ labels.append(label.cpu().numpy())
319
+ nb_eval_steps += 1
320
+ logits=np.concatenate(logits,0)
321
+ labels=np.concatenate(labels,0)
322
+ preds=logits[:,0]>0.5
323
+ eval_acc=np.mean(labels==preds)
324
+ eval_loss = eval_loss / nb_eval_steps
325
+ perplexity = torch.tensor(eval_loss)
326
+
327
+ result = {
328
+ "eval_loss": float(perplexity),
329
+ "eval_acc":round(eval_acc,4),
330
+ }
331
+ return result
332
+
333
+ def test(args, model, tokenizer):
334
+ # Loop to handle MNLI double evaluation (matched, mis-matched)
335
+ eval_dataset = TextDataset(tokenizer, args,args.test_data_file)
336
+
337
+
338
+ args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
339
+ # Note that DistributedSampler samples randomly
340
+ eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
341
+ eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
342
+
343
+ # multi-gpu evaluate
344
+ if args.n_gpu > 1:
345
+ model = torch.nn.DataParallel(model)
346
+
347
+ # Eval!
348
+ logger.info("***** Running Test *****")
349
+ logger.info(" Num examples = %d", len(eval_dataset))
350
+ logger.info(" Batch size = %d", args.eval_batch_size)
351
+ eval_loss = 0.0
352
+ nb_eval_steps = 0
353
+ model.eval()
354
+ logits=[]
355
+ labels=[]
356
+ for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
357
+ inputs = batch[0].to(args.device)
358
+ label=batch[1].to(args.device)
359
+ with torch.no_grad():
360
+ logit = model(inputs)
361
+ logits.append(logit.cpu().numpy())
362
+ labels.append(label.cpu().numpy())
363
+
364
+ logits=np.concatenate(logits,0)
365
+ labels=np.concatenate(labels,0)
366
+ preds=logits[:,0]>0.5
367
+ with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
368
+ for example,pred in zip(eval_dataset.examples,preds):
369
+ if pred:
370
+ f.write(example.idx+'\t1\n')
371
+ else:
372
+ f.write(example.idx+'\t0\n')
373
+
374
+
375
+
376
+ def main():
377
+ parser = argparse.ArgumentParser()
378
+
379
+ ## Required parameters
380
+ parser.add_argument("--train_data_file", default=None, type=str, required=True,
381
+ help="The input training data file (a text file).")
382
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
383
+ help="The output directory where the model predictions and checkpoints will be written.")
384
+
385
+ ## Other parameters
386
+ parser.add_argument("--eval_data_file", default=None, type=str,
387
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
388
+ parser.add_argument("--test_data_file", default=None, type=str,
389
+ help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
390
+
391
+ parser.add_argument("--model_type", default="bert", type=str,
392
+ help="The model architecture to be fine-tuned.")
393
+ parser.add_argument("--model_name_or_path", default=None, type=str,
394
+ help="The model checkpoint for weights initialization.")
395
+
396
+ parser.add_argument("--mlm", action='store_true',
397
+ help="Train with masked-language modeling loss instead of language modeling.")
398
+ parser.add_argument("--mlm_probability", type=float, default=0.15,
399
+ help="Ratio of tokens to mask for masked language modeling loss")
400
+
401
+ parser.add_argument("--config_name", default="", type=str,
402
+ help="Optional pretrained config name or path if not the same as model_name_or_path")
403
+ parser.add_argument("--tokenizer_name", default="", type=str,
404
+ help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
405
+ parser.add_argument("--cache_dir", default="", type=str,
406
+ help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
407
+ parser.add_argument("--block_size", default=-1, type=int,
408
+ help="Optional input sequence length after tokenization."
409
+ "The training dataset will be truncated into blocks of this size for training."
410
+ "Defaults to the model max input length for single sentence inputs (taking into account special tokens).")
411
+ parser.add_argument("--do_train", action='store_true',
412
+ help="Whether to run training.")
413
+ parser.add_argument("--do_eval", action='store_true',
414
+ help="Whether to run eval on the dev set.")
415
+ parser.add_argument("--do_test", action='store_true',
416
+ help="Whether to run eval on the test set.")
417
+ parser.add_argument("--evaluate_during_training", action='store_true',
418
+ help="Run evaluation during training at each logging step.")
419
+ parser.add_argument("--do_lower_case", action='store_true',
420
+ help="Set this flag if you are using an uncased model.")
421
+
422
+ parser.add_argument("--train_batch_size", default=4, type=int,
423
+ help="Batch size per GPU/CPU for training.")
424
+ parser.add_argument("--eval_batch_size", default=4, type=int,
425
+ help="Batch size per GPU/CPU for evaluation.")
426
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
427
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
428
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
429
+ help="The initial learning rate for Adam.")
430
+ parser.add_argument("--weight_decay", default=0.0, type=float,
431
+ help="Weight decay if we apply some.")
432
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
433
+ help="Epsilon for Adam optimizer.")
434
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
435
+ help="Max gradient norm.")
436
+ parser.add_argument("--num_train_epochs", default=1.0, type=float,
437
+ help="Total number of training epochs to perform.")
438
+ parser.add_argument("--max_steps", default=-1, type=int,
439
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
440
+ parser.add_argument("--warmup_steps", default=0, type=int,
441
+ help="Linear warmup over warmup_steps.")
442
+
443
+ parser.add_argument('--logging_steps', type=int, default=50,
444
+ help="Log every X updates steps.")
445
+ parser.add_argument('--save_steps', type=int, default=50,
446
+ help="Save checkpoint every X updates steps.")
447
+ parser.add_argument('--save_total_limit', type=int, default=None,
448
+ help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
449
+ parser.add_argument("--eval_all_checkpoints", action='store_true',
450
+ help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
451
+ parser.add_argument("--no_cuda", action='store_true',
452
+ help="Avoid using CUDA when available")
453
+ parser.add_argument('--overwrite_output_dir', action='store_true',
454
+ help="Overwrite the content of the output directory")
455
+ parser.add_argument('--overwrite_cache', action='store_true',
456
+ help="Overwrite the cached training and evaluation sets")
457
+ parser.add_argument('--seed', type=int, default=42,
458
+ help="random seed for initialization")
459
+ parser.add_argument('--epoch', type=int, default=42,
460
+ help="Total number of training epochs to perform.")
461
+ parser.add_argument('--fp16', action='store_true',
462
+ help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
463
+ parser.add_argument('--fp16_opt_level', type=str, default='O1',
464
+ help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
465
+ "See details at https://nvidia.github.io/apex/amp.html")
466
+ parser.add_argument("--local_rank", type=int, default=-1,
467
+ help="For distributed training: local_rank")
468
+ parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
469
+ parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
470
+
471
+ # Add early stopping parameters and dropout probability parameters
472
+ parser.add_argument("--early_stopping_patience", type=int, default=None,
473
+ help="Number of epochs with no improvement after which training will be stopped.")
474
+ parser.add_argument("--min_loss_delta", type=float, default=0.001,
475
+ help="Minimum change in the loss required to qualify as an improvement.")
476
+ parser.add_argument('--dropout_probability', type=float, default=0, help='dropout probability')
477
+
478
+
479
+
480
+
481
+ args = parser.parse_args()
482
+
483
+ # Setup distant debugging if needed
484
+ if args.server_ip and args.server_port:
485
+ # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
486
+ import ptvsd
487
+ print("Waiting for debugger attach")
488
+ ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
489
+ ptvsd.wait_for_attach()
490
+
491
+ # Setup CUDA, GPU & distributed training
492
+ if args.local_rank == -1 or args.no_cuda:
493
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
494
+ args.n_gpu = torch.cuda.device_count()
495
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
496
+ torch.cuda.set_device(args.local_rank)
497
+ device = torch.device("cuda", args.local_rank)
498
+ torch.distributed.init_process_group(backend='nccl')
499
+ args.n_gpu = 1
500
+ args.device = device
501
+ args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
502
+ args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
503
+ # Setup logging
504
+ logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
505
+ datefmt='%m/%d/%Y %H:%M:%S',
506
+ level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
507
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
508
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
509
+
510
+
511
+
512
+ # Set seed
513
+ set_seed(args.seed)
514
+
515
+ # Load pretrained model and tokenizer
516
+ if args.local_rank not in [-1, 0]:
517
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
518
+
519
+ args.start_epoch = 0
520
+ args.start_step = 0
521
+ checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
522
+ if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
523
+ args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
524
+ args.config_name = os.path.join(checkpoint_last, 'config.json')
525
+ idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
526
+ with open(idx_file, encoding='utf-8') as idxf:
527
+ args.start_epoch = int(idxf.readlines()[0].strip()) + 1
528
+
529
+ step_file = os.path.join(checkpoint_last, 'step_file.txt')
530
+ if os.path.exists(step_file):
531
+ with open(step_file, encoding='utf-8') as stepf:
532
+ args.start_step = int(stepf.readlines()[0].strip())
533
+
534
+ logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
535
+
536
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
537
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
538
+ cache_dir=args.cache_dir if args.cache_dir else None)
539
+ config.num_labels=1
540
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
541
+ do_lower_case=args.do_lower_case,
542
+ cache_dir=args.cache_dir if args.cache_dir else None)
543
+ if args.block_size <= 0:
544
+ args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
545
+ args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
546
+ if args.model_name_or_path:
547
+ model = model_class.from_pretrained(args.model_name_or_path,
548
+ from_tf=bool('.ckpt' in args.model_name_or_path),
549
+ config=config,
550
+ cache_dir=args.cache_dir if args.cache_dir else None)
551
+ else:
552
+ model = model_class(config)
553
+
554
+ model=Model(model,config,tokenizer,args)
555
+ if args.local_rank == 0:
556
+ torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
557
+
558
+ logger.info("Training/evaluation parameters %s", args)
559
+
560
+ # Training
561
+ if args.do_train:
562
+ if args.local_rank not in [-1, 0]:
563
+ torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
564
+
565
+ train_dataset = TextDataset(tokenizer, args,args.train_data_file)
566
+ if args.local_rank == 0:
567
+ torch.distributed.barrier()
568
+
569
+ train(args, train_dataset, model, tokenizer)
570
+
571
+
572
+
573
+ # Evaluation
574
+ results = {}
575
+ if args.do_eval and args.local_rank in [-1, 0]:
576
+ checkpoint_prefix = 'epoch_5/subject_model.pth'
577
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
578
+ model.load_state_dict(torch.load(output_dir))
579
+ model.to(args.device)
580
+ result=evaluate(args, model, tokenizer)
581
+ logger.info("***** Eval results *****")
582
+ for key in sorted(result.keys()):
583
+ logger.info(" %s = %s", key, str(round(result[key],4)))
584
+
585
+ if args.do_test and args.local_rank in [-1, 0]:
586
+ checkpoint_prefix = 'epoch_5/subject_model.pth'
587
+ output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
588
+ model.load_state_dict(torch.load(output_dir))
589
+ model.to(args.device)
590
+ test(args, model, tokenizer)
591
+
592
+ return results
593
+
594
+
595
+ if __name__ == "__main__":
596
+ main()
597
+
598
+
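Note on test() above: when Model.forward is called without labels it returns sigmoid probabilities, so the 0.5 threshold on logits[:,0] is a threshold on the predicted probability of the positive (defective) class, and each line of predictions.txt pairs an example idx with that 0/1 decision. A minimal sketch of that final step (values are illustrative):

    import numpy as np

    probs = np.array([[0.91], [0.07], [0.58]])   # what the model returns without labels
    idxs = ["0", "1", "2"]
    preds = probs[:, 0] > 0.5
    for idx, p in zip(idxs, preds):
        print(f"{idx}\t{int(p)}")                # the "<idx>\t<label>" format read back by evaluator.py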
Code-Code/Defect-detection/code/train.sh ADDED
@@ -0,0 +1,17 @@
1
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
2
+ --output_dir=../model \
3
+ --model_type=roberta \
4
+ --tokenizer_name=microsoft/codebert-base \
5
+ --model_name_or_path=microsoft/codebert-base \
6
+ --do_train \
7
+ --train_data_file=../dataset/train.jsonl \
8
+ --eval_data_file=../dataset/valid.jsonl \
9
+ --test_data_file=../dataset/test.jsonl \
10
+ --epoch 5 \
11
+ --block_size 400 \
12
+ --train_batch_size 32 \
13
+ --eval_batch_size 64 \
14
+ --learning_rate 2e-5 \
15
+ --max_grad_norm 1.0 \
16
+ --evaluate_during_training \
17
+ --seed 123456
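run.py divides these totals by the number of visible devices (per_gpu_*_batch_size = batch_size // n_gpu), so with the two GPUs selected above each device sees 16 training and 32 evaluation examples of up to 400 tokens per forward pass. A one-line sanity check of that arithmetic:

    train_batch_size, eval_batch_size, n_gpu = 32, 64, 2   # values from train.sh and CUDA_VISIBLE_DEVICES=0,1
    print(train_batch_size // n_gpu, eval_batch_size // n_gpu)  # 16 32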
Code-Code/Defect-detection/dataset.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fafb4004eda1a4e1d4392b002e3de6f542d2a2b6701ec9758f25791bc9da49d6
3
+ size 14533467
Code-Code/code-refinement/code/bleu.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright 2017 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Python implementation of BLEU and smooth-BLEU.
17
+
18
+ This module provides a Python implementation of BLEU and smooth-BLEU.
19
+ Smooth BLEU is computed following the method outlined in the paper:
20
+ Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
21
+ evaluation metrics for machine translation. COLING 2004.
22
+ """
23
+
24
+ import collections
25
+ import math
26
+
27
+
28
+ def _get_ngrams(segment, max_order):
29
+ """Extracts all n-grams upto a given maximum order from an input segment.
30
+
31
+ Args:
32
+ segment: text segment from which n-grams will be extracted.
33
+ max_order: maximum length in tokens of the n-grams returned by this
34
+ methods.
35
+
36
+ Returns:
37
+ The Counter containing all n-grams upto max_order in segment
38
+ with a count of how many times each n-gram occurred.
39
+ """
40
+ ngram_counts = collections.Counter()
41
+ for order in range(1, max_order + 1):
42
+ for i in range(0, len(segment) - order + 1):
43
+ ngram = tuple(segment[i:i+order])
44
+ ngram_counts[ngram] += 1
45
+ return ngram_counts
46
+
47
+
48
+ def compute_bleu(reference_corpus, translation_corpus, max_order=4,
49
+ smooth=False):
50
+ """Computes BLEU score of translated segments against one or more references.
51
+
52
+ Args:
53
+ reference_corpus: list of lists of references for each translation. Each
54
+ reference should be tokenized into a list of tokens.
55
+ translation_corpus: list of translations to score. Each translation
56
+ should be tokenized into a list of tokens.
57
+ max_order: Maximum n-gram order to use when computing BLEU score.
58
+ smooth: Whether or not to apply Lin et al. 2004 smoothing.
59
+
60
+ Returns:
61
+ 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
62
+ precisions and brevity penalty.
63
+ """
64
+ matches_by_order = [0] * max_order
65
+ possible_matches_by_order = [0] * max_order
66
+ reference_length = 0
67
+ translation_length = 0
68
+ for (references, translation) in zip(reference_corpus,
69
+ translation_corpus):
70
+ reference_length += min(len(r) for r in references)
71
+ translation_length += len(translation)
72
+
73
+ merged_ref_ngram_counts = collections.Counter()
74
+ for reference in references:
75
+ merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
76
+ translation_ngram_counts = _get_ngrams(translation, max_order)
77
+ overlap = translation_ngram_counts & merged_ref_ngram_counts
78
+ for ngram in overlap:
79
+ matches_by_order[len(ngram)-1] += overlap[ngram]
80
+ for order in range(1, max_order+1):
81
+ possible_matches = len(translation) - order + 1
82
+ if possible_matches > 0:
83
+ possible_matches_by_order[order-1] += possible_matches
84
+
85
+ precisions = [0] * max_order
86
+ for i in range(0, max_order):
87
+ if smooth:
88
+ precisions[i] = ((matches_by_order[i] + 1.) /
89
+ (possible_matches_by_order[i] + 1.))
90
+ else:
91
+ if possible_matches_by_order[i] > 0:
92
+ precisions[i] = (float(matches_by_order[i]) /
93
+ possible_matches_by_order[i])
94
+ else:
95
+ precisions[i] = 0.0
96
+
97
+ if min(precisions) > 0:
98
+ p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
99
+ geo_mean = math.exp(p_log_sum)
100
+ else:
101
+ geo_mean = 0
102
+
103
+ ratio = float(translation_length) / reference_length
104
+
105
+ if ratio > 1.0:
106
+ bp = 1.
107
+ else:
108
+ bp = math.exp(1 - 1. / ratio)
109
+
110
+ bleu = geo_mean * bp
111
+
112
+ return (bleu, precisions, bp, ratio, translation_length, reference_length)
113
+
114
+
115
+ def _bleu(ref_file, trans_file, subword_option=None):
116
+ max_order = 4
117
+ smooth = True
118
+ ref_files = [ref_file]
119
+ reference_text = []
120
+ for reference_filename in ref_files:
121
+ with open(reference_filename) as fh:
122
+ reference_text.append(fh.readlines())
123
+ per_segment_references = []
124
+ for references in zip(*reference_text):
125
+ reference_list = []
126
+ for reference in references:
127
+ reference_list.append(reference.strip().split())
128
+ per_segment_references.append(reference_list)
129
+ translations = []
130
+ with open(trans_file) as fh:
131
+ for line in fh:
132
+ translations.append(line.strip().split())
133
+ bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)
134
+ return round(100 * bleu_score,2)
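
For reference, a minimal usage sketch of compute_bleu above on toy tokenized data (the token lists are invented for illustration; the repository itself only calls it through _bleu on files):

# Toy example: score one translation against one reference with smoothed BLEU-4.
references = [[["fix", "the", "null", "check"]]]   # per translation: a list of tokenized references
translations = [["fix", "the", "null", "check"]]   # tokenized system outputs
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(
    references, translations, max_order=4, smooth=True)
print(round(100 * bleu, 2))  # 100.0 for an exact match, mirroring _bleu's 0-100 scaling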
Code-Code/code-refinement/code/eval.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pretrained_model=microsoft/codebert-base
2
+ output_dir=../model
3
+ data_size=small
4
+
5
+ CUDA_VISIBLE_DEVICES=1 python run.py \
6
+ --do_test \
7
+ --model_type roberta \
8
+ --model_name_or_path $pretrained_model \
9
+ --config_name roberta-base \
10
+ --tokenizer_name roberta-base \
11
+ --load_model_path $output_dir/epoch_34/subject_model.pth \
12
+ --dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \
13
+ --output_dir $output_dir \
14
+ --max_source_length 256 \
15
+ --max_target_length 256 \
16
+ --beam_size 5 \
17
+ --eval_batch_size 16
Code-Code/code-refinement/code/evaluate.sh ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ python evaluator.py \
2
+ -ref ../data/small/valid.buggy-fixed.fixed \
3
+ -pre ../model/test_0.output
Code-Code/code-refinement/code/evaluator.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+ import logging
4
+ import sys
5
+
6
+ from bleu import _bleu
7
+
8
+ def main():
9
+ import argparse
10
+ parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for the code refinement dataset.')
11
+ parser.add_argument('--references', '-ref',help="filename of the labels, in txt format.")
12
+ parser.add_argument('--predictions', '-pre',help="filename of the leaderboard predictions, in txt format.")
13
+
14
+ args = parser.parse_args()
15
+
16
+ refs = [x.strip() for x in open(args.references, 'r', encoding='utf-8').readlines()]
17
+ pres = [x.strip() for x in open(args.predictions, 'r', encoding='utf-8').readlines()]
18
+
19
+ assert len(refs) == len(pres)
20
+
21
+ length = len(refs)
22
+ count = 0
23
+ for i in range(length):
24
+ r = refs[i]
25
+ p = pres[i]
26
+ if r == p:
27
+ count += 1
28
+ acc = round(count/length*100, 2)
29
+
30
+ bleu_score = round(_bleu(args.references, args.predictions),2)
31
+
32
+ print('BLEU:', bleu_score, '; Acc:', acc)
33
+
34
+ if __name__ == '__main__':
35
+ main()
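
A minimal sketch of what this evaluator computes, without going through the CLI; the two paths below are placeholders for a reference file and a prediction file with one sequence per line:

# Sketch: reproduce the evaluator's BLEU/Acc report programmatically (paths are placeholders).
from bleu import _bleu

ref_path, pred_path = "valid.buggy-fixed.fixed", "test_0.output"
with open(ref_path, encoding="utf-8") as f:
    refs = [x.strip() for x in f]
with open(pred_path, encoding="utf-8") as f:
    pres = [x.strip() for x in f]
acc = round(sum(r == p for r, p in zip(refs, pres)) / len(refs) * 100, 2)  # exact match %
print("BLEU:", _bleu(ref_path, pred_path), "; Acc:", acc)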
Code-Code/code-refinement/code/model.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch
7
+ from torch.autograd import Variable
8
+ import copy
9
+ class Seq2Seq(nn.Module):
10
+ """
11
+ Build Sequence-to-Sequence.
12
+
13
+ Parameters:
14
+
15
+ * `encoder`- encoder of seq2seq model. e.g. roberta
16
+ * `decoder`- decoder of seq2seq model. e.g. transformer
17
+ * `config`- configuration of encoder model.
18
+ * `beam_size`- beam size for beam search.
19
+ * `max_length`- max length of target for beam search.
20
+ * `sos_id`- start of symbol ids in target for beam search.
21
+ * `eos_id`- end of symbol ids in target for beam search.
22
+ """
23
+ def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
24
+ super(Seq2Seq, self).__init__()
25
+ self.encoder = encoder
26
+ self.decoder=decoder
27
+ self.config=config
28
+ self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
29
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
30
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
31
+ self.lsm = nn.LogSoftmax(dim=-1)
32
+ self.tie_weights()
33
+
34
+ self.beam_size=beam_size
35
+ self.max_length=max_length
36
+ self.sos_id=sos_id
37
+ self.eos_id=eos_id
38
+
39
+ def _tie_or_clone_weights(self, first_module, second_module):
40
+ """ Tie or clone module weights depending of weither we are using TorchScript or not
41
+ """
42
+ if self.config.torchscript:
43
+ first_module.weight = nn.Parameter(second_module.weight.clone())
44
+ else:
45
+ first_module.weight = second_module.weight
46
+
47
+ def tie_weights(self):
48
+ """ Make sure we are sharing the input and output embeddings.
49
+ Export to TorchScript can't handle parameter sharing so we are cloning them instead.
50
+ """
51
+ self._tie_or_clone_weights(self.lm_head,
52
+ self.encoder.embeddings.word_embeddings)
53
+
54
+ def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None, return_vec=None):
55
+ outputs = self.encoder(source_ids, attention_mask=source_mask)
56
+ if return_vec:
57
+ return outputs.pooler_output
58
+
59
+ encoder_output = outputs[0].permute([1,0,2]).contiguous()
60
+
61
+ if target_ids is not None:
62
+ attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
63
+ tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
64
+ out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
65
+ hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
66
+ lm_logits = self.lm_head(hidden_states)
67
+ # Shift so that tokens < n predict n
68
+ active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
69
+ shift_logits = lm_logits[..., :-1, :].contiguous()
70
+ shift_labels = target_ids[..., 1:].contiguous()
71
+ # Flatten the tokens
72
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
73
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
74
+ shift_labels.view(-1)[active_loss])
75
+
76
+ outputs = loss,loss*active_loss.sum(),active_loss.sum()
77
+ return outputs
78
+ else:
79
+ #Predict
80
+ preds=[]
81
+ zero=torch.cuda.LongTensor(1).fill_(0)
82
+ for i in range(source_ids.shape[0]):
83
+ context=encoder_output[:,i:i+1]
84
+ context_mask=source_mask[i:i+1,:]
85
+ beam = Beam(self.beam_size,self.sos_id,self.eos_id)
86
+ input_ids=beam.getCurrentState()
87
+ context=context.repeat(1, self.beam_size,1)
88
+ context_mask=context_mask.repeat(self.beam_size,1)
89
+ for _ in range(self.max_length):
90
+ if beam.done():
91
+ break
92
+ attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
93
+ tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
94
+ out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
95
+ out = torch.tanh(self.dense(out))
96
+ hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
97
+ out = self.lsm(self.lm_head(hidden_states)).data
98
+ beam.advance(out)
99
+ input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
100
+ input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
101
+ hyp= beam.getHyp(beam.getFinal())
102
+ pred=beam.buildTargetTokens(hyp)[:self.beam_size]
103
+ pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
104
+ preds.append(torch.cat(pred,0).unsqueeze(0))
105
+
106
+ preds=torch.cat(preds,0)
107
+ return preds
108
+
109
+
110
+
111
+ class Beam(object):
112
+ def __init__(self, size,sos,eos):
113
+ self.size = size
114
+ self.tt = torch.cuda
115
+ # The score for each translation on the beam.
116
+ self.scores = self.tt.FloatTensor(size).zero_()
117
+ # The backpointers at each time-step.
118
+ self.prevKs = []
119
+ # The outputs at each time-step.
120
+ self.nextYs = [self.tt.LongTensor(size)
121
+ .fill_(0)]
122
+ self.nextYs[0][0] = sos
123
+ # Has EOS topped the beam yet.
124
+ self._eos = eos
125
+ self.eosTop = False
126
+ # Time and k pair for finished.
127
+ self.finished = []
128
+
129
+ def getCurrentState(self):
130
+ "Get the outputs for the current timestep."
131
+ batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
132
+ return batch
133
+
134
+ def getCurrentOrigin(self):
135
+ "Get the backpointers for the current timestep."
136
+ return self.prevKs[-1]
137
+
138
+ def advance(self, wordLk):
139
+ """
140
+ Given prob over words for every last beam `wordLk` and attention
141
+ `attnOut`: Compute and update the beam search.
142
+
143
+ Parameters:
144
+
145
+ * `wordLk`- probs of advancing from the last step (K x words)
146
+ * `attnOut`- attention at the last step
147
+
148
+ Returns: True if beam search is complete.
149
+ """
150
+ numWords = wordLk.size(1)
151
+
152
+ # Sum the previous scores.
153
+ if len(self.prevKs) > 0:
154
+ beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
155
+
156
+ # Don't let EOS have children.
157
+ for i in range(self.nextYs[-1].size(0)):
158
+ if self.nextYs[-1][i] == self._eos:
159
+ beamLk[i] = -1e20
160
+ else:
161
+ beamLk = wordLk[0]
162
+ flatBeamLk = beamLk.view(-1)
163
+ bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
164
+
165
+ self.scores = bestScores
166
+
167
+ # bestScoresId is flattened beam x word array, so calculate which
168
+ # word and beam each score came from
169
+ prevK = bestScoresId // numWords
170
+ self.prevKs.append(prevK)
171
+ self.nextYs.append((bestScoresId - prevK * numWords))
172
+
173
+
174
+ for i in range(self.nextYs[-1].size(0)):
175
+ if self.nextYs[-1][i] == self._eos:
176
+ s = self.scores[i]
177
+ self.finished.append((s, len(self.nextYs) - 1, i))
178
+
179
+ # End condition is when top-of-beam is EOS and no global score.
180
+ if self.nextYs[-1][0] == self._eos:
181
+ self.eosTop = True
182
+
183
+ def done(self):
184
+ return self.eosTop and len(self.finished) >=self.size
185
+
186
+ def getFinal(self):
187
+ if len(self.finished) == 0:
188
+ self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
189
+ self.finished.sort(key=lambda a: -a[0])
190
+ if len(self.finished) != self.size:
191
+ unfinished=[]
192
+ for i in range(self.nextYs[-1].size(0)):
193
+ if self.nextYs[-1][i] != self._eos:
194
+ s = self.scores[i]
195
+ unfinished.append((s, len(self.nextYs) - 1, i))
196
+ unfinished.sort(key=lambda a: -a[0])
197
+ self.finished+=unfinished[:self.size-len(self.finished)]
198
+ return self.finished[:self.size]
199
+
200
+ def getHyp(self, beam_res):
201
+ """
202
+ Walk back to construct the full hypothesis.
203
+ """
204
+ hyps=[]
205
+ for _,timestep, k in beam_res:
206
+ hyp = []
207
+ for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
208
+ hyp.append(self.nextYs[j+1][k])
209
+ k = self.prevKs[j][k]
210
+ hyps.append(hyp[::-1])
211
+ return hyps
212
+
213
+ def buildTargetTokens(self, preds):
214
+ sentence=[]
215
+ for pred in preds:
216
+ tokens = []
217
+ for tok in pred:
218
+ if tok==self._eos:
219
+ break
220
+ tokens.append(tok)
221
+ sentence.append(tokens)
222
+ return sentence
223
+
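
For orientation, a construction sketch for this Seq2Seq wrapper, mirroring how run.py below assembles it; the model names and sizes follow train.sh, and note that the beam-search branch allocates torch.cuda tensors, so generation assumes a GPU:

# Sketch: wrap a CodeBERT encoder and a Transformer decoder in Seq2Seq, as run.py does.
import torch.nn as nn
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
from model import Seq2Seq

config = RobertaConfig.from_pretrained("roberta-base")
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
encoder = RobertaModel.from_pretrained("microsoft/codebert-base", config=config)
decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size,
                                           nhead=config.num_attention_heads)
decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
model = Seq2Seq(encoder=encoder, decoder=decoder, config=config,
                beam_size=5, max_length=256,
                sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id)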
Code-Code/code-refinement/code/run.py ADDED
@@ -0,0 +1,575 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tunes a pretrained RoBERTa/CodeBERT encoder paired with a Transformer decoder (Seq2Seq)
18
+ for the code refinement task: given a buggy code sequence, generate its fixed version.
19
+ Training and evaluation read parallel buggy-fixed files (see train.sh / eval.sh).
20
+ """
21
+
22
+ from __future__ import absolute_import
23
+ import os
24
+ import sys
25
+ import pickle
26
+ import torch
27
+ import json
28
+ import random
29
+ import logging
30
+ import argparse
31
+ import numpy as np
32
+ from io import open
33
+ from itertools import cycle
34
+ import torch.nn as nn
35
+ from model import Seq2Seq
36
+ from tqdm import tqdm, trange
37
+ from bleu import _bleu
38
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
39
+ from torch.utils.data.distributed import DistributedSampler
40
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
41
+ RobertaConfig, RobertaModel, RobertaTokenizer)
42
+ MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
43
+
44
+ logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
45
+ datefmt = '%m/%d/%Y %H:%M:%S',
46
+ level = logging.INFO)
47
+ logger = logging.getLogger(__name__)
48
+
49
+ class Example(object):
50
+ """A single training/test example."""
51
+ def __init__(self,
52
+ idx,
53
+ source,
54
+ target,
55
+ ):
56
+ self.idx = idx
57
+ self.source = source
58
+ self.target = target
59
+
60
+ # def read_examples(filename):
61
+ # """Read examples from filename."""
62
+ # examples=[]
63
+ # with open(filename,encoding="utf-8") as f:
64
+ # for idx,js in enumerate(json.load(f)):
65
+ # source=' '.join(js['old_comment_tokens'])
66
+ # target=' '.join(js['new_comment_tokens'])
67
+ # examples.append(
68
+ # Example(
69
+ # idx = idx,
70
+ # source=source,
71
+ # target=target,
72
+ # )
73
+ # )
74
+ # return examples
75
+ def read_examples(filename):
76
+ """Read examples from filename."""
77
+ examples=[]
78
+ assert len(filename.split(','))==2
79
+ src_filename = filename.split(',')[0]
80
+ trg_filename = filename.split(',')[1]
81
+ idx = 0
82
+ with open(src_filename) as f1,open(trg_filename) as f2:
83
+ for line1,line2 in zip(f1,f2):
84
+ examples.append(
85
+ Example(
86
+ idx = idx,
87
+ source=line1.strip(),
88
+ target=line2.strip(),
89
+ )
90
+ )
91
+ idx+=1
92
+ return examples
93
+
94
+ class InputFeatures(object):
95
+ """A single training/test features for a example."""
96
+ def __init__(self,
97
+ example_id,
98
+ source_ids,
99
+ target_ids,
100
+ source_mask,
101
+ target_mask,
102
+
103
+ ):
104
+ self.example_id = example_id
105
+ self.source_ids = source_ids
106
+ self.target_ids = target_ids
107
+ self.source_mask = source_mask
108
+ self.target_mask = target_mask
109
+
110
+
111
+
112
+ def convert_examples_to_features(examples, tokenizer, args,stage=None):
113
+ features = []
114
+ for example_index, example in enumerate(examples):
115
+ #source
116
+ source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
117
+ source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
118
+ source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
119
+ source_mask = [1] * (len(source_tokens))
120
+ padding_length = args.max_source_length - len(source_ids)
121
+ source_ids+=[tokenizer.pad_token_id]*padding_length
122
+ source_mask+=[0]*padding_length
123
+
124
+ #target
125
+ if stage=="test":
126
+ target_tokens = tokenizer.tokenize("None")
127
+ else:
128
+ target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
129
+ target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
130
+ target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
131
+ target_mask = [1] *len(target_ids)
132
+ padding_length = args.max_target_length - len(target_ids)
133
+ target_ids+=[tokenizer.pad_token_id]*padding_length
134
+ target_mask+=[0]*padding_length
135
+
136
+ if example_index < 5:
137
+ if stage=='train':
138
+ logger.info("*** Example ***")
139
+ logger.info("idx: {}".format(example.idx))
140
+
141
+ logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
142
+ logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
143
+ logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
144
+
145
+ logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
146
+ logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
147
+ logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
148
+
149
+ features.append(
150
+ InputFeatures(
151
+ example_index,
152
+ source_ids,
153
+ target_ids,
154
+ source_mask,
155
+ target_mask,
156
+ )
157
+ )
158
+ return features
159
+
160
+
161
+ def _truncate_seq_pair(tokens_a, tokens_b,tokens_c, max_length):
162
+ """Truncates a sequence pair in place to the maximum length."""
163
+
164
+ # This is a simple heuristic which will always truncate the longer sequence
165
+ # one token at a time. This makes more sense than truncating an equal percent
166
+ # of tokens from each, since if one sequence is very short then each token
167
+ # that's truncated likely contains more information than a longer sequence.
168
+
169
+ while True:
170
+ total_length = len(tokens_a) + len(tokens_b)+len(tokens_c)
171
+ if total_length <= max_length:
172
+ break
173
+ if len(tokens_a) >= len(tokens_b) and len(tokens_a)>=len(tokens_c):
174
+ tokens_a.pop()
175
+ elif len(tokens_b) >= len(tokens_a) and len(tokens_b)>=len(tokens_c):
176
+ tokens_b.pop()
177
+ else:
178
+ tokens_c.pop()
179
+
180
+ def set_seed(args):
181
+ """set random seed."""
182
+ random.seed(args.seed)
183
+ np.random.seed(args.seed)
184
+ torch.manual_seed(args.seed)
185
+ if args.n_gpu > 0:
186
+ torch.cuda.manual_seed_all(args.seed)
187
+
188
+ def main():
189
+ parser = argparse.ArgumentParser()
190
+
191
+ ## Required parameters
192
+ parser.add_argument("--model_type", default=None, type=str, required=True,
193
+ help="Model type: e.g. roberta")
194
+ parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
195
+ help="Path to pre-trained model: e.g. roberta-base" )
196
+ parser.add_argument("--tokenizer_name", default="", required=True,
197
+ help="Pretrained tokenizer name or path if not the same as model_name")
198
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
199
+ help="The output directory where the model predictions and checkpoints will be written.")
200
+ parser.add_argument("--load_model_path", default=None, type=str,
201
+ help="Path to trained model: Should contain the .bin files" )
202
+ ## Other parameters
203
+ parser.add_argument("--train_filename", default=None, type=str,
204
+ help="The train filenames (source and target files).")
205
+ parser.add_argument("--dev_filename", default=None, type=str,
206
+ help="The dev filename. (source and target files).")
207
+ parser.add_argument("--test_filename", default=None, type=str,
208
+ help="The test filename. (source and target files).")
209
+
210
+ parser.add_argument("--config_name", default="", type=str,
211
+ help="Pretrained config name or path if not the same as model_name")
212
+
213
+ parser.add_argument("--max_source_length", default=64, type=int,
214
+ help="The maximum total source sequence length after tokenization. Sequences longer "
215
+ "than this will be truncated, sequences shorter will be padded.")
216
+ parser.add_argument("--max_target_length", default=32, type=int,
217
+ help="The maximum total target sequence length after tokenization. Sequences longer "
218
+ "than this will be truncated, sequences shorter will be padded.")
219
+
220
+ parser.add_argument("--do_train", action='store_true',
221
+ help="Whether to run training.")
222
+ parser.add_argument("--do_eval", action='store_true',
223
+ help="Whether to run eval on the dev set.")
224
+ parser.add_argument("--do_test", action='store_true',
225
+ help="Whether to run eval on the dev set.")
226
+ parser.add_argument("--do_lower_case", action='store_true',
227
+ help="Set this flag if you are using an uncased model.")
228
+ parser.add_argument("--no_cuda", action='store_true',
229
+ help="Avoid using CUDA when available")
230
+
231
+ parser.add_argument("--train_batch_size", default=8, type=int,
232
+ help="Batch size per GPU/CPU for training.")
233
+ parser.add_argument("--eval_batch_size", default=8, type=int,
234
+ help="Batch size per GPU/CPU for evaluation.")
235
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
236
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
237
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
238
+ help="The initial learning rate for Adam.")
239
+ parser.add_argument("--beam_size", default=10, type=int,
240
+ help="beam size for beam search")
241
+ parser.add_argument("--weight_decay", default=0.0, type=float,
242
+ help="Weight deay if we apply some.")
243
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
244
+ help="Epsilon for Adam optimizer.")
245
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
246
+ help="Max gradient norm.")
247
+ parser.add_argument("--num_train_epochs", default=3.0, type=float,
248
+ help="Total number of training epochs to perform.")
249
+ parser.add_argument("--max_steps", default=-1, type=int,
250
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
251
+ parser.add_argument("--eval_steps", default=-1, type=int,
252
+ help="")
253
+ parser.add_argument("--train_steps", default=-1, type=int,
254
+ help="")
255
+ parser.add_argument("--warmup_steps", default=0, type=int,
256
+ help="Linear warmup over warmup_steps.")
257
+ parser.add_argument("--local_rank", type=int, default=-1,
258
+ help="For distributed training: local_rank")
259
+ parser.add_argument('--seed', type=int, default=42,
260
+ help="random seed for initialization")
261
+ # print arguments
262
+ args = parser.parse_args()
263
+ logger.info(args)
264
+
265
+ # Setup CUDA, GPU & distributed training
266
+ if args.local_rank == -1 or args.no_cuda:
267
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
268
+ args.n_gpu = torch.cuda.device_count()
269
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
270
+ torch.cuda.set_device(args.local_rank)
271
+ device = torch.device("cuda", args.local_rank)
272
+ torch.distributed.init_process_group(backend='nccl')
273
+ args.n_gpu = 1
274
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
275
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))
276
+ args.device = device
277
+ # Set seed
278
+ set_seed(args)
279
+ # make dir if output_dir not exist
280
+ if os.path.exists(args.output_dir) is False:
281
+ os.makedirs(args.output_dir)
282
+
283
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
284
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
285
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,do_lower_case=args.do_lower_case)
286
+
287
+ # build model
288
+ encoder = model_class.from_pretrained(args.model_name_or_path,config=config)
289
+ decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)
290
+ decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
291
+ model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
292
+ beam_size=args.beam_size,max_length=args.max_target_length,
293
+ sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
294
+
295
+ if args.load_model_path is not None:
296
+ logger.info("reload model from {}".format(args.load_model_path))
297
+ model.load_state_dict(torch.load(args.load_model_path))
298
+
299
+ model.to(device)
300
+ if args.local_rank != -1:
301
+ # Distributed training
302
+ try:
303
+ from apex.parallel import DistributedDataParallel as DDP
304
+ except ImportError:
305
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
306
+
307
+ model = DDP(model)
308
+ elif args.n_gpu > 1:
309
+ # multi-gpu training
310
+ model = torch.nn.DataParallel(model)
311
+
312
+
313
+
314
+
315
+ if args.do_train:
316
+ # Prepare training data loader
317
+ train_examples = read_examples(args.train_filename)
318
+ train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
319
+ all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
320
+ all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
321
+ all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
322
+ all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)
323
+ train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
324
+
325
+ if args.local_rank == -1:
326
+ train_sampler = RandomSampler(train_data)
327
+ else:
328
+ train_sampler = DistributedSampler(train_data)
329
+ train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)
330
+
331
+ num_train_optimization_steps = args.train_steps
332
+
333
+ # Prepare optimizer and schedule (linear warmup and decay)
334
+ no_decay = ['bias', 'LayerNorm.weight']
335
+ optimizer_grouped_parameters = [
336
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
337
+ 'weight_decay': args.weight_decay},
338
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
339
+ ]
340
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
341
+ scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
342
+ num_training_steps=num_train_optimization_steps)
343
+
344
+
345
+ #Start training
346
+ logger.info("***** Running training *****")
347
+ logger.info(" Num examples = %d", len(train_examples))
348
+ logger.info(" Batch size = %d", args.train_batch_size)
349
+ logger.info(" Num epoch = %d", num_train_optimization_steps*args.train_batch_size//len(train_examples))
350
+
351
+
352
+ model.train()
353
+ dev_dataset={}
354
+ nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6
355
+ bar = range(num_train_optimization_steps)
356
+ train_dataloader=cycle(train_dataloader)
357
+ eval_flag = True
358
+ idx=0
359
+ for step in bar:
360
+ batch = next(train_dataloader)
361
+ batch = tuple(t.to(device) for t in batch)
362
+ source_ids,source_mask,target_ids,target_mask = batch
363
+ loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)
364
+
365
+ if args.n_gpu > 1:
366
+ loss = loss.mean() # mean() to average on multi-gpu.
367
+ if args.gradient_accumulation_steps > 1:
368
+ loss = loss / args.gradient_accumulation_steps
369
+ tr_loss += loss.item()
370
+ train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
371
+ if (global_step + 1)%100==0:
372
+ logger.info(" step {} loss {} batch-{}".format(global_step + 1,train_loss, ((global_step+1)*args.train_batch_size) / len(train_examples)))
373
+ nb_tr_examples += source_ids.size(0)
374
+ nb_tr_steps += 1
375
+ loss.backward()
376
+
377
+ if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
378
+ #Update parameters
379
+ optimizer.step()
380
+ optimizer.zero_grad()
381
+ scheduler.step()
382
+ global_step += 1
383
+ eval_flag = True
384
+
385
+
386
+ if args.do_eval and ((global_step + 1) %args.eval_steps == 0) and eval_flag:
387
+ #Eval model with dev dataset
388
+ tr_loss = 0
389
+ nb_tr_examples, nb_tr_steps = 0, 0
390
+ eval_flag=False
391
+ if 'dev_loss' in dev_dataset:
392
+ eval_examples,eval_data=dev_dataset['dev_loss']
393
+ else:
394
+ eval_examples = read_examples(args.dev_filename)
395
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
396
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
397
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
398
+ all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
399
+ all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long)
400
+ eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
401
+ dev_dataset['dev_loss']=eval_examples,eval_data
402
+ eval_sampler = SequentialSampler(eval_data)
403
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
404
+
405
+ logger.info("\n***** Running evaluation *****")
406
+ logger.info(" Num examples = %d", len(eval_examples))
407
+ logger.info(" Batch size = %d", args.eval_batch_size)
408
+
409
+ #Start Evaling model
410
+ model.eval()
411
+ eval_loss,tokens_num = 0,0
412
+ for batch in eval_dataloader:
413
+ batch = tuple(t.to(device) for t in batch)
414
+ source_ids,source_mask,target_ids,target_mask = batch
415
+
416
+ with torch.no_grad():
417
+ _,loss,num = model(source_ids=source_ids,source_mask=source_mask,
418
+ target_ids=target_ids,target_mask=target_mask)
419
+ eval_loss += loss.sum().item()
420
+ tokens_num += num.sum().item()
421
+ # Print loss of dev dataset
422
+ model.train()
423
+ eval_loss = eval_loss / tokens_num
424
+ result = {'eval_ppl': round(np.exp(eval_loss),5),
425
+ 'global_step': global_step+1,
426
+ 'train_loss': round(train_loss,5)}
427
+ for key in sorted(result.keys()):
428
+ logger.info(" %s = %s", key, str(result[key]))
429
+ logger.info(" "+"*"*20)
430
+
431
+ #save last checkpoint
432
+ last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
433
+ if not os.path.exists(last_output_dir):
434
+ os.makedirs(last_output_dir)
435
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
436
+ output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
437
+ torch.save(model_to_save.state_dict(), output_model_file)
438
+ if eval_loss<best_loss:
439
+ logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
440
+ logger.info(" "+"*"*20)
441
+ best_loss=eval_loss
442
+ # Save best checkpoint for best ppl
443
+ output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
444
+ if not os.path.exists(output_dir):
445
+ os.makedirs(output_dir)
446
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
447
+ output_model_file = os.path.join(output_dir, "pytorch_model.bin")
448
+ torch.save(model_to_save.state_dict(), output_model_file)
449
+
450
+
451
+ #Calculate bleu
452
+ if 'dev_bleu' in dev_dataset:
453
+ eval_examples,eval_data=dev_dataset['dev_bleu']
454
+ else:
455
+ eval_examples = read_examples(args.dev_filename)
456
+ eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
457
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
458
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
459
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
460
+ eval_data = TensorDataset(all_source_ids,all_source_mask)
461
+ dev_dataset['dev_bleu']=eval_examples,eval_data
462
+
463
+
464
+ eval_sampler = SequentialSampler(eval_data)
465
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
466
+
467
+ model.eval()
468
+ p=[]
469
+ for batch in eval_dataloader:
470
+ batch = tuple(t.to(device) for t in batch)
471
+ source_ids,source_mask= batch
472
+ with torch.no_grad():
473
+ preds = model(source_ids=source_ids,source_mask=source_mask)
474
+ for pred in preds:
475
+ t=pred[0].cpu().numpy()
476
+ t=list(t)
477
+ if 0 in t:
478
+ t=t[:t.index(0)]
479
+ text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
480
+ p.append(text)
481
+ model.train()
482
+ predictions=[]
483
+ accs=[]
484
+ with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
485
+ for ref,gold in zip(p,eval_examples):
486
+ predictions.append(str(gold.idx)+'\t'+ref)
487
+ f.write(ref+'\n')
488
+ f1.write(gold.target+'\n')
489
+ accs.append(ref==gold.target)
490
+
491
+ dev_bleu=round(_bleu(os.path.join(args.output_dir, "dev.gold"), os.path.join(args.output_dir, "dev.output")),2)
492
+ logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
493
+ logger.info(" %s = %s "%("xMatch",str(round(np.mean(accs)*100,4))))
494
+ logger.info(" "+"*"*20)
495
+ if dev_bleu>best_bleu:
496
+ logger.info(" Best bleu:%s",dev_bleu)
497
+ logger.info(" "+"*"*20)
498
+ best_bleu=dev_bleu
499
+ # Save best checkpoint for best bleu
500
+ output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
501
+ if not os.path.exists(output_dir):
502
+ os.makedirs(output_dir)
503
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
504
+ output_model_file = os.path.join(output_dir, "pytorch_model.bin")
505
+ torch.save(model_to_save.state_dict(), output_model_file)
506
+
507
+ # Save a checkpoint at the end of each epoch
508
+ if int((global_step+1)*args.train_batch_size / len(train_examples)) == idx+1:
509
+ logger.info(" batch:%s",idx)
510
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
511
+ if not os.path.exists(output_dir):
512
+ os.makedirs(output_dir)
513
+ model_to_save = model.module if hasattr(model, 'module') else model
514
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
515
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
516
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
517
+ idx = idx+1
518
+
519
+ if args.do_test:
520
+ files=[]
521
+ if args.dev_filename is not None:
522
+ files.append(args.dev_filename)
523
+ if args.test_filename is not None:
524
+ files.append(args.test_filename)
525
+ for idx,file in enumerate(files):
526
+ logger.info("Test file: {}".format(file))
527
+ eval_examples = read_examples(file)
528
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
529
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
530
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
531
+ eval_data = TensorDataset(all_source_ids,all_source_mask)
532
+
533
+ # Calculate bleu
534
+ eval_sampler = SequentialSampler(eval_data)
535
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
536
+
537
+ model.eval()
538
+ p=[]
539
+ for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
540
+ batch = tuple(t.to(device) for t in batch)
541
+ source_ids,source_mask= batch
542
+ with torch.no_grad():
543
+ preds = model(source_ids=source_ids,source_mask=source_mask)
544
+ for pred in preds:
545
+ t=pred[0].cpu().numpy()
546
+ t=list(t)
547
+ if 0 in t:
548
+ t=t[:t.index(0)]
549
+ text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
550
+ p.append(text)
551
+ model.train()
552
+ predictions=[]
553
+ accs=[]
554
+ with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
555
+ for ref,gold in zip(p,eval_examples):
556
+ predictions.append(str(gold.idx)+'\t'+ref)
557
+ f.write(ref+'\n')
558
+ f1.write(gold.target+'\n')
559
+ accs.append(ref==gold.target)
560
+ dev_bleu=round(_bleu(os.path.join(args.output_dir, "test_{}.gold".format(str(idx))).format(file),
561
+ os.path.join(args.output_dir, "test_{}.output".format(str(idx))).format(file)),2)
562
+ logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
563
+ logger.info(" %s = %s "%("xMatch",str(round(np.mean(accs)*100,4))))
564
+ logger.info(" "+"*"*20)
565
+
566
+
567
+
568
+
569
+
570
+
571
+
572
+ if __name__ == "__main__":
573
+ main()
574
+
575
+
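
A small sketch of the data path used above, read_examples followed by convert_examples_to_features; the file paths and sequence lengths follow eval.sh and are otherwise placeholders:

# Sketch: feature preparation as used in run.py (paths and lengths are illustrative).
from argparse import Namespace
from transformers import RobertaTokenizer
from run import read_examples, convert_examples_to_features

args = Namespace(max_source_length=256, max_target_length=256)
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
examples = read_examples("../data/small/valid.buggy-fixed.buggy,"
                         "../data/small/valid.buggy-fixed.fixed")
features = convert_examples_to_features(examples[:2], tokenizer, args, stage="dev")
print(len(features[0].source_ids), len(features[0].target_ids))  # 256 256 after padding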
Code-Code/code-refinement/code/train.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ pretrained_model=microsoft/codebert-base
2
+ output_dir=../model
3
+ data_size=small
4
+
5
+ CUDA_VISIBLE_DEVICES=1 python run.py \
6
+ --do_train \
7
+ --do_eval \
8
+ --model_type roberta \
9
+ --model_name_or_path $pretrained_model \
10
+ --config_name roberta-base \
11
+ --tokenizer_name roberta-base \
12
+ --train_filename ../data/$data_size/train.buggy-fixed.buggy,../data/$data_size/train.buggy-fixed.fixed \
13
+ --dev_filename ../data/$data_size/valid.buggy-fixed.buggy,../data/$data_size/valid.buggy-fixed.fixed \
14
+ --output_dir $output_dir \
15
+ --max_source_length 256 \
16
+ --max_target_length 256 \
17
+ --beam_size 5 \
18
+ --train_batch_size 16 \
19
+ --eval_batch_size 16 \
20
+ --learning_rate 5e-5 \
21
+ --train_steps 100000 \
22
+ --eval_steps 5000
Code-Code/code-refinement/dataset.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:777c3c2c8db2e206e35336adda286979caea2dd7627f86be63ad9313d6dd5c29
3
+ size 9317188
Code-Text/code-to-text/code/bleu.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+
3
+ '''
4
+ This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
5
+ '''
6
+
7
+ # $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
8
+
9
+ '''Provides:
10
+
11
+ cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
12
+ cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
13
+ score_cooked(alltest, n=4): Score a list of cooked test sentences.
14
+
15
+ score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
16
+
17
+ The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
18
+ '''
19
+
20
+ import sys, math, re, xml.sax.saxutils
21
+ import subprocess
22
+ import os
23
+
24
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
25
+ nonorm = 0
26
+
27
+ preserve_case = False
28
+ eff_ref_len = "shortest"
29
+
30
+ normalize1 = [
31
+ ('<skipped>', ''), # strip "skipped" tags
32
+ (r'-\n', ''), # strip end-of-line hyphenation and join lines
33
+ (r'\n', ' '), # join lines
34
+ # (r'(\d)\s+(?=\d)', r'\1'), # join digits
35
+ ]
36
+ normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
37
+
38
+ normalize2 = [
39
+ (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
40
+ (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
41
+ (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
42
+ (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
43
+ ]
44
+ normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
45
+
46
+ def normalize(s):
47
+ '''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
48
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
49
+ if (nonorm):
50
+ return s.split()
51
+ if type(s) is not str:
52
+ s = " ".join(s)
53
+ # language-independent part:
54
+ for (pattern, replace) in normalize1:
55
+ s = re.sub(pattern, replace, s)
56
+ s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
57
+ # language-dependent part (assuming Western languages):
58
+ s = " %s " % s
59
+ if not preserve_case:
60
+ s = s.lower() # this might not be identical to the original
61
+ for (pattern, replace) in normalize2:
62
+ s = re.sub(pattern, replace, s)
63
+ return s.split()
64
+
65
+ def count_ngrams(words, n=4):
66
+ counts = {}
67
+ for k in range(1,n+1):
68
+ for i in range(len(words)-k+1):
69
+ ngram = tuple(words[i:i+k])
70
+ counts[ngram] = counts.get(ngram, 0)+1
71
+ return counts
72
+
73
+ def cook_refs(refs, n=4):
74
+ '''Takes a list of reference sentences for a single segment
75
+ and returns an object that encapsulates everything that BLEU
76
+ needs to know about them.'''
77
+
78
+ refs = [normalize(ref) for ref in refs]
79
+ maxcounts = {}
80
+ for ref in refs:
81
+ counts = count_ngrams(ref, n)
82
+ for (ngram,count) in counts.items():
83
+ maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
84
+ return ([len(ref) for ref in refs], maxcounts)
85
+
86
+ def cook_test(test, item, n=4):
87
+ '''Takes a test sentence and returns an object that
88
+ encapsulates everything that BLEU needs to know about it.'''
89
+ (reflens, refmaxcounts)=item
90
+ test = normalize(test)
91
+ result = {}
92
+ result["testlen"] = len(test)
93
+
94
+ # Calculate effective reference sentence length.
95
+
96
+ if eff_ref_len == "shortest":
97
+ result["reflen"] = min(reflens)
98
+ elif eff_ref_len == "average":
99
+ result["reflen"] = float(sum(reflens))/len(reflens)
100
+ elif eff_ref_len == "closest":
101
+ min_diff = None
102
+ for reflen in reflens:
103
+ if min_diff is None or abs(reflen-len(test)) < min_diff:
104
+ min_diff = abs(reflen-len(test))
105
+ result['reflen'] = reflen
106
+
107
+ result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
108
+
109
+ result['correct'] = [0]*n
110
+ counts = count_ngrams(test, n)
111
+ for (ngram, count) in counts.items():
112
+ result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
113
+
114
+ return result
115
+
116
+ def score_cooked(allcomps, n=4, ground=0, smooth=1):
117
+ totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
118
+ for comps in allcomps:
119
+ for key in ['testlen','reflen']:
120
+ totalcomps[key] += comps[key]
121
+ for key in ['guess','correct']:
122
+ for k in range(n):
123
+ totalcomps[key][k] += comps[key][k]
124
+ logbleu = 0.0
125
+ all_bleus = []
126
+ for k in range(n):
127
+ correct = totalcomps['correct'][k]
128
+ guess = totalcomps['guess'][k]
129
+ addsmooth = 0
130
+ if smooth == 1 and k > 0:
131
+ addsmooth = 1
132
+ logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
133
+ if guess == 0:
134
+ all_bleus.append(-10000000)
135
+ else:
136
+ all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
137
+
138
+ logbleu /= float(n)
139
+ all_bleus.insert(0, logbleu)
140
+
141
+ brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
142
+ for i in range(len(all_bleus)):
143
+ if i ==0:
144
+ all_bleus[i] += brevPenalty
145
+ all_bleus[i] = math.exp(all_bleus[i])
146
+ return all_bleus
147
+
148
+ def bleu(refs, candidate, ground=0, smooth=1):
149
+ refs = cook_refs(refs)
150
+ test = cook_test(candidate, refs)
151
+ return score_cooked([test], ground=ground, smooth=smooth)
152
+
153
+ def splitPuncts(line):
154
+ return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
155
+
156
+ def computeMaps(predictions, goldfile):
157
+ predictionMap = {}
158
+ goldMap = {}
159
+ gf = open(goldfile, 'r')
160
+
161
+ for row in predictions:
162
+ cols = row.strip().split('\t')
163
+ if len(cols) == 1:
164
+ (rid, pred) = (cols[0], '')
165
+ else:
166
+ (rid, pred) = (cols[0], cols[1])
167
+ predictionMap[rid] = [splitPuncts(pred.strip().lower())]
168
+
169
+ for row in gf:
170
+ (rid, pred) = row.split('\t')
171
+ if rid in predictionMap: # Only insert if the id exists for the method
172
+ if rid not in goldMap:
173
+ goldMap[rid] = []
174
+ goldMap[rid].append(splitPuncts(pred.strip().lower()))
175
+
176
+ sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
177
+ return (goldMap, predictionMap)
178
+
179
+
180
+ #m1 is the reference map
181
+ #m2 is the prediction map
182
+ def bleuFromMaps(m1, m2):
183
+ score = [0] * 5
184
+ num = 0.0
185
+
186
+ for key in m1:
187
+ if key in m2:
188
+ bl = bleu(m1[key], m2[key][0])
189
+ score = [ score[i] + bl[i] for i in range(0, len(bl))]
190
+ num += 1
191
+ return [s * 100.0 / num for s in score]
192
+
193
+ if __name__ == '__main__':
194
+ reference_file = sys.argv[1]
195
+ predictions = []
196
+ for row in sys.stdin:
197
+ predictions.append(row)
198
+ (goldMap, predictionMap) = computeMaps(predictions, reference_file)
199
+ print (bleuFromMaps(goldMap, predictionMap)[0])
200
+
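
A minimal sketch of the map-based interface above; the id/summary pair and the toy.gold file are made up for illustration:

# Sketch: score predictions with computeMaps/bleuFromMaps on toy data.
from bleu import computeMaps, bleuFromMaps

with open("toy.gold", "w") as f:                      # hypothetical gold file: "<id>\t<reference>"
    f.write("0\treturns the sum of two numbers\n")

predictions = ["0\treturn the sum of two numbers"]    # rows of "<id>\t<hypothesis>"
gold_map, pred_map = computeMaps(predictions, "toy.gold")
print(round(bleuFromMaps(gold_map, pred_map)[0], 2))  # smoothed corpus BLEU-4, 0-100 scale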
Code-Text/code-to-text/code/evaluate.sh ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ lang=python
2
+ gold_file=../model/$lang/dev.gold
3
+ output_file=../model/$lang/dev.output
4
+
5
+ python evaluator.py \
6
+ $gold_file < $output_file
Code-Text/code-to-text/code/evaluator.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+
3
+ '''
4
+ This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
5
+ '''
6
+
7
+ # $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
8
+
9
+ '''Provides:
10
+
11
+ cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
12
+ cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
13
+ score_cooked(alltest, n=4): Score a list of cooked test sentences.
14
+
15
+ score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
16
+
17
+ The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
18
+ '''
19
+
20
+ import sys, math, re, xml.sax.saxutils
21
+ import subprocess
22
+ import os
23
+
24
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
25
+ nonorm = 0
26
+
27
+ preserve_case = False
28
+ eff_ref_len = "shortest"
29
+
30
+ normalize1 = [
31
+ ('<skipped>', ''), # strip "skipped" tags
32
+ (r'-\n', ''), # strip end-of-line hyphenation and join lines
33
+ (r'\n', ' '), # join lines
34
+ # (r'(\d)\s+(?=\d)', r'\1'), # join digits
35
+ ]
36
+ normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
37
+
38
+ normalize2 = [
39
+ (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])',r' \1 '), # tokenize punctuation. apostrophe is missing
40
+ (r'([^0-9])([\.,])',r'\1 \2 '), # tokenize period and comma unless preceded by a digit
41
+ (r'([\.,])([^0-9])',r' \1 \2'), # tokenize period and comma unless followed by a digit
42
+ (r'([0-9])(-)',r'\1 \2 ') # tokenize dash when preceded by a digit
43
+ ]
44
+ normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
45
+
46
+ def normalize(s):
47
+ '''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
48
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
49
+ if (nonorm):
50
+ return s.split()
51
+ if type(s) is not str:
52
+ s = " ".join(s)
53
+ # language-independent part:
54
+ for (pattern, replace) in normalize1:
55
+ s = re.sub(pattern, replace, s)
56
+ s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
57
+ # language-dependent part (assuming Western languages):
58
+ s = " %s " % s
59
+ if not preserve_case:
60
+ s = s.lower() # this might not be identical to the original
61
+ for (pattern, replace) in normalize2:
62
+ s = re.sub(pattern, replace, s)
63
+ return s.split()
64
+
65
+ def count_ngrams(words, n=4):
66
+ counts = {}
67
+ for k in range(1,n+1):
68
+ for i in range(len(words)-k+1):
69
+ ngram = tuple(words[i:i+k])
70
+ counts[ngram] = counts.get(ngram, 0)+1
71
+ return counts
72
+
73
+ def cook_refs(refs, n=4):
74
+ '''Takes a list of reference sentences for a single segment
75
+ and returns an object that encapsulates everything that BLEU
76
+ needs to know about them.'''
77
+
78
+ refs = [normalize(ref) for ref in refs]
79
+ maxcounts = {}
80
+ for ref in refs:
81
+ counts = count_ngrams(ref, n)
82
+ for (ngram,count) in counts.items():
83
+ maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
84
+ return ([len(ref) for ref in refs], maxcounts)
85
+
86
+ def cook_test(test, item, n=4):
87
+ '''Takes a test sentence and returns an object that
88
+ encapsulates everything that BLEU needs to know about it.'''
89
+ (reflens, refmaxcounts)=item
90
+ test = normalize(test)
91
+ result = {}
92
+ result["testlen"] = len(test)
93
+
94
+ # Calculate effective reference sentence length.
95
+
96
+ if eff_ref_len == "shortest":
97
+ result["reflen"] = min(reflens)
98
+ elif eff_ref_len == "average":
99
+ result["reflen"] = float(sum(reflens))/len(reflens)
100
+ elif eff_ref_len == "closest":
101
+ min_diff = None
102
+ for reflen in reflens:
103
+ if min_diff is None or abs(reflen-len(test)) < min_diff:
104
+ min_diff = abs(reflen-len(test))
105
+ result['reflen'] = reflen
106
+
107
+ result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
108
+
109
+ result['correct'] = [0]*n
110
+ counts = count_ngrams(test, n)
111
+ for (ngram, count) in counts.items():
112
+ result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
113
+
114
+ return result
115
+
116
+ def score_cooked(allcomps, n=4, ground=0, smooth=1):
117
+ totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
118
+ for comps in allcomps:
119
+ for key in ['testlen','reflen']:
120
+ totalcomps[key] += comps[key]
121
+ for key in ['guess','correct']:
122
+ for k in range(n):
123
+ totalcomps[key][k] += comps[key][k]
124
+ logbleu = 0.0
125
+ all_bleus = []
126
+ for k in range(n):
127
+ correct = totalcomps['correct'][k]
128
+ guess = totalcomps['guess'][k]
129
+ addsmooth = 0
130
+ if smooth == 1 and k > 0:
131
+ addsmooth = 1
132
+ logbleu += math.log(correct + addsmooth + sys.float_info.min)-math.log(guess + addsmooth+ sys.float_info.min)
133
+ if guess == 0:
134
+ all_bleus.append(-10000000)
135
+ else:
136
+ all_bleus.append(math.log(correct + sys.float_info.min)-math.log( guess ))
137
+
138
+ logbleu /= float(n)
139
+ all_bleus.insert(0, logbleu)
140
+
141
+ brevPenalty = min(0,1-float(totalcomps['reflen'] + 1)/(totalcomps['testlen'] + 1))
142
+ for i in range(len(all_bleus)):
143
+ if i ==0:
144
+ all_bleus[i] += brevPenalty
145
+ all_bleus[i] = math.exp(all_bleus[i])
146
+ return all_bleus
147
+
148
+ def bleu(refs, candidate, ground=0, smooth=1):
149
+ refs = cook_refs(refs)
150
+ test = cook_test(candidate, refs)
151
+ return score_cooked([test], ground=ground, smooth=smooth)
152
+
153
+ def splitPuncts(line):
154
+ return ' '.join(re.findall(r"[\w]+|[^\s\w]", line))
155
+
156
+ def computeMaps(predictions, goldfile):
157
+ predictionMap = {}
158
+ goldMap = {}
159
+ gf = open(goldfile, 'r')
160
+
161
+ for row in predictions:
162
+ cols = row.strip().split('\t')
163
+ if len(cols) == 1:
164
+ (rid, pred) = (cols[0], '')
165
+ else:
166
+ (rid, pred) = (cols[0], cols[1])
167
+ predictionMap[rid] = [splitPuncts(pred.strip().lower())]
168
+
169
+ for row in gf:
170
+ (rid, pred) = row.split('\t')
171
+ if rid in predictionMap: # Only insert if the id exists for the method
172
+ if rid not in goldMap:
173
+ goldMap[rid] = []
174
+ goldMap[rid].append(splitPuncts(pred.strip().lower()))
175
+
176
+ sys.stderr.write('Total: ' + str(len(goldMap)) + '\n')
177
+ return (goldMap, predictionMap)
178
+
179
+
180
+ #m1 is the reference map
181
+ #m2 is the prediction map
182
+ def bleuFromMaps(m1, m2):
183
+ score = [0] * 5
184
+ num = 0.0
185
+
186
+ for key in m1:
187
+ if key in m2:
188
+ bl = bleu(m1[key], m2[key][0])
189
+ score = [ score[i] + bl[i] for i in range(0, len(bl))]
190
+ num += 1
191
+ return [s * 100.0 / num for s in score]
192
+
193
+ if __name__ == '__main__':
194
+ reference_file = sys.argv[1]
195
+ predictions = []
196
+ for row in sys.stdin:
197
+ predictions.append(row)
198
+ (goldMap, predictionMap) = computeMaps(predictions, reference_file)
199
+ print (bleuFromMaps(goldMap, predictionMap)[0])
200
+
Code-Text/code-to-text/code/model.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch
7
+ from torch.autograd import Variable
8
+ import copy
9
+ class Seq2Seq(nn.Module):
10
+ """
11
+ Build Sequence-to-Sequence.
12
+
13
+ Parameters:
14
+
15
+ * `encoder`- encoder of seq2seq model. e.g. roberta
16
+ * `decoder`- decoder of seq2seq model. e.g. transformer
17
+ * `config`- configuration of encoder model.
18
+ * `beam_size`- beam size for beam search.
19
+ * `max_length`- max length of target for beam search.
20
+ * `sos_id`- start of symbol ids in target for beam search.
21
+ * `eos_id`- end of symbol ids in target for beam search.
22
+ """
23
+ def __init__(self, encoder,decoder,config,beam_size=None,max_length=None,sos_id=None,eos_id=None):
24
+ super(Seq2Seq, self).__init__()
25
+ self.encoder = encoder
26
+ self.decoder=decoder
27
+ self.config=config
28
+ self.register_buffer("bias", torch.tril(torch.ones(2048, 2048)))
29
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
30
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
31
+ self.lsm = nn.LogSoftmax(dim=-1)
32
+ self.tie_weights()
33
+
34
+ self.beam_size=beam_size
35
+ self.max_length=max_length
36
+ self.sos_id=sos_id
37
+ self.eos_id=eos_id
38
+
39
+ def _tie_or_clone_weights(self, first_module, second_module):
40
+ """ Tie or clone module weights depending of weither we are using TorchScript or not
41
+ """
42
+ if self.config.torchscript:
43
+ first_module.weight = nn.Parameter(second_module.weight.clone())
44
+ else:
45
+ first_module.weight = second_module.weight
46
+
47
+ def tie_weights(self):
48
+ """ Make sure we are sharing the input and output embeddings.
49
+ Export to TorchScript can't handle parameter sharing so we are cloning them instead.
50
+ """
51
+ self._tie_or_clone_weights(self.lm_head,
52
+ self.encoder.embeddings.word_embeddings)
53
+
54
+ def forward(self, source_ids=None,source_mask=None,target_ids=None,target_mask=None,args=None):
55
+ outputs = self.encoder(source_ids, attention_mask=source_mask)
56
+ encoder_output = outputs[0].permute([1,0,2]).contiguous()
57
+ if target_ids is not None:
58
+ attn_mask=-1e4 *(1-self.bias[:target_ids.shape[1],:target_ids.shape[1]])
59
+ tgt_embeddings = self.encoder.embeddings(target_ids).permute([1,0,2]).contiguous()
60
+ out = self.decoder(tgt_embeddings,encoder_output,tgt_mask=attn_mask,memory_key_padding_mask=(1-source_mask).bool())
61
+ hidden_states = torch.tanh(self.dense(out)).permute([1,0,2]).contiguous()
62
+ lm_logits = self.lm_head(hidden_states)
63
+ # Shift so that tokens < n predict n
64
+ active_loss = target_mask[..., 1:].ne(0).view(-1) == 1
65
+ shift_logits = lm_logits[..., :-1, :].contiguous()
66
+ shift_labels = target_ids[..., 1:].contiguous()
67
+ # Flatten the tokens
68
+ loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
69
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[active_loss],
70
+ shift_labels.view(-1)[active_loss])
71
+
72
+ outputs = loss,loss*active_loss.sum(),active_loss.sum()
73
+ return outputs
74
+ else:
75
+ #Predict
76
+ preds=[]
77
+ zero=torch.cuda.LongTensor(1).fill_(0)
78
+ for i in range(source_ids.shape[0]):
79
+ context=encoder_output[:,i:i+1]
80
+ context_mask=source_mask[i:i+1,:]
81
+ beam = Beam(self.beam_size,self.sos_id,self.eos_id)
82
+ input_ids=beam.getCurrentState()
83
+ context=context.repeat(1, self.beam_size,1)
84
+ context_mask=context_mask.repeat(self.beam_size,1)
85
+ for _ in range(self.max_length):
86
+ if beam.done():
87
+ break
88
+ attn_mask=-1e4 *(1-self.bias[:input_ids.shape[1],:input_ids.shape[1]])
89
+ tgt_embeddings = self.encoder.embeddings(input_ids).permute([1,0,2]).contiguous()
90
+ out = self.decoder(tgt_embeddings,context,tgt_mask=attn_mask,memory_key_padding_mask=(1-context_mask).bool())
91
+ out = torch.tanh(self.dense(out))
92
+ hidden_states=out.permute([1,0,2]).contiguous()[:,-1,:]
93
+ out = self.lsm(self.lm_head(hidden_states)).data
94
+ beam.advance(out)
95
+ input_ids.data.copy_(input_ids.data.index_select(0, beam.getCurrentOrigin()))
96
+ input_ids=torch.cat((input_ids,beam.getCurrentState()),-1)
97
+ hyp= beam.getHyp(beam.getFinal())
98
+ pred=beam.buildTargetTokens(hyp)[:self.beam_size]
99
+ pred=[torch.cat([x.view(-1) for x in p]+[zero]*(self.max_length-len(p))).view(1,-1) for p in pred]
100
+ preds.append(torch.cat(pred,0).unsqueeze(0))
101
+
102
+ preds=torch.cat(preds,0)
103
+ return preds
104
+
105
+ def feature(self, source_ids,source_mask):
106
+ outputs = self.encoder(source_ids, attention_mask=source_mask)
107
+ return outputs.pooler_output
108
+
109
+
110
+ class Beam(object):
111
+ def __init__(self, size,sos,eos):
112
+ self.size = size
113
+ self.tt = torch.cuda
114
+ # The score for each translation on the beam.
115
+ self.scores = self.tt.FloatTensor(size).zero_()
116
+ # The backpointers at each time-step.
117
+ self.prevKs = []
118
+ # The outputs at each time-step.
119
+ self.nextYs = [self.tt.LongTensor(size)
120
+ .fill_(0)]
121
+ self.nextYs[0][0] = sos
122
+ # Has EOS topped the beam yet.
123
+ self._eos = eos
124
+ self.eosTop = False
125
+ # Time and k pair for finished.
126
+ self.finished = []
127
+
128
+ def getCurrentState(self):
129
+ "Get the outputs for the current timestep."
130
+ batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
131
+ return batch
132
+
133
+ def getCurrentOrigin(self):
134
+ "Get the backpointers for the current timestep."
135
+ return self.prevKs[-1]
136
+
137
+ def advance(self, wordLk):
138
+ """
139
+ Given the log-probabilities over words for every current beam `wordLk`,
140
+ compute and update the beam search state.
141
+
142
+ Parameters:
143
+
144
+ * `wordLk`- log-probs of advancing from the last step (K x words)
145
+
146
+
147
+ Use `done()` to check whether the beam search is complete.
148
+ """
149
+ numWords = wordLk.size(1)
150
+
151
+ # Sum the previous scores.
152
+ if len(self.prevKs) > 0:
153
+ beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)
154
+
155
+ # Don't let EOS have children.
156
+ for i in range(self.nextYs[-1].size(0)):
157
+ if self.nextYs[-1][i] == self._eos:
158
+ beamLk[i] = -1e20
159
+ else:
160
+ beamLk = wordLk[0]
161
+ flatBeamLk = beamLk.view(-1)
162
+ bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)
163
+
164
+ self.scores = bestScores
165
+
166
+ # bestScoresId is flattened beam x word array, so calculate which
167
+ # word and beam each score came from
168
+ prevK = bestScoresId // numWords
169
+ self.prevKs.append(prevK)
170
+ self.nextYs.append((bestScoresId - prevK * numWords))
171
+
172
+
173
+ for i in range(self.nextYs[-1].size(0)):
174
+ if self.nextYs[-1][i] == self._eos:
175
+ s = self.scores[i]
176
+ self.finished.append((s, len(self.nextYs) - 1, i))
177
+
178
+ # End condition is when top-of-beam is EOS and no global score.
179
+ if self.nextYs[-1][0] == self._eos:
180
+ self.eosTop = True
181
+
182
+ def done(self):
183
+ return self.eosTop and len(self.finished) >=self.size
184
+
185
+ def getFinal(self):
186
+ if len(self.finished) == 0:
187
+ self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
188
+ self.finished.sort(key=lambda a: -a[0])
189
+ if len(self.finished) != self.size:
190
+ unfinished=[]
191
+ for i in range(self.nextYs[-1].size(0)):
192
+ if self.nextYs[-1][i] != self._eos:
193
+ s = self.scores[i]
194
+ unfinished.append((s, len(self.nextYs) - 1, i))
195
+ unfinished.sort(key=lambda a: -a[0])
196
+ self.finished+=unfinished[:self.size-len(self.finished)]
197
+ return self.finished[:self.size]
198
+
199
+ def getHyp(self, beam_res):
200
+ """
201
+ Walk back to construct the full hypothesis.
202
+ """
203
+ hyps=[]
204
+ for _,timestep, k in beam_res:
205
+ hyp = []
206
+ for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
207
+ hyp.append(self.nextYs[j+1][k])
208
+ k = self.prevKs[j][k]
209
+ hyps.append(hyp[::-1])
210
+ return hyps
211
+
212
+ def buildTargetTokens(self, preds):
213
+ sentence=[]
214
+ for pred in preds:
215
+ tokens = []
216
+ for tok in pred:
217
+ if tok==self._eos:
218
+ break
219
+ tokens.append(tok)
220
+ sentence.append(tokens)
221
+ return sentence
222
+
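For orientation, run.py (added next) assembles the pieces defined here into a CodeBERT-encoder / Transformer-decoder model. A minimal construction sketch mirroring that code; the checkpoint name and the beam/length settings follow the repo's shell scripts:

    import torch.nn as nn
    from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
    from model import Seq2Seq  # the class added above

    name = "microsoft/codebert-base"
    config = RobertaConfig.from_pretrained(name)
    tokenizer = RobertaTokenizer.from_pretrained(name)
    encoder = RobertaModel.from_pretrained(name, config=config)
    decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size,
                                               nhead=config.num_attention_heads)
    decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
    model = Seq2Seq(encoder=encoder, decoder=decoder, config=config,
                    beam_size=10, max_length=128,
                    sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id)
    # Note: Beam allocates torch.cuda tensors, so beam-search inference expects a GPU.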
Code-Text/code-to-text/code/run.py ADDED
@@ -0,0 +1,544 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Fine-tuning a sequence-to-sequence model (RoBERTa/CodeBERT encoder with a Transformer decoder)
18
+ for code-to-text generation: the encoder reads source-code tokens and the decoder is trained to
19
+ emit the corresponding natural-language docstring.
20
+ """
21
+
22
+ from __future__ import absolute_import
23
+ import os
24
+ import sys
25
+ import bleu
26
+ import pickle
27
+ import torch
28
+ import json
29
+ import random
30
+ import logging
31
+ import argparse
32
+ import numpy as np
33
+ from io import open
34
+ from itertools import cycle
35
+ import torch.nn as nn
36
+ from model import Seq2Seq
37
+ from tqdm import tqdm, trange
38
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
39
+ from torch.utils.data.distributed import DistributedSampler
40
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
41
+ RobertaConfig, RobertaModel, RobertaTokenizer)
42
+ MODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer)}
43
+
44
+ logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
45
+ datefmt = '%m/%d/%Y %H:%M:%S',
46
+ level = logging.INFO)
47
+ logger = logging.getLogger(__name__)
48
+
49
+ class Example(object):
50
+ """A single training/test example."""
51
+ def __init__(self,
52
+ idx,
53
+ source,
54
+ target,
55
+ ):
56
+ self.idx = idx
57
+ self.source = source
58
+ self.target = target
59
+
60
+ def read_examples(filename):
61
+ """Read examples from filename."""
62
+ examples=[]
63
+ with open(filename,encoding="utf-8") as f:
64
+ for idx, line in enumerate(f):
65
+ line=line.strip()
66
+ js=json.loads(line)
67
+ if 'idx' not in js:
68
+ js['idx']=idx
69
+ code=' '.join(js['code_tokens']).replace('\n',' ')
70
+ code=' '.join(code.strip().split())
71
+ nl=' '.join(js['docstring_tokens']).replace('\n','')
72
+ nl=' '.join(nl.strip().split())
73
+ examples.append(
74
+ Example(
75
+ idx = idx,
76
+ source=code,
77
+ target = nl,
78
+ )
79
+ )
80
+ return examples
81
+
82
+
83
+ class InputFeatures(object):
84
+ """A single training/test features for a example."""
85
+ def __init__(self,
86
+ example_id,
87
+ source_ids,
88
+ target_ids,
89
+ source_mask,
90
+ target_mask,
91
+
92
+ ):
93
+ self.example_id = example_id
94
+ self.source_ids = source_ids
95
+ self.target_ids = target_ids
96
+ self.source_mask = source_mask
97
+ self.target_mask = target_mask
98
+
99
+
100
+
101
+ def convert_examples_to_features(examples, tokenizer, args,stage=None):
102
+ features = []
103
+ for example_index, example in enumerate(examples):
104
+ #source
105
+ source_tokens = tokenizer.tokenize(example.source)[:args.max_source_length-2]
106
+ source_tokens =[tokenizer.cls_token]+source_tokens+[tokenizer.sep_token]
107
+ source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
108
+ source_mask = [1] * (len(source_tokens))
109
+ padding_length = args.max_source_length - len(source_ids)
110
+ source_ids+=[tokenizer.pad_token_id]*padding_length
111
+ source_mask+=[0]*padding_length
112
+
113
+ #target
114
+ if stage=="test":
115
+ target_tokens = tokenizer.tokenize("None")
116
+ else:
117
+ target_tokens = tokenizer.tokenize(example.target)[:args.max_target_length-2]
118
+ target_tokens = [tokenizer.cls_token]+target_tokens+[tokenizer.sep_token]
119
+ target_ids = tokenizer.convert_tokens_to_ids(target_tokens)
120
+ target_mask = [1] *len(target_ids)
121
+ padding_length = args.max_target_length - len(target_ids)
122
+ target_ids+=[tokenizer.pad_token_id]*padding_length
123
+ target_mask+=[0]*padding_length
124
+
125
+ if example_index < 5:
126
+ if stage=='train':
127
+ logger.info("*** Example ***")
128
+ logger.info("idx: {}".format(example.idx))
129
+
130
+ logger.info("source_tokens: {}".format([x.replace('\u0120','_') for x in source_tokens]))
131
+ logger.info("source_ids: {}".format(' '.join(map(str, source_ids))))
132
+ logger.info("source_mask: {}".format(' '.join(map(str, source_mask))))
133
+
134
+ logger.info("target_tokens: {}".format([x.replace('\u0120','_') for x in target_tokens]))
135
+ logger.info("target_ids: {}".format(' '.join(map(str, target_ids))))
136
+ logger.info("target_mask: {}".format(' '.join(map(str, target_mask))))
137
+
138
+ features.append(
139
+ InputFeatures(
140
+ example_index,
141
+ source_ids,
142
+ target_ids,
143
+ source_mask,
144
+ target_mask,
145
+ )
146
+ )
147
+ return features
148
+
149
+
150
+
151
+ def set_seed(seed=42):
152
+ random.seed(seed)
153
+ os.environ['PYTHONHASHSEED'] = str(seed)
154
+ np.random.seed(seed)
155
+ torch.manual_seed(seed)
156
+ torch.cuda.manual_seed(seed)
157
+ torch.backends.cudnn.deterministic = True
158
+
159
+ def main():
160
+ parser = argparse.ArgumentParser()
161
+
162
+ ## Required parameters
163
+ parser.add_argument("--model_type", default=None, type=str, required=True,
164
+ help="Model type: e.g. roberta")
165
+ parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
166
+ help="Path to pre-trained model: e.g. roberta-base" )
167
+ parser.add_argument("--output_dir", default=None, type=str, required=True,
168
+ help="The output directory where the model predictions and checkpoints will be written.")
169
+ parser.add_argument("--load_model_path", default=None, type=str,
170
+ help="Path to trained model: Should contain the .bin files" )
171
+ ## Other parameters
172
+ parser.add_argument("--train_filename", default=None, type=str,
173
+ help="The train filename. Should contain the .jsonl files for this task.")
174
+ parser.add_argument("--dev_filename", default=None, type=str,
175
+ help="The dev filename. Should contain the .jsonl files for this task.")
176
+ parser.add_argument("--test_filename", default=None, type=str,
177
+ help="The test filename. Should contain the .jsonl files for this task.")
178
+
179
+ parser.add_argument("--config_name", default="", type=str,
180
+ help="Pretrained config name or path if not the same as model_name")
181
+ parser.add_argument("--tokenizer_name", default="", type=str,
182
+ help="Pretrained tokenizer name or path if not the same as model_name")
183
+ parser.add_argument("--max_source_length", default=64, type=int,
184
+ help="The maximum total source sequence length after tokenization. Sequences longer "
185
+ "than this will be truncated, sequences shorter will be padded.")
186
+ parser.add_argument("--max_target_length", default=32, type=int,
187
+ help="The maximum total target sequence length after tokenization. Sequences longer "
188
+ "than this will be truncated, sequences shorter will be padded.")
189
+
190
+ parser.add_argument("--do_train", action='store_true',
191
+ help="Whether to run training.")
192
+ parser.add_argument("--do_eval", action='store_true',
193
+ help="Whether to run eval on the dev set.")
194
+ parser.add_argument("--do_test", action='store_true',
195
+ help="Whether to run eval on the dev set.")
196
+ parser.add_argument("--do_lower_case", action='store_true',
197
+ help="Set this flag if you are using an uncased model.")
198
+ parser.add_argument("--no_cuda", action='store_true',
199
+ help="Avoid using CUDA when available")
200
+
201
+ parser.add_argument("--train_batch_size", default=8, type=int,
202
+ help="Batch size per GPU/CPU for training.")
203
+ parser.add_argument("--eval_batch_size", default=8, type=int,
204
+ help="Batch size per GPU/CPU for evaluation.")
205
+ parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
206
+ help="Number of updates steps to accumulate before performing a backward/update pass.")
207
+ parser.add_argument("--learning_rate", default=5e-5, type=float,
208
+ help="The initial learning rate for Adam.")
209
+ parser.add_argument("--beam_size", default=10, type=int,
210
+ help="beam size for beam search")
211
+ parser.add_argument("--weight_decay", default=0.0, type=float,
212
+ help="Weight deay if we apply some.")
213
+ parser.add_argument("--adam_epsilon", default=1e-8, type=float,
214
+ help="Epsilon for Adam optimizer.")
215
+ parser.add_argument("--max_grad_norm", default=1.0, type=float,
216
+ help="Max gradient norm.")
217
+ parser.add_argument("--num_train_epochs", default=3, type=int,
218
+ help="Total number of training epochs to perform.")
219
+ parser.add_argument("--max_steps", default=-1, type=int,
220
+ help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
221
+ parser.add_argument("--eval_steps", default=-1, type=int,
222
+ help="")
223
+ parser.add_argument("--train_steps", default=-1, type=int,
224
+ help="")
225
+ parser.add_argument("--warmup_steps", default=0, type=int,
226
+ help="Linear warmup over warmup_steps.")
227
+ parser.add_argument("--local_rank", type=int, default=-1,
228
+ help="For distributed training: local_rank")
229
+ parser.add_argument('--seed', type=int, default=42,
230
+ help="random seed for initialization")
231
+ # print arguments
232
+ args = parser.parse_args()
233
+ logger.info(args)
234
+
235
+ # Setup CUDA, GPU & distributed training
236
+ if args.local_rank == -1 or args.no_cuda:
237
+ device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
238
+ args.n_gpu = torch.cuda.device_count()
239
+ else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
240
+ torch.cuda.set_device(args.local_rank)
241
+ device = torch.device("cuda", args.local_rank)
242
+ torch.distributed.init_process_group(backend='nccl')
243
+ args.n_gpu = 1
244
+ logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
245
+ args.local_rank, device, args.n_gpu, bool(args.local_rank != -1))
246
+ args.device = device
247
+ # Set seed
248
+ set_seed(args.seed)
249
+ # make dir if output_dir not exist
250
+ if os.path.exists(args.output_dir) is False:
251
+ os.makedirs(args.output_dir)
252
+
253
+ config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
254
+ config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)
255
+ tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,do_lower_case=args.do_lower_case)
256
+
257
+ # build model
258
+ encoder = model_class.from_pretrained(args.model_name_or_path,config=config)
259
+ decoder_layer = nn.TransformerDecoderLayer(d_model=config.hidden_size, nhead=config.num_attention_heads)
260
+ decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)
261
+ model=Seq2Seq(encoder=encoder,decoder=decoder,config=config,
262
+ beam_size=args.beam_size,max_length=args.max_target_length,
263
+ sos_id=tokenizer.cls_token_id,eos_id=tokenizer.sep_token_id)
264
+ if args.load_model_path is not None:
265
+ logger.info("reload model from {}".format(args.load_model_path))
266
+ model.load_state_dict(torch.load(args.load_model_path))
267
+
268
+ model.to(device)
269
+ if args.local_rank != -1:
270
+ # Distributed training
271
+ try:
272
+ from apex.parallel import DistributedDataParallel as DDP
273
+ except ImportError:
274
+ raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
275
+
276
+ model = DDP(model)
277
+ elif args.n_gpu > 1:
278
+ # multi-gpu training
279
+ model = torch.nn.DataParallel(model)
280
+
281
+ if args.do_train:
282
+ # Prepare training data loader
283
+ train_examples = read_examples(args.train_filename)
284
+ train_features = convert_examples_to_features(train_examples, tokenizer,args,stage='train')
285
+ all_source_ids = torch.tensor([f.source_ids for f in train_features], dtype=torch.long)
286
+ all_source_mask = torch.tensor([f.source_mask for f in train_features], dtype=torch.long)
287
+ all_target_ids = torch.tensor([f.target_ids for f in train_features], dtype=torch.long)
288
+ all_target_mask = torch.tensor([f.target_mask for f in train_features], dtype=torch.long)
289
+ train_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
290
+
291
+ if args.local_rank == -1:
292
+ train_sampler = RandomSampler(train_data)
293
+ else:
294
+ train_sampler = DistributedSampler(train_data)
295
+ train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size//args.gradient_accumulation_steps)
296
+
297
+ num_train_optimization_steps = args.train_steps
298
+
299
+ # Prepare optimizer and schedule (linear warmup and decay)
300
+ no_decay = ['bias', 'LayerNorm.weight']
301
+ optimizer_grouped_parameters = [
302
+ {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
303
+ 'weight_decay': args.weight_decay},
304
+ {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
305
+ ]
306
+ t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
307
+ optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
308
+ scheduler = get_linear_schedule_with_warmup(optimizer,
309
+ num_warmup_steps=int(t_total*0.1),
310
+ num_training_steps=t_total)
311
+
312
+ #Start training
313
+ logger.info("***** Running training *****")
314
+ logger.info(" Num examples = %d", len(train_examples))
315
+ logger.info(" Batch size = %d", args.train_batch_size)
316
+ logger.info(" Num epoch = %d", args.num_train_epochs)
317
+
318
+
319
+ model.train()
320
+ dev_dataset={}
321
+ nb_tr_examples, nb_tr_steps,tr_loss,global_step,best_bleu,best_loss = 0, 0,0,0,0,1e6
322
+ for epoch in range(args.num_train_epochs):
323
+ bar = tqdm(train_dataloader,total=len(train_dataloader))
324
+ for batch in bar:
325
+ batch = tuple(t.to(device) for t in batch)
326
+ source_ids,source_mask,target_ids,target_mask = batch
327
+ loss,_,_ = model(source_ids=source_ids,source_mask=source_mask,target_ids=target_ids,target_mask=target_mask)
328
+
329
+ if args.n_gpu > 1:
330
+ loss = loss.mean() # mean() to average on multi-gpu.
331
+ if args.gradient_accumulation_steps > 1:
332
+ loss = loss / args.gradient_accumulation_steps
333
+ tr_loss += loss.item()
334
+ train_loss=round(tr_loss*args.gradient_accumulation_steps/(nb_tr_steps+1),4)
335
+ bar.set_description("epoch {} loss {}".format(epoch,train_loss))
336
+ nb_tr_examples += source_ids.size(0)
337
+ nb_tr_steps += 1
338
+ loss.backward()
339
+
340
+ if (nb_tr_steps + 1) % args.gradient_accumulation_steps == 0:
341
+ #Update parameters
342
+ optimizer.step()
343
+ optimizer.zero_grad()
344
+ scheduler.step()
345
+ global_step += 1
346
+
347
+ if args.do_eval:
348
+ #Eval model with dev dataset
349
+ tr_loss = 0
350
+ nb_tr_examples, nb_tr_steps = 0, 0
351
+ eval_flag=False
352
+ if 'dev_loss' in dev_dataset:
353
+ eval_examples,eval_data=dev_dataset['dev_loss']
354
+ else:
355
+ eval_examples = read_examples(args.dev_filename)
356
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='dev')
357
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
358
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
359
+ all_target_ids = torch.tensor([f.target_ids for f in eval_features], dtype=torch.long)
360
+ all_target_mask = torch.tensor([f.target_mask for f in eval_features], dtype=torch.long)
361
+ eval_data = TensorDataset(all_source_ids,all_source_mask,all_target_ids,all_target_mask)
362
+ dev_dataset['dev_loss']=eval_examples,eval_data
363
+ eval_sampler = SequentialSampler(eval_data)
364
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
365
+
366
+ logger.info("\n***** Running evaluation *****")
367
+ logger.info(" Num examples = %d", len(eval_examples))
368
+ logger.info(" Batch size = %d", args.eval_batch_size)
369
+
370
+ #Start Evaling model
371
+ model.eval()
372
+ eval_loss,tokens_num = 0,0
373
+ for batch in eval_dataloader:
374
+ batch = tuple(t.to(device) for t in batch)
375
+ source_ids,source_mask,target_ids,target_mask = batch
376
+
377
+ with torch.no_grad():
378
+ _,loss,num = model(source_ids=source_ids,source_mask=source_mask,
379
+ target_ids=target_ids,target_mask=target_mask)
380
+ eval_loss += loss.sum().item()
381
+ tokens_num += num.sum().item()
382
+ # Print loss of dev dataset
383
+ model.train()
384
+ eval_loss = eval_loss / tokens_num
385
+ result = {'eval_ppl': round(np.exp(eval_loss),5),
386
+ 'global_step': global_step+1,
387
+ 'train_loss': round(train_loss,5)}
388
+ for key in sorted(result.keys()):
389
+ logger.info(" %s = %s", key, str(result[key]))
390
+ logger.info(" "+"*"*20)
391
+
392
+ #save last checkpoint
393
+ last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
394
+ if not os.path.exists(last_output_dir):
395
+ os.makedirs(last_output_dir)
396
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
397
+ output_model_file = os.path.join(last_output_dir, "pytorch_model.bin")
398
+ torch.save(model_to_save.state_dict(), output_model_file)
399
+ if eval_loss<best_loss:
400
+ logger.info(" Best ppl:%s",round(np.exp(eval_loss),5))
401
+ logger.info(" "+"*"*20)
402
+ best_loss=eval_loss
403
+ # Save best checkpoint for best ppl
404
+ output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
405
+ if not os.path.exists(output_dir):
406
+ os.makedirs(output_dir)
407
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
408
+ output_model_file = os.path.join(output_dir, "pytorch_model.bin")
409
+ torch.save(model_to_save.state_dict(), output_model_file)
410
+
411
+
412
+ #Calculate bleu
413
+ if 'dev_bleu' in dev_dataset:
414
+ eval_examples,eval_data=dev_dataset['dev_bleu']
415
+ else:
416
+ eval_examples = read_examples(args.dev_filename)
417
+ eval_examples = random.sample(eval_examples,min(1000,len(eval_examples)))
418
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
419
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
420
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
421
+ eval_data = TensorDataset(all_source_ids,all_source_mask)
422
+ dev_dataset['dev_bleu']=eval_examples,eval_data
423
+
424
+
425
+
426
+ eval_sampler = SequentialSampler(eval_data)
427
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
428
+
429
+ model.eval()
430
+ p=[]
431
+ for batch in eval_dataloader:
432
+ batch = tuple(t.to(device) for t in batch)
433
+ source_ids,source_mask= batch
434
+ with torch.no_grad():
435
+ preds = model(source_ids=source_ids,source_mask=source_mask)
436
+ for pred in preds:
437
+ t=pred[0].cpu().numpy()
438
+ t=list(t)
439
+ if 0 in t:
440
+ t=t[:t.index(0)]
441
+ text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
442
+ p.append(text)
443
+ model.train()
444
+ predictions=[]
445
+ with open(os.path.join(args.output_dir,"dev.output"),'w') as f, open(os.path.join(args.output_dir,"dev.gold"),'w') as f1:
446
+ for ref,gold in zip(p,eval_examples):
447
+ predictions.append(str(gold.idx)+'\t'+ref)
448
+ f.write(str(gold.idx)+'\t'+ref+'\n')
449
+ f1.write(str(gold.idx)+'\t'+gold.target+'\n')
450
+
451
+ (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "dev.gold"))
452
+ dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
453
+ logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
454
+ logger.info(" "+"*"*20)
455
+ if dev_bleu>best_bleu:
456
+ logger.info(" Best bleu:%s",dev_bleu)
457
+ logger.info(" "+"*"*20)
458
+ best_bleu=dev_bleu
459
+ # Save best checkpoint for best bleu
460
+ output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
461
+ if not os.path.exists(output_dir):
462
+ os.makedirs(output_dir)
463
+ model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
464
+ output_model_file = os.path.join(output_dir, "pytorch_model.bin")
465
+ torch.save(model_to_save.state_dict(), output_model_file)
466
+
467
+ # Save a checkpoint at the end of every epoch
468
+ output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(epoch + 1))
469
+ if not os.path.exists(output_dir):
470
+ os.makedirs(output_dir)
471
+ model_to_save = model.module if hasattr(model, 'module') else model
472
+ ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
473
+ logger.info("Saving model checkpoint to %s", ckpt_output_path)
474
+ torch.save(model_to_save.state_dict(), ckpt_output_path)
475
+ # Optionally save training-set feature representations every epoch (left commented out below)
476
+ # logger.info("Saving training feature")
477
+ # train_dataloader_bs1 = DataLoader(train_dataset, sampler=train_sampler, batch_size=1,num_workers=4,pin_memory=True)
478
+ # train_feature = []
479
+ # for batch in tqdm(train_dataloader_bs1):
480
+ # batch = tuple(t.to(device) for t in batch)
481
+ # source_ids, source_mask, _, _ = batch
482
+ # model.eval()
483
+ # with torch.no_grad():
484
+ # tf = model.feature(source_ids, source_mask)
485
+ # train_feature.append(tf.cpu().detach().numpy())
486
+ # feature_output_path = os.path.join(output_dir, 'feature.pkl')
487
+ # with open(feature_output_path, 'wb') as f:
488
+ # pickle.dump(train_feature, f)
489
+
490
+ if args.do_test:
491
+ files=[]
492
+ if args.dev_filename is not None:
493
+ files.append(args.dev_filename)
494
+ if args.test_filename is not None:
495
+ files.append(args.test_filename)
496
+ for idx,file in enumerate(files):
497
+ logger.info("Test file: {}".format(file))
498
+ eval_examples = read_examples(file)
499
+ eval_features = convert_examples_to_features(eval_examples, tokenizer, args,stage='test')
500
+ all_source_ids = torch.tensor([f.source_ids for f in eval_features], dtype=torch.long)
501
+ all_source_mask = torch.tensor([f.source_mask for f in eval_features], dtype=torch.long)
502
+ eval_data = TensorDataset(all_source_ids,all_source_mask)
503
+
504
+ # Calculate bleu
505
+ eval_sampler = SequentialSampler(eval_data)
506
+ eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
507
+
508
+ model.eval()
509
+ p=[]
510
+ for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
511
+ batch = tuple(t.to(device) for t in batch)
512
+ source_ids,source_mask= batch
513
+ with torch.no_grad():
514
+ preds = model(source_ids=source_ids,source_mask=source_mask)
515
+ for pred in preds:
516
+ t=pred[0].cpu().numpy()
517
+ t=list(t)
518
+ if 0 in t:
519
+ t=t[:t.index(0)]
520
+ text = tokenizer.decode(t,clean_up_tokenization_spaces=False)
521
+ p.append(text)
522
+ model.train()
523
+ predictions=[]
524
+ with open(os.path.join(args.output_dir,"test_{}.output".format(str(idx))),'w') as f, open(os.path.join(args.output_dir,"test_{}.gold".format(str(idx))),'w') as f1:
525
+ for ref,gold in zip(p,eval_examples):
526
+ predictions.append(str(gold.idx)+'\t'+ref)
527
+ f.write(str(gold.idx)+'\t'+ref+'\n')
528
+ f1.write(str(gold.idx)+'\t'+gold.target+'\n')
529
+
530
+ (goldMap, predictionMap) = bleu.computeMaps(predictions, os.path.join(args.output_dir, "test_{}.gold".format(idx)))
531
+ dev_bleu=round(bleu.bleuFromMaps(goldMap, predictionMap)[0],2)
532
+ logger.info(" %s = %s "%("bleu-4",str(dev_bleu)))
533
+ logger.info(" "+"*"*20)
534
+
535
+
536
+
537
+
538
+
539
+
540
+
541
+ if __name__ == "__main__":
542
+ main()
543
+
544
+
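A quick sanity check of the input format read_examples() above expects: one JSON object per line carrying code_tokens and docstring_tokens (idx is optional and defaults to the line index). A toy example; the file name is illustrative:

    import json
    from run import read_examples  # the script added above

    record = {"code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
              "docstring_tokens": ["Return", "the", "sum", "of", "a", "and", "b", "."]}
    with open("toy.jsonl", "w", encoding="utf-8") as f:
        f.write(json.dumps(record) + "\n")

    examples = read_examples("toy.jsonl")
    print(examples[0].source)  # def add ( a , b ) : return a + b
    print(examples[0].target)  # Return the sum of a and b .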
Code-Text/code-to-text/code/test.sh ADDED
@@ -0,0 +1,22 @@
1
+ lang=python #programming language
2
+ batch_size=64
3
+ beam_size=10
4
+ source_length=256
5
+ target_length=128
6
+ data_dir=../dataset
7
+ output_dir=../model/$lang
8
+ dev_file=$data_dir/$lang/valid.jsonl
9
+ test_file=$data_dir/$lang/test.jsonl
10
+ test_model=$output_dir/epoch_10/subject_model.pth #checkpoint for test
11
+
12
+ CUDA_VISIBLE_DEVICES=2,3 python run.py \
13
+ --do_test --model_type roberta \
14
+ --model_name_or_path microsoft/codebert-base \
15
+ --load_model_path $test_model \
16
+ --dev_filename $dev_file \
17
+ --test_filename $test_file \
18
+ --output_dir $output_dir \
19
+ --max_source_length $source_length \
20
+ --max_target_length $target_length \
21
+ --beam_size $beam_size \
22
+ --eval_batch_size $batch_size
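With the settings above, run.py writes its state dicts under output_dir (../model/python): checkpoint-last/pytorch_model.bin, checkpoint-best-ppl/pytorch_model.bin, checkpoint-best-bleu/pytorch_model.bin, and epoch_<N>/subject_model.pth; --load_model_path expects one of these files. A minimal reload sketch, assuming `model` is the Seq2Seq instance built as in the earlier sketch:

    import torch

    # Path follows test.sh; any of the state-dict files listed above would work.
    state_dict = torch.load("../model/python/epoch_10/subject_model.pth", map_location="cpu")
    model.load_state_dict(state_dict)
    model.eval()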
Code-Text/code-to-text/code/train.sh ADDED
@@ -0,0 +1,28 @@
1
+ lang=python #programming language
2
+ lr=5e-5
3
+ batch_size=32
4
+ beam_size=10
5
+ source_length=256
6
+ target_length=128
7
+ data_dir=../dataset
8
+ output_dir=../model/$lang
9
+ train_file=$data_dir/$lang/train.jsonl
10
+ dev_file=$data_dir/$lang/valid.jsonl
11
+ epochs=10
12
+ pretrained_model=microsoft/codebert-base #Roberta: roberta-base
13
+
14
+ CUDA_VISIBLE_DEVICES=2,3 python run.py \
15
+ --do_train \
16
+ --do_eval \
17
+ --model_type roberta \
18
+ --model_name_or_path $pretrained_model \
19
+ --train_filename $train_file \
20
+ --dev_filename $dev_file \
21
+ --output_dir $output_dir \
22
+ --max_source_length $source_length \
23
+ --max_target_length $target_length \
24
+ --beam_size $beam_size \
25
+ --train_batch_size $batch_size \
26
+ --eval_batch_size $batch_size \
27
+ --learning_rate $lr \
28
+ --num_train_epochs $epochs
Code-Text/code-to-text/data.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ee5eea169be9d0516dcb0f100ec1932b830b37b329d96bfa87c8f76ee7da380
3
+ size 381246360