|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
""" BERT classification fine-tuning: utilities to work with GLUE tasks """ |
|
|
|
from __future__ import absolute_import, division, print_function |
|
|
|
import csv |
|
import json |
|
import logging |
|
import os |
|
import sys |
|
from io import open |
|
from sklearn.metrics import f1_score, precision_score, recall_score |
|
from torch.utils.data import Dataset |
|
import torch |
|
|
|
csv.field_size_limit(sys.maxsize) |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
class InputFeatures(object):
    """Holds the tokenized code/NL pair and label for a single example."""

    def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, label, idx):
        # Code side: sub-tokens and their padded vocabulary ids.
        self.code_tokens, self.code_ids = code_tokens, code_ids
        # Natural-language side: sub-tokens and their padded vocabulary ids.
        self.nl_tokens, self.nl_ids = nl_tokens, nl_ids
        # Gold label and the example's dataset index.
        self.label, self.idx = label, idx
|
|
|
|
|
class InputFeaturesTriplet(InputFeatures):
    """InputFeatures extended with a separately-tokenized docstring channel."""

    def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, ds_tokens, ds_ids, label, idx):
        # Delegate the shared code/NL/label fields to the base class
        # (explicit two-argument super kept for Python 2 compatibility).
        super(InputFeaturesTriplet, self).__init__(
            code_tokens, code_ids, nl_tokens, nl_ids, label, idx)
        # Docstring side: sub-tokens and their padded vocabulary ids.
        self.ds_tokens, self.ds_ids = ds_tokens, ds_ids
|
|
|
|
|
def convert_examples_to_features(js, tokenizer, args):
    """Turn one JSON example into an InputFeatures instance.

    Expects *js* to carry 'code', 'doc', 'label' and 'idx' keys. Each text
    field is tokenized, truncated to leave room for the two special tokens,
    wrapped in [CLS] ... [SEP], converted to ids and right-padded with the
    tokenizer's pad id up to args.max_seq_length.
    """
    def encode(text):
        # Identical pipeline for both the code and the NL side.
        tokens = tokenizer.tokenize(text)[:args.max_seq_length - 2]
        tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
        ids = tokenizer.convert_tokens_to_ids(tokens)
        ids = ids + [tokenizer.pad_token_id] * (args.max_seq_length - len(ids))
        return tokens, ids

    code_tokens, code_ids = encode(js['code'])
    nl_tokens, nl_ids = encode(js['doc'])
    return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids,
                         js['label'], js['idx'])
|
|
|
|
|
class TextDataset(Dataset):
    """Dataset of (code_ids, nl_ids, label) triples loaded from a JSON file.

    The file must contain a JSON array of objects with the keys consumed by
    convert_examples_to_features ('code', 'doc', 'label', 'idx'). For the
    test split (type == 'test') gold labels are absent, so a placeholder
    label of 0 is substituted before feature conversion.
    """

    def __init__(self, tokenizer, args, file_path=None, type=None):
        # NOTE: 'type' shadows the builtin, but the name is part of the
        # caller-facing interface and is kept for compatibility.
        self.type = type
        # Fix: read with an explicit encoding rather than the platform
        # default (the module already imports io.open, so this also works
        # under Python 2). Also drops the dead 'data = []' pre-assignment.
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        if self.type == 'test':
            # Test split ships without labels; inject a dummy so feature
            # conversion does not KeyError on js['label'].
            for js in data:
                js['label'] = 0
        self.examples = [convert_examples_to_features(js, tokenizer, args)
                         for js in data]
        if 'train' in file_path:
            # Log the first few converted examples as a sanity check.
            # '\u0120' is the GPT-2/RoBERTa BPE space marker.
            for idx, example in enumerate(self.examples[:3]):
                logger.info("*** Example ***")
                logger.info("idx: {}".format(idx))
                logger.info("code_tokens: {}".format(
                    [x.replace('\u0120', '_') for x in example.code_tokens]))
                logger.info("code_ids: {}".format(
                    ' '.join(map(str, example.code_ids))))
                logger.info("nl_tokens: {}".format(
                    [x.replace('\u0120', '_') for x in example.nl_tokens]))
                logger.info("nl_ids: {}".format(
                    ' '.join(map(str, example.nl_ids))))

    def __len__(self):
        """Number of loaded examples."""
        return len(self.examples)

    def __getitem__(self, i):
        """Return (code_ids, nl_ids, label) as torch tensors for example i."""
        example = self.examples[i]
        return (torch.tensor(example.code_ids),
                torch.tensor(example.nl_ids),
                torch.tensor(example.label))
|
|
|
|
|
|
|
|
|
def simple_accuracy(preds, labels):
    """Fraction of positions where preds and labels agree (element-wise)."""
    hits = preds == labels
    return hits.mean()
|
|
|
|
|
def acc_and_f1(preds, labels):
    """Compute accuracy plus binary precision/recall/F1 and a combined score.

    Returns a dict with keys 'acc', 'precision', 'recall', 'f1' and
    'acc_and_f1' (the mean of accuracy and F1).
    """
    scores = {
        "acc": simple_accuracy(preds, labels),
        "precision": precision_score(y_true=labels, y_pred=preds),
        "recall": recall_score(y_true=labels, y_pred=preds),
        "f1": f1_score(y_true=labels, y_pred=preds),
    }
    scores["acc_and_f1"] = (scores["acc"] + scores["f1"]) / 2
    return scores
|
|
|
|
|
def compute_metrics(task_name, preds, labels):
    """Dispatch metric computation for a task.

    Both supported tasks ('webquery', 'staqc') use acc_and_f1; any other
    task name raises KeyError.
    """
    assert len(preds) == len(labels)
    if task_name in ("webquery", "staqc"):
        return acc_and_f1(preds, labels)
    raise KeyError(task_name)
|
|
|
|