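"""Score a natural-language query against a few candidate code snippets with a
fine-tuned CodeBERT ranking model.

The script tokenizes the query and each snippet, runs each (code, query) pair
through the checkpoint loaded from ../model/python/epoch_2/subject_model.pth,
and prints one relevance score per snippet. The local `Model` wrapper and its
`return_scores` argument come from this repository's model.py.
"""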
from transformers import RobertaTokenizer, RobertaConfig, RobertaModel
import torch

from model import Model


def single_tokenize(text, tokenizer, block_size=256):
    """Convert a single string into a padded tensor of token ids of shape (1, block_size)."""
    # Truncate so the CLS and SEP special tokens still fit within block_size.
    tokens = tokenizer.tokenize(text)[:block_size - 2]
    tokens = [tokenizer.cls_token] + tokens + [tokenizer.sep_token]
    ids = tokenizer.convert_tokens_to_ids(tokens)
    # Right-pad to a fixed length so every input has the same shape.
    padding_length = block_size - len(ids)
    ids += [tokenizer.pad_token_id] * padding_length
    return torch.tensor([ids])


if __name__ == "__main__":
    # Load the pretrained config/tokenizer and the RoBERTa encoder backbone,
    # wrap them in the repository's Model class, and restore the fine-tuned
    # checkpoint on CPU.
    config = RobertaConfig.from_pretrained("../../../../active_dataset_debugging/base/codebert-base")
    config.num_labels = 1
    tokenizer = RobertaTokenizer.from_pretrained("../../../../active_dataset_debugging/base/codebert-base", do_lower_case=True)
    model = RobertaModel.from_pretrained("../../../../active_dataset_debugging/base/roberta-base", config=config)
    model = Model(model, config, tokenizer, args=None)
    model.load_state_dict(torch.load("../model/python/epoch_2/subject_model.pth", map_location=torch.device('cpu')))
    model.eval()  # disable dropout so scoring is deterministic
    # One natural-language query and four candidate snippets: a NumPy import,
    # a string assignment, C++ output, and the matching Python print call.
    query = "print hello world"
    code_1 = """
    import numpy as np
    """
    code_2 = """
    a = 'hello world'
    """
    code_3 = """
    cout << "hello world" << endl;
    """
    code_4 = '''
    print('hello world')
    '''
    codes = [code_1, code_2, code_3, code_4]
    # Tokenize the query once, then score it against every snippet.
    scores = []
    nl_inputs = single_tokenize(query, tokenizer)
    with torch.no_grad():
        for code in codes:
            code_inputs = single_tokenize(code, tokenizer)
            score = model(code_inputs, nl_inputs, return_scores=True)
            scores.append(score)
print("Query:", query)
for i in range(len(codes)):
print('------------------------------')
print("Code:", codes[i])
print("Score:", float(scores[i])) |