from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, List, Tuple, NamedTuple
from dpu_utils.ptutils import BaseComponent
from mlcomponents.embeddings import SequenceEmbedder
class SeqDecoder(BaseComponent, ABC):
def __init__(self, name: str, token_encoder: SequenceEmbedder,
hyperparameters: Optional[Dict[str, Any]] = None
) -> None:
super(SeqDecoder, self).__init__(name, hyperparameters)
self.__target_token_encoder = token_encoder
START = '[CLS]'
END = '[SEP]'
InputOutputSequence = NamedTuple('InputOutputSequence', [
('input_sequence', List[str]),
('output_sequence', List[str]),
])
@property
def target_token_encoder(self):
return self.__target_token_encoder
def _add_start_end(self, sequence: List[str]) -> List[str]:
return [SeqDecoder.START] + sequence + [SeqDecoder.END]
def _load_metadata_from_sample(self, data_to_load: 'SeqDecoder.InputOutputSequence') -> None:
self.target_token_encoder.load_metadata_from_sample(self._add_start_end(data_to_load.output_sequence))
@abstractmethod
def load_data_from_sample(self, data_to_load: 'SeqDecoder.InputOutputSequence') -> Any:
pass
def _reset_component_metrics(self) -> None:
self._num_minibatches = 0
self._loss_sum = 0
def _component_metrics(self) -> Dict[str, float]:
return {
'Total Decoder Loss': self._loss_sum / self._num_minibatches
}
@abstractmethod
def compute_likelihood(self, *, memories, memories_lengths, initial_state, additional_decoder_input,
return_debug_info: bool = False, **kwargs):
pass
@abstractmethod
def greedy_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int = 40, additional_decoder_input = None) -> List[Tuple[List[List[str]], List[float]]]:
pass
@abstractmethod
def beam_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int = 40, max_beam_size: int = 5, additional_decoder_input = None) -> List[Tuple[List[List[str]], List[float]]]:
pass
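
# Illustrative (hypothetical) usage of the SeqDecoder interface sketched above; `decoder`
# stands for an instance of some concrete subclass (e.g. GruCopyingDecoder) and is not
# defined here. This is a commented sketch only, not part of the module:
#
#   sample = SeqDecoder.InputOutputSequence(
#       input_sequence=['x', '=', '1'],
#       output_sequence=['x', '=', '2'])
#   decoder._load_metadata_from_sample(sample)           # gather vocabulary metadata
#   tensorized = decoder.load_data_from_sample(sample)   # tensorize for minibatching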
# ===================== module boundary =====================
from .seqdecoder import SeqDecoder
from .grudecoder import GruDecoder
from .grucopyingdecoder import GruCopyingDecoder
from .luongattention import LuongAttention
__all__ = ['SeqDecoder', 'GruDecoder', 'GruCopyingDecoder', 'LuongAttention']
from typing import Optional, Dict, Any, NamedTuple, List, Tuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
from torch import nn
from data.spanutils import get_copyable_spans
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.seqdecoding import SeqDecoder, LuongAttention
BIG_NUMBER = 100
class GruSpanCopyingDecoder(SeqDecoder):
def __init__(self, name: str, token_encoder: TokenSequenceEmbedder,
standard_attention: Optional[LuongAttention]=None,
copy_attention: Optional[LuongAttention]=None,
hyperparameters: Optional[Dict[str, Any]]=None,
include_summarizing_network: bool = True,
pre_trained_gru: Optional[nn.GRU] = None
) -> None:
super(GruSpanCopyingDecoder, self).__init__(name, token_encoder, hyperparameters)
self.__output_gru = pre_trained_gru # type: Optional[nn.GRU]
self.__standard_attention = standard_attention # type: Optional[LuongAttention]
self.__copy_attention = copy_attention # type: Optional[LuongAttention]
self.__dropout_layer = None # type: Optional[nn.Dropout]
self.__include_summarizing_network = include_summarizing_network
self.__summarization_layer = None
self.reset_metrics()
def _finalize_component_metadata_and_model(self) -> None:
if self.__output_gru is None:
self.__output_gru = nn.GRU(
input_size=self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size'),
hidden_size=self.get_hyperparameter('hidden_size'),
num_layers=self.get_hyperparameter('num_layers'),
batch_first=True
)
else:
# Make sure that GRU is compatible
assert self.__output_gru.hidden_size == self.get_hyperparameter('hidden_size')
assert self.__output_gru.num_layers == self.get_hyperparameter('num_layers')
assert self.__output_gru.input_size == self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size')
assert self.__output_gru.batch_first
self.__dropout_layer = nn.Dropout(p=self.get_hyperparameter('dropout_rate'))
if self.__include_summarizing_network:
self.__summarization_layer = nn.Linear(
self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size'),
self.get_hyperparameter('hidden_size') * self.get_hyperparameter('num_layers'), bias=False
)
else:
assert self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size') ==\
self.get_hyperparameter('hidden_size') * self.get_hyperparameter('num_layers'), 'Initial states sizes do not match.'
self.__hidden_to_output = nn.Parameter(torch.randn(self.get_hyperparameter('hidden_size'),
self.target_token_encoder.embedding_size,
dtype=torch.float, requires_grad=True))
k = 3 if self.get_hyperparameter('use_max_pool_span_repr') else 2
self.__hidden_to_end_span_query_vector = nn.Parameter(
torch.randn(k*self.get_hyperparameter('memories_hidden_dimension'),
self.get_hyperparameter('hidden_size'),
dtype=torch.float, requires_grad=True))
self.__vocabulary_bias = nn.Parameter(torch.zeros(len(self.target_token_encoder.vocabulary),
dtype=torch.float, requires_grad=True))
# A constant array with the relative span-lengths
span_lengths = np.zeros((self.get_hyperparameter('max_memories_length'), self.get_hyperparameter('max_memories_length')), dtype=np.int32)
for i in range(self.get_hyperparameter('max_memories_length')):
for j in range(i, self.get_hyperparameter('max_memories_length')):
span_lengths[i, j] = j - i + 1
self.__span_lengths = span_lengths
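        # For example, with max_memories_length == 3 this constant is
        #   [[1, 2, 3],
        #    [0, 1, 2],
        #    [0, 0, 1]]
        # i.e. entry [i, j] holds the length of the (inclusive) input span i..j, and the lower
        # triangle (invalid spans with j < i) stays 0.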
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'dropout_rate': 0.2,
'hidden_size': 64,
'initial_state_size': 64,
'num_layers': 1,
'memories_hidden_dimension': 128,
'additional_initial_state_inputs_size': 0,
'additional_inputs_size': 0,
'max_memories_length': 25,
# Ablations for the model
'use_max_pool_span_repr': False, # Default: False
'marginalize_over_copying_decisions': True, # Default: True
'teacher_force_longest': False # Default: False
}
TensorizedData = NamedTuple('GruSpanCopyingDecoderTensorizedData', [
('output_sequence', Any),
('copy_spans', np.ndarray)
])
def load_data_from_sample(self, data_to_load: SeqDecoder.InputOutputSequence) -> Optional['GruSpanCopyingDecoder.TensorizedData']:
max_seq_len = self.target_token_encoder.get_hyperparameter('max_seq_length')
target_output_sequence = self._add_start_end(data_to_load.output_sequence)[:max_seq_len]
copyable_spans = get_copyable_spans(data_to_load.input_sequence[:max_seq_len], target_output_sequence)
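        # copyable_spans[k, i, j] is expected to be True iff the target tokens starting at output
        # position k can be produced by copying the (inclusive) input span i..j; see
        # data.spanutils.get_copyable_spans for the exact construction.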
if self.get_hyperparameter('teacher_force_longest'):
teacher_forced_spans = np.zeros_like(copyable_spans)
for k in range(copyable_spans.shape[0]):
copyable_ranges = np.nonzero(copyable_spans[k])
if len(copyable_ranges[0]) == 0:
continue
max_i, max_j = max(zip(*copyable_ranges), key=lambda x: x[1]- x[0])
teacher_forced_spans[k, max_i, max_j] = True
copyable_spans = teacher_forced_spans
return self.TensorizedData(
output_sequence=self.target_token_encoder.load_data_from_sample(target_output_sequence),
copy_spans=copyable_spans
)
def initialize_minibatch(self) -> Dict[str, Any]:
return {
'output_sequences': self.target_token_encoder.initialize_minibatch(),
'copy_spans': []
}
def extend_minibatch_by_sample(self, datapoint: 'GruSpanCopyingDecoder.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
continue_extending = self.target_token_encoder.extend_minibatch_by_sample(
datapoint=datapoint.output_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
accumulated_minibatch_data['copy_spans'].append(datapoint.copy_spans)
return continue_extending
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
accumulated_copy_spans = accumulated_minibatch_data['copy_spans']
max_out_length = min(self.target_token_encoder.get_hyperparameter('max_seq_length'), max(c.shape[0] for c in accumulated_copy_spans))
max_in_length = min(self.get_hyperparameter('max_memories_length'), max(c.shape[1] for c in accumulated_copy_spans))
        padded_copy_spans = np.zeros((len(accumulated_copy_spans), max_out_length-1, max_in_length, max_in_length), dtype=bool)
for i, copy_span in enumerate(accumulated_copy_spans):
copy_spans = accumulated_copy_spans[i]
out_seq_size = min(max_out_length, copy_spans.shape[0])
inp_seq_size = min(max_in_length, copy_spans.shape[1])
# Start from 1 because of <sos>
padded_copy_spans[i, :out_seq_size-1, :inp_seq_size, :inp_seq_size] = copy_spans[1:out_seq_size, :inp_seq_size, :inp_seq_size]
return {
'output_sequences': self.target_token_encoder.finalize_minibatch(accumulated_minibatch_data['output_sequences']),
'copyable_spans': torch.tensor(padded_copy_spans, dtype=torch.uint8, device=self.device)
}
def forward(self, *, memories, memories_lengths, initial_state, copyable_spans, output_sequences: Dict[str, Any],
input_sequence_token_embeddings=None, additional_decoder_input=None, **kwargs):
likelihood = self.compute_likelihood(memories=memories, memories_lengths=memories_lengths, initial_state=initial_state,
copyable_spans=copyable_spans, output_sequences=output_sequences,
input_sequence_token_embeddings=input_sequence_token_embeddings,
additional_decoder_input=additional_decoder_input, normalize=True)
loss = -likelihood.mean()
with torch.no_grad():
self._num_minibatches += 1
self._loss_sum += float(loss)
return loss
def __get_output_logprobs(self, decoded_token_embeddings, state, memories, memory_lengths, additional_decoder_input=None):
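        """
        Compute per-step log-probabilities for generating a vocabulary token or copying an input span.

        Shapes (following the comments in this class):
        :param decoded_token_embeddings: B x max-out-len-1 x D
        :param state: num_layers x B x H
        :param memories: B x max-in-len x H
        :param memory_lengths: B
        :return: (generation_logprobs: B x max-out-len-1 x V,
                  copy_span_logprobs: B x max-out-len-1 x max-in-len x max-in-len,
                  h_out: num_layers x B x H)
        """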
if additional_decoder_input is not None:
tiled_inputs = additional_decoder_input.unsqueeze(1).repeat(1, decoded_token_embeddings.size(1), 1) # B x max-out-len-1 x D
decoded_token_embeddings = torch.cat([decoded_token_embeddings, tiled_inputs], dim=-1) # B x max-out-len-1 x 2*D
output_states, h_out = self.__output_gru.forward(decoded_token_embeddings,
state) # B x max-out-len-1 x H
output_states = output_states.contiguous()
output_states_flat = self.__standard_attention.forward(
memories=memories, memories_length=memory_lengths,
lookup_vectors=output_states) # (B * max-out-len - 1) x H
output_states_w_attention = output_states_flat.view(output_states.shape)
output_logits = torch.einsum('blh,hd,vd->blv', output_states_w_attention, self.__hidden_to_output,
self.target_token_encoder.embedding_matrix) + self.__vocabulary_bias.unsqueeze(0).unsqueeze(0)
copy_output_states_flat = self.__copy_attention.forward(
memories=memories, memories_length=memory_lengths,
lookup_vectors=output_states) # (B * max_out_len - 1) x H, B x max_out_len - 1 x max_inp_len
copy_output_states = copy_output_states_flat.view(output_states.shape) # B x (max_out_len - 1) x H
memory_a = memories.unsqueeze(1).repeat_interleave(repeats=memories.shape[1], dim=1) # B x max-in-len x max-in-len x H
memory_b = memories.unsqueeze(2).repeat_interleave(repeats=memories.shape[1], dim=2) # B x max-in-len x max-in-len x H
all_memory_pairs = [memory_a, memory_b]
if self.get_hyperparameter('use_max_pool_span_repr'):
# An additional representation of the span by max pooling the encoder states.
max_input_length = memories.shape[1]
all_memories = memories.reshape((memories.shape[0], 1, 1, max_input_length, memories.shape[2])) # B x 1 x 1 x max-in-len x H
all_memories = all_memories.repeat((1, max_input_length, max_input_length, 1, 1)) # B x max-in-len x max-in-len x max-in-len x H
            positions = torch.arange(max_input_length, device=self.device)
            # valid_range_elements[i, j, k] = True iff i <= j and i <= k <= j
            valid_range_elements = (positions.reshape(-1, 1, 1) <= positions.reshape(1, 1, -1)) & (positions.reshape(1, 1, -1) <= positions.reshape(1, -1, 1))  # max-in-len x max-in-len x max-in-len
invalid_range_elements = ~valid_range_elements # max-in-len x max-in-len x max-in-len
all_memories.masked_fill_(mask=invalid_range_elements.unsqueeze(0).unsqueeze(-1), value=0)
span_representation2, _ = all_memories.max(dim=-2) # B x max-in-len x max-in-len x H
all_memory_pairs.append(span_representation2)
        all_memory_pairs = torch.cat(all_memory_pairs, dim=-1)  # B x max-in-len x max-in-len x k*H (k = 2, or 3 with use_max_pool_span_repr)
copy_span_logits = torch.einsum('bmnh,hd,bld->blmn', all_memory_pairs, self.__hidden_to_end_span_query_vector,
copy_output_states)
        # Mask the lower triangle with -inf (a span's end cannot precede its start) and mask out positions beyond each memory's true length
        positions = torch.arange(copy_span_logits.shape[-1], device=self.device)
        copy_span_logits.masked_fill_(
            mask=
            positions.unsqueeze(-1).gt(positions.unsqueeze(0)).unsqueeze(0).unsqueeze(0) |
            torch.arange(memories.shape[1], device=self.device).unsqueeze(0).ge(memory_lengths.unsqueeze(-1)).unsqueeze(1).unsqueeze(-1),  # B x max_out_len-1 x max_in_len x max_in_len
            value=float('-inf')
        )
all_action_logits = torch.cat([output_logits, copy_span_logits.flatten(-2)], dim=-1) # B x max_out_len-1 x V + max_in_len*max_in_len
all_action_logprobs = nn.functional.log_softmax(all_action_logits, dim=-1)
generation_logprobs = all_action_logprobs[:, :, :output_logits.shape[-1]] # B x max_out_len-1 x V
copy_span_logprobs = all_action_logprobs[:, :, output_logits.shape[-1]:].reshape(copy_span_logits.shape)
return generation_logprobs, copy_span_logprobs, h_out
def compute_likelihood(self, *, memories, memories_lengths, initial_state, copyable_spans, output_sequences: Dict[str, Any],
input_sequence_token_embeddings=None, data_as_demonstrator_rate: float=0, normalize: bool=False, additional_decoder_input=None,
return_debug_info: bool = False):
# copyable_spans: B x max_out_len-1 x max-inp-len x max-inp-len
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state,
(memories.shape[0], self.get_hyperparameter('num_layers'), self.get_hyperparameter('hidden_size'))).transpose(0, 1).contiguous() # num_layers x B x H
target_token_embeddings, sequence_lengths = self.target_token_encoder.forward(as_packed_sequence=False,
**output_sequences) # B x max_len x D and B
target_token_embeddings = target_token_embeddings[:, :-1]
generation_logprobs, copy_span_logprobs, _ = self.__get_output_logprobs(target_token_embeddings, initial_state,
memories, memories_lengths,
additional_decoder_input=additional_decoder_input)
target_generated_tokens = output_sequences['token_ids'][:, 1:]
generation_logprobs_flat = generation_logprobs.flatten(0, 1)
generation_target_logprob = generation_logprobs_flat[
torch.arange(generation_logprobs_flat.shape[0]),
target_generated_tokens.flatten(0, 1)
].view(target_generated_tokens.shape) # B x max_out_len - 1
        # Credit generating an UNK token only when the target cannot be copied from the input
can_copy = torch.flatten(copyable_spans, start_dim=-2).max(dim=-1)[0] # B x max_out_len - 1
target_is_unk = target_generated_tokens.eq(
self.target_token_encoder.vocabulary.get_id_or_unk(Vocabulary.get_unk()))
generation_target_logprob.masked_fill_(
mask=can_copy.bool() & target_is_unk,
value=float('-inf')
)
# Marginalize over all actions
if self.get_hyperparameter('marginalize_over_copying_decisions'):
logprob_at_position = torch.zeros_like(can_copy, dtype=torch.float32) # B x max_out_len-1
max_in_length = copy_span_logprobs.shape[-1]
max_out_length = logprob_at_position.shape[1]
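            # Backward dynamic program over output positions: logprob_at_position[:, i] accumulates the
            # log-probability of producing the correct suffix starting at position i, marginalizing at
            # each step over "generate the gold token" versus "copy any valid input span" (copying a
            # span of length L advances the recursion to position i + L).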
for i in range(max_out_length-1, -1, -1):
                copy_end_idxs = torch.tensor(i + self.__span_lengths[:max_in_length, :max_in_length], dtype=torch.int64, device=self.device)\
                    .clamp(0, max_out_length-1)\
                    .expand(logprob_at_position.shape[0], -1, -1)  # B x max_in_len x max_in_len  # TODO: this is constant for a given i; precompute instead of __span_lengths
generation_after_copy_logprob = logprob_at_position.gather(dim=1, index=copy_end_idxs.flatten(-2)).reshape(copy_end_idxs.shape) # B x max_in_len x max_in_len
marginalized_at_pos_copy_logprobs = copy_span_logprobs[:, i] + generation_after_copy_logprob
# Mask invalid copy actions
marginalized_at_pos_copy_logprobs.masked_fill_(mask=(1 - copyable_spans[:, i]).bool(), value=float('-inf'))
action_logprobs = torch.cat([
(generation_target_logprob[:, i] + (logprob_at_position[:, i+1] if i < max_out_length -1 else 0)).unsqueeze(-1),
marginalized_at_pos_copy_logprobs.flatten(-2)
], dim=-1) # B x 1 + max_inp_len * max_inp_len
length_mask = (i < sequence_lengths-1).float() # -1 accounts for the <s> symbol
logprob_at_position = logprob_at_position.clone() # To allow for grad propagation
logprob_at_position[:, i] = torch.logsumexp(action_logprobs, dim=-1).clamp(min=float('-inf'), max=0) * length_mask
correct_seq_gen_logprob = logprob_at_position[:, 0] # B
else:
copy_target_logprobs = copy_span_logprobs.masked_fill(mask=(1 - copyable_spans).bool(), value=float('-inf')) # B x max_out_len-1 x max_in_len x max_in_len
copy_target_logprobs = copy_target_logprobs.flatten(2).logsumexp(dim=-1) # B x max_out_len-1
correct_seq_gen_logprob = torch.logsumexp(torch.stack([generation_target_logprob, copy_target_logprobs], dim=-1), dim=-1) # B x max_out_len-1
correct_seq_gen_logprob = correct_seq_gen_logprob.sum(dim=-1)
if normalize:
correct_seq_gen_logprob = correct_seq_gen_logprob / (sequence_lengths-1).float()
if return_debug_info:
return correct_seq_gen_logprob, {
'generation_logprobs': generation_logprobs.cpu().numpy(),
'copy_span_logprobs': copy_span_logprobs.cpu().numpy(),
'vocabulary': self.target_token_encoder.vocabulary
}
return correct_seq_gen_logprob
    def greedy_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
                      max_length: int = 50, additional_decoder_input=None) -> List[Tuple[List[List[str]], List[float]]]:
vocabulary = self.target_token_encoder.vocabulary
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state,
(initial_state.shape[0], self.get_hyperparameter('num_layers'), self.get_hyperparameter('hidden_size'))).transpose(0, 1).contiguous() # num_layers x B x H
current_decoder_state = initial_state # num_layers x B x H
batch_size = memories.shape[0]
next_tokens = torch.tensor(
[[vocabulary.get_id_or_unk(self.START)]] * batch_size,
device=self.device) # B x 1
        is_done = np.zeros(next_tokens.shape[0], dtype=bool)
predicted_tokens = [] # type: List[List[str]]
predicted_logprobs = [] # type: List[List[float]]
remaining_copied_span = [[] for _ in range(batch_size)] # type: List[List[str]]
actions_taken = [[] for _ in range(batch_size)]
for i in range(max_length):
if np.all(is_done):
max_length = i
break
# Embed next_tokens
target_token_embeddings, _ = self.target_token_encoder.forward(
as_packed_sequence=False,
token_ids=next_tokens,
lengths=None) # B x 1 x D
generation_logprobs, copy_span_logprobs, current_decoder_state = self.__get_output_logprobs(
target_token_embeddings, current_decoder_state, memories, memory_lengths,
additional_decoder_input=additional_decoder_input
)
generation_logprobs, token_to_generate = generation_logprobs.squeeze(1).max(dim=-1)
generation_logprobs = generation_logprobs.cpu().numpy()
token_to_generate = token_to_generate.squeeze(-1).cpu().numpy() # B
copy_span_logprobs = copy_span_logprobs.squeeze(1) # B x max-inp-len (span-start) x max-inp-len (span-end)
copy_target_span_logprobs, copy_span_idxs = copy_span_logprobs.flatten(start_dim=1).max(dim=-1)
copy_target_span_logprobs = copy_target_span_logprobs.cpu().numpy()
should_copy_logprobs = copy_target_span_logprobs
copy_span_idxs = copy_span_idxs.cpu().numpy()
copy_span_start = copy_span_idxs // copy_span_logprobs.shape[-1] # B
copy_span_end = copy_span_idxs % copy_span_logprobs.shape[-1] # B
assert np.all(copy_span_start <= copy_span_end)
predicted_tokens_for_this_step = [] # type: List[str]
predicted_logprobs_for_this_step = [] # type: List[float]
for j in range(batch_size):
if len(remaining_copied_span[j]) > 0:
# We still have a copied span, keep copying...
predicted_tokens_for_this_step.append(remaining_copied_span[j][0])
remaining_copied_span[j] = remaining_copied_span[j][1:]
predicted_logprobs_for_this_step.append(0) # We have already "paid" the loss for this copy, when we decided to copy the span.
else:
if should_copy_logprobs[j] >= generation_logprobs[j]:
# We copy
span_to_be_copied = memories_str_representations[j][copy_span_start[j]: copy_span_end[j]+1]
predicted_tokens_for_this_step.append(span_to_be_copied[0])
remaining_copied_span[j] = span_to_be_copied[1:]
predicted_logprobs_for_this_step.append(copy_target_span_logprobs[j])
actions_taken[j].append(f'Copy Span {span_to_be_copied}')
else:
# We generate a token
target_token_idx = token_to_generate[j]
target_token = vocabulary.get_name_for_id(target_token_idx)
predicted_tokens_for_this_step.append(target_token)
predicted_logprobs_for_this_step.append(generation_logprobs[j])
actions_taken[j].append(f'Generating Token {target_token}')
if predicted_tokens_for_this_step[-1] == self.END:
is_done[j] = True
predicted_logprobs.append(predicted_logprobs_for_this_step)
predicted_tokens.append(predicted_tokens_for_this_step)
next_tokens = torch.tensor([[vocabulary.get_id_or_unk(t)] for t in predicted_tokens_for_this_step],
device=self.device)
        # Finally, assemble the per-step predictions into output token sequences
predictions = [] # type: List[Tuple[List[List[str]], List[float]]]
for i in range(batch_size):
tokens = [predicted_tokens[j][i] for j in range(max_length)]
try:
end_idx = tokens.index(self.END)
except ValueError:
end_idx = max_length
tokens = tokens[:end_idx]
logprob = sum(float(predicted_logprobs[j][i]) for j in range(min(end_idx+1, max_length)))
predictions.append(([tokens], [logprob]))
# print(tokens, logprob, actions_taken[i])
return predictions
def beam_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int = 150, beam_size: int = 20, max_search_size: int = 50, additional_decoder_input=None) -> List[Tuple[List[List[str]], List[float]]]:
vocabulary = self.target_token_encoder.vocabulary
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state,
(memories.shape[0], self.get_hyperparameter('num_layers'), self.get_hyperparameter('hidden_size'))).transpose(0, 1).contiguous() # num_layers x B x H
current_decoder_state = initial_state # num_layers x B x H
batch_size = memories.shape[0]
next_tokens = torch.tensor(
[[vocabulary.get_id_or_unk(self.START)]] * batch_size,
device=self.device).unsqueeze(-1) # B x beam_size=1 x 1
        is_done = [np.zeros(1, dtype=bool) for _ in range(batch_size)]  # B x current_beam_size
predicted_tokens = [[[] for _ in range(beam_size)] for _ in range(batch_size)] # type: List[List[List[str]]]
predicted_logprobs = [[0 for _ in range(beam_size)] for _ in range(batch_size)] # type: List[List[float]]
remaining_copied_span = [[[] for _ in range(beam_size)] for _ in range(batch_size)] # type: List[List[List[str]]]
actions_taken = [[[] for _ in range(beam_size)] for _ in range(batch_size)] # type: List[List[List[str]]]
for i in range(max_length):
if np.all(is_done):
break
current_beam_size = (next_tokens.shape[0] * next_tokens.shape[1]) // batch_size
# Embed next_tokens
target_token_embeddings, _ = self.target_token_encoder.forward(
as_packed_sequence=False,
token_ids=next_tokens.flatten(0, 1),
lengths=None) # B * current_beam_size x 1 x D
generation_logprobs, copy_span_logprobs, current_decoder_state = self.__get_output_logprobs(
target_token_embeddings, current_decoder_state,
memories=memories.unsqueeze(1).expand(-1, current_beam_size, -1, -1).flatten(0,1),
memory_lengths=memory_lengths.unsqueeze(1).expand(-1, current_beam_size).flatten(0, 1),
additional_decoder_input=additional_decoder_input.unsqueeze(1).expand(-1, current_beam_size, -1).flatten(0, 1) if additional_decoder_input is not None else None
)
current_decoder_state = current_decoder_state.transpose(0, 1).reshape(
batch_size, current_beam_size, self.get_hyperparameter('num_layers'), -1) # B x current_beam_size x num_layers x H
generation_logprobs, token_to_generate = generation_logprobs.squeeze(1).topk(max_search_size, dim=-1)
generation_logprobs = generation_logprobs.reshape(batch_size, current_beam_size, max_search_size).cpu().numpy() # B x current_beam_size x max_search_size
token_to_generate = token_to_generate.squeeze(-1).reshape(batch_size, current_beam_size, max_search_size).cpu().numpy() # B x current_beam_size x max_search_size
copy_span_logprobs = copy_span_logprobs.squeeze(1) # B*current_beam_size x max-inp-len (span-start) x max-inp-len (span-end)
num_topk = min(max_search_size, copy_span_logprobs.shape[-1] * (copy_span_logprobs.shape[-1] - 1) // 2)
copy_target_span_logprobs, copy_span_idxs = copy_span_logprobs.flatten(start_dim=1).topk(num_topk, dim=-1)
copy_target_span_logprobs = copy_target_span_logprobs.reshape(batch_size, current_beam_size, num_topk).cpu().numpy() # B x current_beam_size x num_topk
copy_span_idxs = copy_span_idxs.reshape(batch_size, current_beam_size, num_topk).cpu().numpy()
copy_span_start = copy_span_idxs // copy_span_logprobs.shape[-1] # B x current_beam_size x max_search_size
copy_span_end = copy_span_idxs % copy_span_logprobs.shape[-1] # B x current_beam_size x max_search_size
assert np.all(copy_span_start <= copy_span_end)
predicted_tokens_for_this_step = []
            next_decoder_state = np.zeros((batch_size, beam_size) + current_decoder_state.shape[-2:], dtype=np.float32)
for j in range(batch_size):
beam_predicted_tokens = []
beam_predicted_logprobs = []
beam_remaining_copied_spans = []
beam_is_done = []
beam_state_idx = []
beam_action_taken = []
for k in range(current_beam_size):
if is_done[j][k]:
beam_predicted_tokens.append(predicted_tokens[j][k])
beam_remaining_copied_spans.append([])
beam_predicted_logprobs.append(predicted_logprobs[j][k])
beam_is_done.append(True)
beam_state_idx.append(k)
beam_action_taken.append(actions_taken[j][k])
continue
if len(remaining_copied_span[j][k]) > 0:
# We still have a copied span, keep copying... the beam of suggestions makes no sense here.
beam_predicted_tokens.append(predicted_tokens[j][k] + remaining_copied_span[j][k][:1])
beam_remaining_copied_spans.append(remaining_copied_span[j][k][1:])
beam_predicted_logprobs.append(predicted_logprobs[j][k]) # We have already "paid" the loss for this copy, when we decided to copy the span.
beam_is_done.append(remaining_copied_span[j][k][0] == self.END)
beam_state_idx.append(k)
beam_action_taken.append(actions_taken[j][k])
else:
for l in range(num_topk):
# Option 1: We copy
span_to_be_copied = memories_str_representations[j][copy_span_start[j, k, l]: copy_span_end[j, k, l] + 1]
assert copy_span_start[j, k, l] < copy_span_end[j, k, l] + 1
assert len(span_to_be_copied) > 0
beam_predicted_tokens.append(predicted_tokens[j][k] + span_to_be_copied[:1])
beam_remaining_copied_spans.append(span_to_be_copied[1:])
beam_predicted_logprobs.append(predicted_logprobs[j][k] + copy_target_span_logprobs[j, k, l])
beam_is_done.append(span_to_be_copied[0]==self.END)
beam_state_idx.append(k)
beam_action_taken.append(actions_taken[j][k] + ['Copy Span ' + str(span_to_be_copied)])
# Option 2: We generate a token
target_token_idx = token_to_generate[j, k, l]
target_token = vocabulary.get_name_for_id(target_token_idx)
# There are rare cases where an empty sequence is predicted. Explicitly add the END token in those cases.
beam_predicted_tokens.append(predicted_tokens[j][k] + [target_token])
beam_is_done.append(target_token == self.END)
beam_remaining_copied_spans.append([])
beam_predicted_logprobs.append(predicted_logprobs[j][k] + generation_logprobs[j, k, l])
beam_state_idx.append(k)
beam_action_taken.append(actions_taken[j][k] + ['Generate ' + target_token])
# Merge identical beams
idxs_for_next_beam = np.argsort(beam_predicted_logprobs)[::-1] # Descending order
to_ignore = set()
for k in idxs_for_next_beam:
if k in to_ignore:
continue
for l in range(k+1, len(beam_predicted_tokens)):
if beam_predicted_tokens[k] == beam_predicted_tokens[l] and len(beam_remaining_copied_spans[l]) == 0 and len(beam_remaining_copied_spans[k]) == 0:
# l can and should be merged into k
to_ignore.add(l)
beam_predicted_logprobs[k] = min(0, np.logaddexp(beam_predicted_logprobs[k], beam_predicted_logprobs[l]))
if len(beam_action_taken[l]) < len(beam_action_taken[k]):
beam_action_taken[k] = beam_action_taken[l]
# Now merge all the beams and pick the top beam_size elements
idxs_for_next_beam = np.argsort(beam_predicted_logprobs)[::-1]
idxs_for_next_beam = [idx for idx in idxs_for_next_beam if idx not in to_ignore][:beam_size] # Remove merged idxs
if len(idxs_for_next_beam) < beam_size:
# In some cases, we won't have enough elements to fill in the beam (due to merging). Fill them with dummy elements
beam_predicted_logprobs.append(float('-inf'))
idxs_for_next_beam = idxs_for_next_beam + [-1] * (beam_size - len(idxs_for_next_beam))
predicted_tokens[j] = [beam_predicted_tokens[k] for k in idxs_for_next_beam]
predicted_logprobs[j] = [beam_predicted_logprobs[k] for k in idxs_for_next_beam]
actions_taken[j] = [beam_action_taken[k] for k in idxs_for_next_beam]
remaining_copied_span[j] = [beam_remaining_copied_spans[k] for k in idxs_for_next_beam]
                is_done[j] = np.array([beam_is_done[k] for k in idxs_for_next_beam], dtype=bool)
predicted_tokens_for_this_step.append([beam_predicted_tokens[k][-1] for k in idxs_for_next_beam])
for k, idx in enumerate(idxs_for_next_beam):
next_decoder_state[j, k] = current_decoder_state[j, beam_state_idx[idx]].cpu().numpy()
next_tokens = torch.tensor([[vocabulary.get_id_or_unk(t) for t in r] for r in predicted_tokens_for_this_step], device=self.device).unsqueeze(-1)
current_decoder_state = torch.tensor(next_decoder_state, device=self.device, dtype=torch.float32).flatten(0, 1).reshape(
(self.get_hyperparameter('num_layers'), -1, self.get_hyperparameter('hidden_size'))
) # num_layers x B * beam_size x H
# Probe for visualizing decoding
for i in range(batch_size):
print('\nInput: ', ' '.join(memories_str_representations[i]))
for j in range(2):
print(f'>Pred{j+1}', ' '.join(predicted_tokens[i][j]), predicted_logprobs[i][j], actions_taken[i][j])
return [([s[:-1] for s in predicted_tokens[i]], predicted_logprobs[i]) for i in range(batch_size)]
# ===================== module boundary =====================
import heapq
from collections import defaultdict
from typing import Dict, Any, Optional, List, Tuple, NamedTuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
from torch import nn
from mlcomponents.embeddings import TokenSequenceEmbedder
from . import SeqDecoder
from .luongattention import LuongAttention
LARGE_NUMBER = 5000
class GruCopyingDecoder(SeqDecoder):
def __init__(self, name: str, token_encoder: TokenSequenceEmbedder,
standard_attention: Optional[LuongAttention]=None,
pre_trained_gru: Optional[nn.GRU] = None,
hyperparameters: Optional[Dict[str, Any]]=None,
include_summarizing_network: bool = True
) -> None:
super(GruCopyingDecoder, self).__init__(name, token_encoder, hyperparameters)
        self.__output_gru = pre_trained_gru  # type: Optional[nn.GRU]
self.__standard_attention = standard_attention # type: Optional[LuongAttention]
self.__dropout_layer = None # type: Optional[nn.Dropout]
        self.__cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.__include_summarizing_network = include_summarizing_network
self.__summarization_layer = None
self.reset_metrics()
def _finalize_component_metadata_and_model(self) -> None:
if self.__output_gru is None:
self.__output_gru = nn.GRU(
input_size=self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size'),
hidden_size=self.get_hyperparameter('hidden_size'),
num_layers=self.get_hyperparameter('num_layers'),
batch_first=True
)
else:
assert self.__output_gru.hidden_size == self.get_hyperparameter('hidden_size')
assert self.__output_gru.num_layers == 1
assert self.__output_gru.input_size == self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size')
assert self.__output_gru.batch_first
self.__dropout_layer = nn.Dropout(p=self.get_hyperparameter('dropout_rate'))
if self.__include_summarizing_network:
self.__summarization_layer = nn.Linear(
self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size'),
self.get_hyperparameter('hidden_size') * self.get_hyperparameter('num_layers'), bias=False
)
else:
assert self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size') ==\
self.get_hyperparameter('hidden_size') * self.get_hyperparameter('num_layers'), 'Initial states sizes do not match.'
self.__hidden_to_output = nn.Parameter(torch.randn(self.get_hyperparameter('hidden_size'),
self.target_token_encoder.embedding_size,
dtype=torch.float, requires_grad=True)*.1)
self.__hidden_to_query_vector = nn.Parameter(torch.randn(self.get_hyperparameter('memories_hidden_dimension'),
self.get_hyperparameter('hidden_size'),
dtype=torch.float, requires_grad=True)*.1)
self.__vocabulary_bias = nn.Parameter(torch.zeros(len(self.target_token_encoder.vocabulary),
dtype=torch.float, requires_grad=True))
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'dropout_rate': 0.2,
'hidden_size': 64,
'num_layers': 1,
'initial_state_size': 64,
'memories_hidden_dimension': 128,
'additional_initial_state_inputs_size': 0,
'additional_inputs_size': 0,
'max_memories_length': 25,
'data_as_demonstrator_rate': 0.
}
TensorizedData = NamedTuple('GruCopyingDecoderTensorizedData', [
('output_sequence', Any),
('copy_locations', np.ndarray)
])
def load_data_from_sample(self, data_to_load: SeqDecoder.InputOutputSequence) -> Optional['GruCopyingDecoder.TensorizedData']:
return self.TensorizedData(
output_sequence=self.target_token_encoder.load_data_from_sample(self._add_start_end(data_to_load.output_sequence)),
copy_locations=self.__get_copy_locations(data_to_load.input_sequence, self._add_start_end(data_to_load.output_sequence))
)
def __get_copy_locations(self, input_sequence, output_sequence):
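        """
        Build a boolean O x I matrix whose entry [o, i] is True iff output token o equals input token i.

        Illustrative example (hypothetical tokens):
            input_sequence  = ['x', '=', 'y']
            output_sequence = ['[CLS]', 'y', '=', 'x', '[SEP]']
            -> entries [1, 2], [2, 1] and [3, 0] are True, everything else is False.
        """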
max_in_length = min(len(input_sequence), self.get_hyperparameter('max_memories_length'))
max_out_length = min(len(output_sequence), self.target_token_encoder.get_hyperparameter('max_seq_length'))
        copy_locations = np.zeros((max_out_length, max_in_length), dtype=bool)
        input_sequence_elements = np.array([input_sequence[:max_in_length]], dtype=object)  # 1 x I
        output_sequence_elements = np.array([output_sequence[:max_out_length]], dtype=object).T  # O x 1
        copy_locations[:max_out_length, :max_in_length] = input_sequence_elements == output_sequence_elements  # O x I
return copy_locations
def initialize_minibatch(self) -> Dict[str, Any]:
return {
'output_sequences': self.target_token_encoder.initialize_minibatch(),
'copy_locations': []
}
def extend_minibatch_by_sample(self, datapoint: 'GruCopyingDecoder.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
continue_extending = self.target_token_encoder.extend_minibatch_by_sample(
datapoint=datapoint.output_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
accumulated_minibatch_data['copy_locations'].append(datapoint.copy_locations)
return continue_extending
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
accumulated_copy_locations = accumulated_minibatch_data['copy_locations']
max_in_length = max(t.shape[1] for t in accumulated_copy_locations)
max_out_length = max(t.shape[0] for t in accumulated_copy_locations)
batch_size = len(accumulated_copy_locations)
        copy_locations = np.zeros((batch_size, max_out_length, max_in_length), dtype=bool)
for i in range(batch_size):
locations_for_i = accumulated_copy_locations[i]
copy_locations[i, :locations_for_i.shape[0], :locations_for_i.shape[1]] = locations_for_i
return {
'output_sequences': self.target_token_encoder.finalize_minibatch(accumulated_minibatch_data['output_sequences']),
'copy_locations': torch.tensor(copy_locations, dtype=torch.int64, device=self.device)
}
def __get_output_logits(self, decoded_token_embeddings, state, memories, memory_lengths, sample_rate: float=0,
input_sequence_token_embeddings=None, additional_rnn_input=None):
if additional_rnn_input is not None:
tiled_additional_decoder_input = additional_rnn_input.unsqueeze(1).repeat(1,
decoded_token_embeddings.size(1),
1) # B x max-out-len-1 x D
tiled_additional_decoder_input = torch.repeat_interleave(tiled_additional_decoder_input,
int(decoded_token_embeddings.size(
0) / additional_rnn_input.size(0)),
dim=0) # B*beam width x max-out-len-1 x D
decoded_token_embeddings = torch.cat([decoded_token_embeddings, tiled_additional_decoder_input], dim=-1)
if not self.training or sample_rate == 0 or input_sequence_token_embeddings is None:
return self.__get_output_logits_xent(decoded_token_embeddings, state, memories, memory_lengths)
else:
if input_sequence_token_embeddings is None:
raise Exception('The input sequence token embeddings cannot be None when using data-as-demonstrator.')
return self.__get_output_logits_dad(decoded_token_embeddings, state, memories, memory_lengths, sample_rate,
input_sequence_token_embeddings, additional_rnn_input)
def __get_output_logits_xent(self, decoded_token_embeddings, state, memories, memory_lengths):
"""
:param decoded_token_embeddings: B x max-out-len x D
:param state: B x D
:param memories: B x max-in-len x H
:param memory_lengths: B
:return: output_logits: B x max-out-len x V, copy_logits B x max-out-len x max-in-len, h_out: num-layers x B x H
"""
output_states, h_out = self.__output_gru.forward(decoded_token_embeddings, state) # B x max-out-len-1 x H
output_states = output_states.contiguous()
if self.__standard_attention is not None:
output_states_flat = self.__standard_attention.forward(
memories=memories, memories_length=memory_lengths,
lookup_vectors=output_states) # (B * max-out-len - 1) x H
output_states = output_states_flat.view(output_states.shape)
output_logits = torch.einsum('blh,hd,vd->blv', output_states, self.__hidden_to_output,
self.target_token_encoder.embedding_matrix) + self.__vocabulary_bias.view(1, 1, -1)
copy_logits = torch.einsum('bmh,hd,bld->blm', memories, self.__hidden_to_query_vector, output_states)
copy_logits.masked_fill_(mask=torch.arange(memories.shape[1], device=self.device).view(1, 1, -1) >= memory_lengths.view(-1, 1, 1),
value=float('-inf'))
return output_logits, copy_logits, h_out
def __get_output_logits_dad(self, decoded_token_embeddings, state, memories, memory_lengths,
sample_rate: float, input_sequence_token_embeddings, rnn_additional_input):
"""
:param decoded_token_embeddings: B x max-out-len x D
:param state: num_layers x B x D
:param memories: B x max-in-len x H
:param memory_lengths: B
:return: output_logits: B x max-out-len x V, copy_logits B x max-out-len x max-in-len, h_out: num-layers x B x H
"""
output_logits = []
copy_logits = []
current_state = state # num_layers x B x D
for i in range(decoded_token_embeddings.shape[1]):
# Sample inputs
use_base = np.random.random() > sample_rate
if use_base or i == 0:
inputs = decoded_token_embeddings[:, i]
else:
# Sample from previous decision
last_prediction_logits = torch.cat([output_logits[-1], copy_logits[-1]], dim=-1) # B x V + max-len
sampled_ids = torch.multinomial(nn.functional.softmax(last_prediction_logits, dim=-1), num_samples=1).squeeze(1) # B
# To avoid constructing a large batched lookup table, do this separately.
vocab_embeddings = self.target_token_encoder.embedding_matrix[
torch.min(sampled_ids,
torch.ones_like(sampled_ids) * self.target_token_encoder.embedding_matrix.shape[0]-1)]
copy_embeddings = input_sequence_token_embeddings[
torch.arange(sampled_ids.shape[0], device=self.device),
torch.nn.functional.relu(sampled_ids-self.target_token_encoder.embedding_matrix.shape[0]) # Clamp to 0
]
inputs = torch.where((sampled_ids<self.target_token_encoder.embedding_matrix.shape[0]).view(-1, 1),
vocab_embeddings,
copy_embeddings) # B x D
if rnn_additional_input is not None:
inputs = torch.cat([inputs, rnn_additional_input], dim=-1)
# Now run one step of GRU
inputs = inputs.unsqueeze(1) # B x 1 x D
output_states, h_out = self.__output_gru.forward(inputs,
current_state)
output_states = output_states.contiguous()
if self.__standard_attention is not None:
output_states_flat = self.__standard_attention.forward(
memories=memories, memories_length=memory_lengths,
lookup_vectors=output_states) # (B * 1) x H
output_states = output_states_flat.view(output_states.shape)
output_states = output_states.squeeze(1) # B x H
next_output_logits = torch.einsum('bh,hd,vd->bv', output_states, self.__hidden_to_output,
self.target_token_encoder.embedding_matrix) + self.__vocabulary_bias.view(1, -1)
next_copy_logits = torch.einsum('bmh,hd,bd->bm', memories, self.__hidden_to_query_vector, output_states)
next_copy_logits.masked_fill_(torch.arange(next_copy_logits.shape[1], device=self.device).view(1, -1) >= memory_lengths.view(-1, 1), float('-inf'))
# Append logits
output_logits.append(next_output_logits)
copy_logits.append(next_copy_logits)
current_state = h_out
return torch.stack(output_logits, dim=1), torch.stack(copy_logits, dim=1), current_state
def forward(self, *, memories, memories_lengths, initial_state, copy_locations, output_sequences: Dict[str, Any],
input_sequence_token_embeddings=None, additional_decoder_input=None):
likelihood = self.compute_likelihood(memories=memories, memories_lengths=memories_lengths,
initial_state=initial_state, copy_locations=copy_locations, output_sequences=output_sequences,
input_sequence_token_embeddings=input_sequence_token_embeddings,
additional_decoder_input=additional_decoder_input, normalize=True)
loss = -(likelihood).mean()
with torch.no_grad():
self._num_minibatches += 1
self._loss_sum += float(loss)
return loss
def compute_likelihood(self, *, memories, memories_lengths, initial_state, copy_locations, output_sequences: Dict[str, Any],
input_sequence_token_embeddings=None, normalize: bool=False, additional_decoder_input=None,
return_debug_info: bool = False):
# copy_locations: B x max-out-len x max-inp-len
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state,
(initial_state.shape[0], self.get_hyperparameter('num_layers'), self.get_hyperparameter('hidden_size'))).transpose(0, 1).contiguous() # num_layers x B x H
copy_locations = copy_locations[:, 1:, :memories.shape[1]] # The input might be truncated if all sequences are smaller.
target_token_embeddings, sequence_lengths = self.target_token_encoder.forward(as_packed_sequence=False,
**output_sequences) # B x max_len x D and B
target_token_embeddings = target_token_embeddings[:, :-1]
output_logits, copy_logits, _ = self.__get_output_logits(target_token_embeddings, initial_state, memories, memories_lengths,
sample_rate=self.get_hyperparameter('data_as_demonstrator_rate') if self.training else 0,
input_sequence_token_embeddings=input_sequence_token_embeddings,
additional_rnn_input=additional_decoder_input
)
# Merge the output and copy logits
logits = torch.cat([output_logits, copy_logits], dim=-1) # B x max-out-len x V + max-in-len
log_probs = nn.functional.log_softmax(logits, dim=-1)
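        # The softmax is taken jointly over the vocabulary and the copyable input positions, so
        # generating a token and copying it compete for the same probability mass; below, the
        # likelihood of each target token log-sum-exps over both ways of producing it.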
can_copy = copy_locations.max(dim=-1)[0].gt(0) # B x max-out-len
target_generated_tokens = output_sequences['token_ids'][:, 1:].flatten(0, 1)
target_is_unk = target_generated_tokens.eq(self.target_token_encoder.vocabulary.get_id_or_unk(Vocabulary.get_unk()))
generation_logprobs = log_probs.flatten(0, 1)[torch.arange(target_generated_tokens.shape[0]), target_generated_tokens]
generation_logprobs = generation_logprobs + (target_is_unk & can_copy.flatten(0, 1)).float() * -LARGE_NUMBER # B x max-out-len
copy_logprobs = log_probs[:, :, -copy_logits.shape[-1]:] # B x max-out-len x max-in-len
copy_logprobs = copy_logprobs + copy_locations.ne(1).float() * -LARGE_NUMBER
copy_logprobs = copy_logprobs.logsumexp(dim=-1) # B x max-out-len
mask = torch.arange(target_token_embeddings.shape[1], device=self.device).unsqueeze(0) < sequence_lengths.unsqueeze(-1)-1 # B x max-out-len - 1
full_logprob = torch.logsumexp(torch.stack([generation_logprobs, copy_logprobs.flatten(0, 1)], dim=-1), dim=-1) # B x max-out-len
full_logprob = full_logprob.view(copy_logprobs.shape) * mask.float()
if normalize:
return full_logprob.sum(dim=-1) / (sequence_lengths-1).float()
else:
return full_logprob.sum(dim=-1)
def greedy_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int=40, additional_decoder_input=None) -> List[Tuple[List[List[str]], List[float]]]:
vocabulary = self.target_token_encoder.vocabulary
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state, (self.get_hyperparameter('num_layers'), -1, self.get_hyperparameter('hidden_size'))) # num_layers x B x H
current_decoder_state = initial_state # num_layers x B x H
next_token_sequences = torch.tensor(
[[vocabulary.get_id_or_unk(self.START)]] * memories.shape[0],
device=self.device) # B x 1
predicted_tokens = [] # List[List[str]]
predicted_logprobs = [] # List[float]
for i in range(max_length):
# Embed next_token_sequences
target_token_embeddings, _ = self.target_token_encoder.forward(
as_packed_sequence=False,
token_ids=next_token_sequences,
lengths=None) # B x 1 x D
output_logits, copy_logits, current_decoder_state = self.__get_output_logits(
target_token_embeddings, current_decoder_state, memories, memory_lengths, additional_rnn_input=additional_decoder_input)
logits = torch.cat([output_logits, copy_logits], dim=-1) # B x 1 x V + max-in-len
log_probs = nn.functional.log_softmax(logits, dim=-1)
copy_logprobs = log_probs[:, :, -copy_logits.shape[-1]:]
log_probs = log_probs.cpu().numpy()
copy_logprobs = copy_logprobs.cpu().numpy()
output_logprobs = [] # type: List[Dict[str, float]]
for j in range(logits.shape[0]):
sample_logprobs = defaultdict(lambda :float('-inf'))
for k in range(len(vocabulary)):
sample_logprobs[vocabulary.get_name_for_id(k)] = log_probs[j, 0, k]
output_logprobs.append(sample_logprobs)
for j in range(logits.shape[0]):
for k in range(memory_lengths[j]):
target_word = memories_str_representations[j][k]
output_logprobs[j][target_word] = np.logaddexp(output_logprobs[j][target_word], copy_logprobs[j, 0, k])
predicted_tokens_for_this_step = []
predicted_logprobs_for_this_step = []
for j in range(logits.shape[0]):
best_word, best_logprob = None, float('-inf')
for word, logprob in output_logprobs[j].items():
if best_logprob < logprob:
best_logprob = logprob
best_word = word
assert best_word is not None
predicted_tokens_for_this_step.append(best_word)
predicted_logprobs_for_this_step.append(best_logprob)
predicted_logprobs.append(predicted_logprobs_for_this_step)
predicted_tokens.append(predicted_tokens_for_this_step)
next_token_sequences = torch.tensor([[vocabulary.get_id_or_unk(t)] for t in predicted_tokens_for_this_step],
device=self.device)
        return self.convert_ids_to_str(memory_lengths.shape[0], max_length, predicted_logprobs, predicted_tokens)
def convert_ids_to_str(self, batch_size: int, max_length: int, predicted_logprobs, predicted_tokens):
predictions = [] # type: List[Tuple[List[List[str]], List[float]]]
for i in range(batch_size):
tokens = [predicted_tokens[j][i] for j in range(max_length)]
try:
end_idx = tokens.index(self.END)
except ValueError:
end_idx = max_length
tokens = tokens[:end_idx]
logprob = sum(float(predicted_logprobs[j][i]) for j in range(min(end_idx, max_length)))
predictions.append(([tokens], [logprob]))
return predictions
def beam_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int=150, max_beam_size: int=20, additional_decoder_input=None) -> List[Tuple[List[List[str]], List[float]]]:
vocabulary = self.target_token_encoder.vocabulary
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
initial_state = torch.reshape(initial_state, (self.get_hyperparameter('num_layers'), -1, self.get_hyperparameter('hidden_size'))) # num_layers x B x H
batch_size = memory_lengths.shape[0]
current_decoder_state = initial_state # B*beam_size=1 x H
frontier_tokens = torch.tensor(
[[vocabulary.get_id_or_unk(self.START)]] * batch_size,
device=self.device).unsqueeze(-1) # B x beam_size=1 x 1
sequence_logprobs = torch.zeros(frontier_tokens.shape[:2], dtype=torch.float, device=self.device)
        is_done = np.zeros(frontier_tokens.shape[:2], dtype=bool)
predicted_tokens_beam = [[list() for _ in range(max_beam_size)] for _ in range(batch_size)] # type: List[List[List[str]]]
for i in range(max_length):
if np.all(is_done):
break
beam_size = frontier_tokens.shape[1]
# Embed frontier_tokens
target_token_embeddings, _ = self.target_token_encoder.forward(
as_packed_sequence=False,
token_ids=frontier_tokens.flatten(0, 1),
lengths=None) # B*beam_size x 1 x D
output_logits, copy_logits, current_decoder_state = self.__get_output_logits(
target_token_embeddings, current_decoder_state,
memories.unsqueeze(1).expand(-1, beam_size, -1, -1).flatten(0,1),
memory_lengths.unsqueeze(1).expand(-1, beam_size).flatten(0,1), additional_rnn_input=additional_decoder_input)
current_decoder_state = current_decoder_state.transpose(0, 1).contiguous() # B*beam_size x num_layers x H
logits = torch.cat([output_logits, copy_logits], dim=-1) # B*beam_size x 1 x V + max-in-len
log_probs = nn.functional.log_softmax(logits, dim=-1)
copy_logprobs = log_probs[:, :, -copy_logits.shape[-1]:]
log_probs = log_probs.cpu().numpy()
copy_logprobs = copy_logprobs.cpu().numpy()
next_sequence_logprobs = torch.zeros((batch_size, max_beam_size), dtype=torch.float, device=self.device)
            next_is_done = np.zeros((batch_size, max_beam_size), dtype=bool)
next_frontier_tokens = torch.zeros((batch_size, max_beam_size), dtype=torch.int64, device=self.device)
next_decoder_state = torch.zeros((batch_size * max_beam_size, self.get_hyperparameter('num_layers'), current_decoder_state.shape[-1]),
dtype=torch.float, device=self.device)
for batch_idx in range(batch_size):
per_beam_logprobs = []
for beam_idx in range(beam_size):
idx = batch_idx * frontier_tokens.shape[1] + beam_idx
sample_logprobs = defaultdict(lambda: float('-inf'))
for k in np.argsort(-log_probs[idx, 0, :len(vocabulary)])[:200]: # To speed things up use only top words
sample_logprobs[vocabulary.id_to_token[k]] = log_probs[idx, 0, k]
for k in range(memory_lengths[batch_idx]):
target_word = memories_str_representations[batch_idx][k]
if target_word in sample_logprobs:
sample_logprobs[target_word] = np.logaddexp(sample_logprobs[target_word], copy_logprobs[idx, 0, k])
else:
sample_logprobs[target_word] = copy_logprobs[idx, 0, k]
per_beam_logprobs.append(sample_logprobs)
# Pick next beam
def all_elements():
for beam_idx in range(beam_size):
if is_done[batch_idx][beam_idx]:
yield beam_idx, None, True, sequence_logprobs[batch_idx, beam_idx]
else:
for word, word_logprob in per_beam_logprobs[beam_idx].items():
yield beam_idx, word, word == self.END, word_logprob + sequence_logprobs[batch_idx, beam_idx]
top_elements = heapq.nlargest(n=max_beam_size, iterable=all_elements(), key=lambda x:x[-1])
old_beam = predicted_tokens_beam[batch_idx]
new_beam = [list() for _ in range(max_beam_size)]
for i, (beam_idx, word, beam_is_done, seq_logprob) in enumerate(top_elements):
next_frontier_tokens[batch_idx, i] = vocabulary.get_id_or_unk(word)
next_is_done[batch_idx, i] = beam_is_done
next_sequence_logprobs[batch_idx, i] = float(seq_logprob)
if beam_is_done:
new_beam[i] = old_beam[beam_idx]
else:
new_beam[i] = old_beam[beam_idx] + [ word ]
next_decoder_state[batch_idx * max_beam_size + i, :] = current_decoder_state[batch_idx * beam_size + beam_idx]
predicted_tokens_beam[batch_idx] = new_beam
# After we are done for all batches
is_done = next_is_done
sequence_logprobs = next_sequence_logprobs.cpu().numpy()
frontier_tokens = next_frontier_tokens.unsqueeze(-1)
current_decoder_state = next_decoder_state.transpose(0, 1).contiguous()
return [(predicted_tokens_beam[i], sequence_logprobs[i]) for i in range(batch_size)]
# ===================== module boundary =====================
from typing import Dict, Any, Optional, List, Tuple
import torch
from torch import nn
from mlcomponents.embeddings import SequenceEmbedder
from . import SeqDecoder
from .luongattention import LuongAttention
class GruDecoder(SeqDecoder):
def __init__(self, name: str, token_encoder: SequenceEmbedder,
standard_attention: Optional[LuongAttention]=None,
hyperparameters: Optional[Dict[str, Any]]=None,
pre_trained_gru: Optional[nn.GRU] = None,
include_summarizing_network: bool = True
) -> None:
super(GruDecoder, self).__init__(name, token_encoder, hyperparameters)
self.__output_gru = pre_trained_gru # type: Optional[nn.GRU]
self.__standard_attention = standard_attention # type: Optional[LuongAttention]
self.__dropout_layer = None # type: Optional[nn.Dropout]
        self.__cross_entropy_loss = torch.nn.CrossEntropyLoss(reduction='none')
self.__include_summarizing_network = include_summarizing_network
self.__summarization_layer = None
self.reset_metrics()
@property
def gru(self) -> nn.GRU:
return self.__output_gru
def _finalize_component_metadata_and_model(self) -> None:
if self.__output_gru is None:
self.__output_gru = nn.GRU(
input_size=self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size'),
hidden_size=self.get_hyperparameter('hidden_size'),
batch_first=True
)
else:
# Make sure that GRU is compatible
assert self.__output_gru.hidden_size == self.get_hyperparameter('hidden_size')
assert self.__output_gru.num_layers == self.get_hyperparameter('num_layers')
assert self.__output_gru.input_size == self.target_token_encoder.embedding_size + self.get_hyperparameter('additional_inputs_size')
assert self.__output_gru.batch_first
self.__dropout_layer = nn.Dropout(p=self.get_hyperparameter('dropout_rate'))
if self.__include_summarizing_network:
self.__summarization_layer = nn.Linear(
self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size'),
self.get_hyperparameter('hidden_size')
)
else:
assert self.get_hyperparameter('initial_state_size') + self.get_hyperparameter('additional_initial_state_inputs_size') ==\
self.get_hyperparameter('hidden_size'), 'Initial states sizes do not match.'
self.__hidden_to_output = nn.Parameter(torch.randn(self.get_hyperparameter('hidden_size'),
self.target_token_encoder.embedding_size,
dtype=torch.float, requires_grad=True))
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'dropout_rate': 0.2,
'hidden_size': 64,
'initial_state_size': 64,
'additional_initial_state_inputs_size': 0,
'additional_inputs_size': 0
}
def load_data_from_sample(self, data_to_load: SeqDecoder.InputOutputSequence) -> Optional[Any]:
return self.target_token_encoder.load_data_from_sample(self._add_start_end(data_to_load.output_sequence))
def initialize_minibatch(self) -> Dict[str, Any]:
return self.target_token_encoder.initialize_minibatch()
def extend_minibatch_by_sample(self, datapoint: Any, accumulated_minibatch_data: Dict[str, Any]) -> bool:
return self.target_token_encoder.extend_minibatch_by_sample(
datapoint=datapoint,
accumulated_minibatch_data=accumulated_minibatch_data)
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
return {'output_sequences': self.target_token_encoder.finalize_minibatch(accumulated_minibatch_data)}
def __get_output_logits(self, target_token_embeddings, state, memories, memory_lengths):
"""
:param target_token_embeddings: B x max-out-len x D
:param state: B x D
:param memories: B x max-in-len x H
:param memory_lengths: B
:return: output_logits: B x max-out-len x V, h_out: num-layers x B x H
"""
output_states, h_out = self.__output_gru.forward(target_token_embeddings, state.unsqueeze(0)) # B x max-out-len-1 x H
if self.__standard_attention is not None:
output_states_flat = self.__standard_attention.forward(
memories=memories, memories_length=memory_lengths,
lookup_vectors=output_states.contiguous()) # (B * max-out-len - 1) x H
output_states = output_states_flat.view(output_states.shape)
output_logits = torch.einsum('blh,hd,vd->blv', output_states, self.__hidden_to_output,
self.target_token_encoder.embedding_matrix)
return output_logits, h_out
def forward(self, *, memories=None, memories_lengths=None, initial_state=None, output_sequences: Dict[str, Any]=None):
target_token_embeddings, sequence_lengths = self.target_token_encoder.forward(as_packed_sequence=False,
**output_sequences) # B x max_len x D and B
if initial_state is None:
initial_state = torch.zeros((sequence_lengths.shape[0], self.get_hyperparameter('hidden_size')), device=self.device)
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
target_token_embeddings = target_token_embeddings[:, :-1]
output_logits, _ = self.__get_output_logits(target_token_embeddings, initial_state, memories, memories_lengths)
loss = self.__cross_entropy_loss(input=output_logits.flatten(0,1), target=output_sequences['token_ids'][:, 1:].flatten(0, 1))
        # Targets are the tokens shifted by one; for a sequence of length l only positions 0..l-2 are real.
        mask = torch.arange(target_token_embeddings.shape[1], device=self.device).unsqueeze(0) < (sequence_lengths - 1).unsqueeze(-1)  # B x max_len - 1
loss = loss.view(output_logits.shape[0], output_logits.shape[1]) * mask.float()
loss = (loss.sum(dim=-1) / mask.sum(dim=-1).float()).mean()
with torch.no_grad():
self._num_minibatches += 1
self._loss_sum += float(loss)
return loss
def compute_likelihood(self, *, memories, memories_lengths, initial_state, additional_decoder_input, **kwargs):
        raise NotImplementedError()
def greedy_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]], max_length: int=10) -> List[Tuple[List[List[str]], List[float]]]:
vocabulary = self.target_token_encoder.vocabulary
if self.__include_summarizing_network:
initial_state = self.__summarization_layer(initial_state)
current_decoder_state = initial_state # B x H
next_token_sequences = torch.tensor(
[[vocabulary.get_id_or_unk(self.START)]] * initial_state.shape[0],
device=self.device) # B x 1
predicted_tokens = [] # List[torch.Tensor]
predicted_logprobs = [] # List[float]
for i in range(max_length):
# Embed next_token_sequences
target_token_embeddings, _ = self.target_token_encoder.forward(
as_packed_sequence=False,
token_ids=next_token_sequences,
lengths=None) # B x 1 x D
output_logits, current_decoder_state = self.__get_output_logits(target_token_embeddings, current_decoder_state, memories, memory_lengths)
current_decoder_state = current_decoder_state.squeeze(0)
output = nn.functional.log_softmax(output_logits, dim=-1)
greedy_word_logprobs, next_tokens = output.max(dim=-1)
predicted_logprobs.append(greedy_word_logprobs.squeeze(1))
predicted_tokens.append(next_tokens.squeeze(1))
next_token_sequences = next_tokens
# Now convert sequences back to str
predictions = [] # type: List[Tuple[List[str], float]]
for i in range(initial_state.shape[0]):
tokens = [vocabulary.get_name_for_id(int(predicted_tokens[j][i])) for j in range(max_length)]
try:
end_idx = tokens.index(self.END)
except ValueError:
end_idx = max_length
tokens = tokens[:end_idx]
logprob = sum(float(predicted_logprobs[j][i].cpu()) for j in range(min(end_idx, max_length)))
predictions.append(([tokens], [logprob]))
return predictions
def beam_decode(self, memories, memory_lengths, initial_state, memories_str_representations: List[List[str]],
max_length: int = 40, max_beam_size: int = 5) -> List[Tuple[List[List[str]], List[float]]]:
raise NotImplementedError
|
from collections import Counter
from difflib import SequenceMatcher
from typing import List, NamedTuple, Set, Tuple, Dict
class EditEvaluator:
"""Evaluate a (code) editing model."""
def __init__(self):
self.__num_samples = 0 # type: int
self.__sum_exact_matches = 0 # type: int
# Does the model edit the correct span of the original (the concrete edit doesn't matter)
self.__sum_prediction_similarity_to_target = 0 # type: float
self.__span_precision_sum = 0.0 # type: float
self.__span_recall_sum = 0.0 # type: float
self.__span_f1_sum = 0.0 # type: float
# Jaccard
self.__sum_jaccard_similarity_of_tokens = 0. # type: float
self.__sum_jaccard_similarity_of_added_tokens = 0. # type: float
self.__sum_jaccard_similarity_of_deleted_tokens = 0. # type: float
EditInformation = NamedTuple('EditInformation', [
('added_tokens', Counter),
('deleted_tokens', Counter),
('edits', List[Tuple[int, int, Tuple]]),
('sequence_matcher', SequenceMatcher)
])
def __get_edit_information(self, before, after) -> 'EditEvaluator.EditInformation':
seq_matcher = SequenceMatcher(None, before, after)
added_tokens, deleted_tokens = Counter(), Counter()
edited_spans_on_before = [] # type: List[Tuple[int, int, Tuple]]
for tag, i1, i2, j1, j2 in seq_matcher.get_opcodes():
if tag == 'equal':
continue
deleted_tokens.update(before[i1:i2])
added_tokens.update(after[j1:j2])
edited_spans_on_before.append((i1, i2, tuple(after[j1:j2])))
return self.EditInformation(added_tokens, deleted_tokens, edited_spans_on_before, seq_matcher)
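    # Illustrative example (not executed): for before=['a', 'b', 'c'] and after=['a', 'x', 'c'],
    # __get_edit_information returns added_tokens=Counter({'x': 1}), deleted_tokens=Counter({'b': 1})
    # and edits=[(1, 2, ('x',))], i.e. the span [1, 2) of `before` was replaced by ('x',).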
def add_sample(self, original: List[str], gold_edited: List[str], predicted_edited: List[str]) -> None:
"""
:param original: The original input that is being edited
:param gold_edit: The gold edited version of `original`
:param predicted_edits: The predicted edited version of the original
"""
self.__num_samples += 1
if gold_edited == predicted_edited:
self.__sum_exact_matches += 1
gold_edit_info = self.__get_edit_information(original, gold_edited)
predicted_edit_info = self.__get_edit_information(original, predicted_edited)
num_added_by_both_tokens = sum((gold_edit_info.added_tokens & predicted_edit_info.added_tokens).values())
num_deleted_by_both_tokens = sum((gold_edit_info.deleted_tokens & predicted_edit_info.deleted_tokens).values())
num_added_by_either_tokens = sum((gold_edit_info.added_tokens | predicted_edit_info.added_tokens).values())
num_deleted_by_either_tokens = sum((gold_edit_info.deleted_tokens | predicted_edit_info.deleted_tokens).values())
        num_changed_by_either_tokens = num_added_by_either_tokens + num_deleted_by_either_tokens
        if num_changed_by_either_tokens > 0:
            sample_jaccard = (num_added_by_both_tokens + num_deleted_by_both_tokens) / num_changed_by_either_tokens
        else:
            sample_jaccard = 1.  # Neither the gold nor the prediction changed anything.
        self.__sum_jaccard_similarity_of_tokens += sample_jaccard
if num_added_by_either_tokens > 0:
self.__sum_jaccard_similarity_of_added_tokens += num_added_by_both_tokens / num_added_by_either_tokens
else:
self.__sum_jaccard_similarity_of_added_tokens += 1
if num_deleted_by_either_tokens > 0:
self.__sum_jaccard_similarity_of_deleted_tokens += num_deleted_by_both_tokens / num_deleted_by_either_tokens
else:
self.__sum_jaccard_similarity_of_deleted_tokens += 1
        # This measures how similar the prediction is to the gold edited version, relative to how similar the
        # unedited original already is to the gold, implicitly normalizing for the original->gold_edited similarity.
        # < 1 means the prediction is further from the gold than the unedited original (worse than doing no edits).
        # > 1 means the prediction is closer to the gold than the unedited original (better than doing no edits).
original_to_gold_ratio = gold_edit_info.sequence_matcher.ratio()
predicted_to_gold_ratio = SequenceMatcher(None, predicted_edited, gold_edited).ratio()
if original_to_gold_ratio > 0:
self.__sum_prediction_similarity_to_target += predicted_to_gold_ratio / original_to_gold_ratio
elif predicted_to_gold_ratio > 0: # This should be very rare. The whole original->gold changed completely.
self.__sum_prediction_similarity_to_target += 1
else:
self.__sum_prediction_similarity_to_target += 0
# How accurate are the edit locations?
        # We count the precision/recall in terms of tokens in the original that are edited. Insertions (which do not
        # edit a range) are considered to have a span of 1 in the original input.
def find_overlap_on_original(start_pos: int, end_pos: int, edits: List[Tuple[int, int, Tuple]]) -> Tuple[int, int]:
assert start_pos <= end_pos
correctly_changed = 0
if start_pos == end_pos:
for s, e, _ in edits:
if s <= start_pos <= e:
# Insertions at the correct positions count as correct
return 1, 1
return 0, 1 # An insertion in the wrong place.
for s, e, _ in edits:
if s <= start_pos <= e or s <= end_pos <= e or (start_pos <= s and e <= end_pos):
if s == e: # An insertion in [s,e) which overlaps with [start_pos, end_pos)
correctly_changed += 1
else:
correctly_changed += min(e, end_pos) - max(s, start_pos)
return correctly_changed, end_pos - start_pos
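        # Example (illustrative only): find_overlap_on_original(1, 3, [(1, 3, ('x',))]) returns (2, 2):
        # both tokens of the predicted span [1, 3) fall inside a gold-edited span, out of 2 tokens total.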
def compute_coverage(edits: List[Tuple[int, int, Tuple]], target_edits: List[Tuple[int, int, Tuple]]):
sum_correct_changed, sum_total_changed = 0, 0
for predicted_edit_start, predicted_edit_end, _ in edits:
cor, total = find_overlap_on_original(predicted_edit_start, predicted_edit_end, target_edits)
sum_correct_changed += cor
sum_total_changed += total
            if sum_total_changed > 0:
                return sum_correct_changed / sum_total_changed
            else:
                return 0
precision = compute_coverage(predicted_edit_info.edits, gold_edit_info.edits)
recall = compute_coverage(gold_edit_info.edits, predicted_edit_info.edits)
self.__span_precision_sum += precision
self.__span_recall_sum += recall
if precision + recall > 0:
self.__span_f1_sum += 2 * (precision * recall) / (precision + recall)
else:
self.__span_f1_sum += 0
def evaluation_statistics(self) -> Dict[str, float]:
return {
'Exact Match': self.__sum_exact_matches / self.__num_samples,
'Jaccard Similarity of Edits': self.__sum_jaccard_similarity_of_tokens / self.__num_samples,
            'Jaccard Similarity of Edits - Addition': self.__sum_jaccard_similarity_of_added_tokens / self.__num_samples,
            'Jaccard Similarity of Edits - Deletion': self.__sum_jaccard_similarity_of_deleted_tokens / self.__num_samples,
'Normalized Prediction Similarity to Target': self.__sum_prediction_similarity_to_target / self.__num_samples,
'Span Precision': self.__span_precision_sum / self.__num_samples,
'Span Recall': self.__span_recall_sum / self.__num_samples,
'Span F1': self.__span_f1_sum / self.__num_samples,
}
if __name__ == '__main__':
evaluator = EditEvaluator()
evaluator.add_sample(['a', 'b', 'd', 'e'], ['a', 'x', 'd', 'x', 'e'], ['a', 'b', 'd', 'x', 'e'])
print(evaluator.evaluation_statistics()) |
from typing import Iterable, Callable
from dpu_utils.utils import RichPath
from data import fcedataloader as fcedataloader, codadataloader as codedataloader, \
wikieditsloader as wikiatomiceditsloader, paraphraseloader
from data.edits import Edit
from data.jsonldata import parse_jsonl_edit_data, parse_monolingual_edit_data, parse_monolingual_synthetic_edit_data
from data.datautils import LazyDataIterable
from data.m2loader import parse_m2_folder
def load_data_by_type(path: RichPath, data_type: str, cleanup: bool=False, as_list: bool=True) -> Iterable[Edit]:
def pkg(x: Callable):
if as_list:
return list(x())
else:
return LazyDataIterable(x)
if data_type == 'fce':
return pkg(lambda: fcedataloader.load_data_from(path))
elif data_type == 'code':
return pkg(lambda: codedataloader.load_data_from(path))
elif data_type == 'codecontext':
# Returns List[EditContext] (includes one extra field)
return pkg(lambda: codedataloader.load_data_with_context_from(path))
elif data_type == 'fixer':
return pkg(lambda: codedataloader.load_fixer_data(path))
elif data_type == 'wikiatomicedits':
return pkg(lambda: wikiatomiceditsloader.load_data_from(path, 4000000, remove_identical=cleanup))
elif data_type == 'wikiedits':
return pkg(lambda: wikiatomiceditsloader.load_data_from(path, has_phrase=False, remove_identical=cleanup))
elif data_type == 'paraphrase':
return pkg(lambda: paraphraseloader.load_data_from(path, remove_identical=cleanup))
elif data_type == 'jsonl':
return pkg(lambda: parse_jsonl_edit_data(path))
elif data_type == 'm2':
return pkg(lambda: parse_m2_folder(path))
elif data_type == 'monolingual':
return pkg(lambda : parse_monolingual_edit_data(path))
elif data_type == 'monolingual-synth-edits':
return pkg(lambda : parse_monolingual_synthetic_edit_data(path))
else:
raise ValueError('Unrecognized data type %s' % data_type)
|
from typing import Iterator
from dpu_utils.utils import RichPath
from data.edits import Edit, EditContext, CONTEXT_SEPERATOR
def load_data_from(file: RichPath) -> Iterator[Edit]:
data = file.read_by_file_suffix()
for line in data:
yield Edit(
input_sequence=line['PrevCodeChunkTokens'],
output_sequence=line['UpdatedCodeChunkTokens'],
provenance=line['Id'],
edit_type=''
)
def load_fixer_data(file: RichPath) -> Iterator[Edit]:
for line in file.read_by_file_suffix():
yield Edit(
input_sequence= line['PrevCodeChunkTokens'],
output_sequence= line['UpdatedCodeChunkTokens'],
provenance=line['Id'],
edit_type=line['Id'].split('_')[0]
)
def load_data_with_context_from(file: RichPath) -> Iterator[EditContext]:
data = file.read_by_file_suffix()
for line in data:
try:
if 'PrecedingContextTokens' in line:
preceding_context_tokens = line['PrecedingContextTokens']
succeeding_context_tokens = line['SucceedingContextTokens']
else:
preceding_context_tokens = line['PrecedingContext']
succeeding_context_tokens = line['SucceedingContext']
yield EditContext(
input_sequence=line['PrevCodeChunkTokens'],
output_sequence=line['UpdatedCodeChunkTokens'],
context_sequence=preceding_context_tokens + [CONTEXT_SEPERATOR] + succeeding_context_tokens,
provenance=line['Id'],
edit_type=''
)
        except KeyError:
            # Skip lines that are missing one of the expected fields.
            continue
|
import logging
from typing import Iterator, List, Tuple, NamedTuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def load_data_from(file: RichPath) -> Iterator[Edit]:
num_excluded_samples = 0
with open(file.to_local_path().path) as f:
for i, row in enumerate(f):
edit_start_idx, edit_end_idx, source_words, target_words, error_type, sentence = row.split('\t')
edit_start_idx, edit_end_idx = int(edit_start_idx), int(edit_end_idx)
sentence = sentence.lower().split()
source_words = source_words.lower().split()
target_words = target_words.lower().split()
assert sentence[edit_start_idx:edit_end_idx] == source_words
output_sequence = sentence[:edit_start_idx] + target_words + sentence[edit_end_idx:]
if sentence == output_sequence:
num_excluded_samples += 1
continue
if len(sentence) < 2 or len(output_sequence) < 2:
num_excluded_samples += 1
continue
yield Edit(
input_sequence=sentence,
output_sequence=output_sequence,
edit_type=error_type,
provenance=f'row{i}'
)
logging.warning('Removed %s samples because before/after sentence was identical or too small.', num_excluded_samples) |
#!/usr/bin/env python
"""
Usage:
monolingualprocess.py bert-tokenize [options] INPUT_DATA OUTPUT_DATA_PATH
monolingualprocess.py bert-tokenize multiple [options] INPUT_DATA_LIST OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--vocab-size=<num> The vocabulary size. [default: 25000]
--pct-bpe=<pct> Percent of the vocabulary size to be BPE. [default: 0.1]
--bert-model=<model> Pretrained BERT model to use from pytorch-transformers. [default: bert-base-cased]
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from itertools import chain
from tqdm import tqdm
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
def load_data(path: str):
with open(path) as f:
for line in f:
yield line.strip()
def run(arguments):
if arguments['multiple']:
all_data = []
for input_file in arguments['INPUT_DATA_LIST'].split(','):
all_data.append(load_data(input_file))
data = chain(*all_data)
else:
in_data_path = arguments['INPUT_DATA']
data = load_data(in_data_path)
out_data_path = RichPath.create(arguments['OUTPUT_DATA_PATH'])
from pytorch_transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained(arguments['--bert-model'])
logging.info('Converting data...')
def bpe_convertor():
for line in tqdm(data):
tokens = tokenizer.tokenize(line)
if len(tokens) < 4:
continue
yield tokens
out_data_path.save_as_compressed_file(bpe_convertor())
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
import gzip
import logging
from typing import Optional, Iterator, List
from dpu_utils.utils import RichPath
from data.edits import Edit
def clean_up_sentence(tokens: List[str]) -> List[str]:
# Remove empty spaces
return [t.strip() for t in tokens if len(t.strip()) > 0]
def load_data_from(file: RichPath, max_size_to_load: Optional[int]=None, has_phrase: bool=True, remove_identical: bool=True) -> Iterator[Edit]:
num_removed = 0
with gzip.open(file.to_local_path().path, 'rt') as f:
for i, row in enumerate(f):
if max_size_to_load is not None and i >= max_size_to_load: break
if has_phrase:
original_sentence, phrase, target_sentence = row.split('\t')
else:
original_sentence, target_sentence = row.split('\t')[:2]
input_sentence = original_sentence.strip().lower().split()
            output_sentence = target_sentence.strip().lower().split()
input_sentence = clean_up_sentence(input_sentence)
output_sentence = clean_up_sentence(output_sentence)
if remove_identical and input_sentence == output_sentence:
num_removed += 1
continue
yield Edit(
input_sequence=input_sentence,
output_sequence=output_sentence,
edit_type='',
provenance=str(i)
)
if num_removed > 0:
        logging.info('Removed %s samples because input and output were identical after cleanup.', num_removed)
|
import json
import os
from typing import List, Callable, TypeVar, Generic, Optional
import numpy as np
from annoy import AnnoyIndex
from sklearn.manifold import TSNE
from data.representationviz import RepresentationsVisualizer
T = TypeVar('T')
class NLRepresentationsVisualizer(RepresentationsVisualizer):
def __init__(self, labeler: Callable[[T], str]=None, colorer: Callable[[T], str]=None, distance_metric: str='euclidean'):
super(NLRepresentationsVisualizer, self).__init__(labeler, colorer, distance_metric)
def nearest_neighbors_to_html(self, datapoints: List[T], representations: np.ndarray,
datapoint_to_html: Callable[[T], str], outfile: str,
num_neighbors: int=2, num_items: Optional[int]=None,
num_items_to_show: int=10000):
import pystache
assert len(datapoints) == representations.shape[0], 'Number of datapoints and representations do not match.'
nns_viz_data = []
for i, nns, distances in self.compute_nearest_neighbors(datapoints, representations, num_neighbors, num_items):
nns_viz_data.append({
'num': i,
'diff': datapoint_to_html(datapoints[i]),
'nns': [dict(diff=datapoint_to_html(datapoints[nn]),dist=dist,
nl=' '.join(datapoints[nn].nl_sequence),
link=datapoints[nn].provenance) for nn, dist in zip(nns, distances)],
'nl': ' '.join(datapoints[i].nl_sequence),
'link': datapoints[i].provenance
})
if i > num_items_to_show:
break
with open(os.path.join(os.path.dirname(__file__), 'nldiffviz.mustache')) as f:
diff_template = pystache.parse(f.read())
renderer = pystache.Renderer()
html = renderer.render(diff_template, dict(samples=nns_viz_data))
print('Writing output at %s' % outfile)
with open(outfile, 'w', encoding='utf-8') as f:
f.write(html) |
from typing import Iterator, Iterable, Callable
class LazyDataIterable(Iterable):
def __init__(self, base_iterable_func: Callable[[], Iterator]):
self.__base_iterable_func = base_iterable_func
def __iter__(self):
return self.__base_iterable_func()
|
"""
Code from https://github.com/kilink/ghdiff
"""
import difflib
import six
import html
def escape(text):
return html.escape(text)
def diff(a, b, n=4):
if isinstance(a, six.string_types):
a = a.splitlines()
if isinstance(b, six.string_types):
b = b.splitlines()
return colorize(list(difflib.unified_diff(a, b, n=n)))
def colorize(diff):
return "\n".join(_colorize(diff))
def _colorize(diff):
if isinstance(diff, six.string_types):
lines = diff.splitlines()
else:
lines = diff
lines.reverse()
while lines and not lines[-1].startswith("@@"):
lines.pop()
if len(lines) > 0: lines.pop() # Remove top of hunk. Lines not meaningful for us.
yield '<div class="diff">'
while lines:
line = lines.pop()
klass = ""
if line.startswith("@@"):
klass = "control"
elif line.startswith("-"):
klass = "delete"
if lines:
_next = []
while lines and len(_next) < 2:
_next.append(lines.pop())
if _next[0].startswith("+") and (
len(_next) == 1 or _next[1][0] not in ("+", "-")):
aline, bline = _line_diff(line[1:], _next.pop(0)[1:])
yield '<div class="delete">-%s</div>' % (aline,)
yield '<div class="insert">+%s</div>' % (bline,)
if _next:
lines.append(_next.pop())
continue
lines.extend(reversed(_next))
elif line.startswith("+"):
klass = "insert"
yield '<div class="%s">%s</div>' % (klass, escape(line),)
yield "</div>"
def _line_diff(a, b):
aline = []
bline = []
for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(a=a, b=b).get_opcodes():
if tag == 'equal':
aline.append(escape(a[i1:i2]))
bline.append(escape(b[j1:j2]))
continue
aline.append('<span class="highlight">%s</span>' % (escape(a[i1:i2]),))
bline.append('<span class="highlight">%s</span>' % (escape(b[j1:j2]),))
return "".join(aline), "".join(bline)
|
from typing import Iterator, Dict, Union, List
from collections import Counter
import numpy as np
from dpu_utils.utils import RichPath
from data.edits import Edit, NLEdit
def parse_jsonl_edit_data(path: RichPath) -> Iterator[Edit]:
for line in path.read_as_jsonl():
yield Edit(
input_sequence=line['input_sequence'],
output_sequence=line['output_sequence'],
provenance=line.get('provenance', ''),
edit_type=line.get('edit_type', '')
)
def parse_monolingual_edit_data(path: RichPath) -> Iterator[Edit]:
for i, line in enumerate(path.read_as_jsonl()):
yield Edit(
input_sequence=None,
output_sequence=line,
provenance=f'L{i}',
edit_type=''
)
def make_random_edit(text: List[str], cache: List[str]) -> List[str]:
rnd_num = np.random.rand()
if rnd_num < 0.2:
# no edit (20%)
return text
elif rnd_num < 0.4:
# Delete random element (20%)
        num_deletions = np.random.randint(1, 3)
deleted_text = list(text)
for _ in range(num_deletions):
rnd_pos = np.random.randint(len(deleted_text))
deletion_size = np.random.randint(1, 4-num_deletions)
deleted_text = deleted_text[:rnd_pos] + deleted_text[rnd_pos+deletion_size:]
return deleted_text
elif rnd_num < 0.6:
# Swap two consecutive words (20%)
swapped_text = list(text)
rnd_pos = np.random.randint(len(swapped_text)-1)
        swapped_text[rnd_pos], swapped_text[rnd_pos+1] = swapped_text[rnd_pos+1], swapped_text[rnd_pos]
return swapped_text
    elif rnd_num < 0.8:
        # Replace 1-2 random words with words from the cache (20%)
swapped_text = list(text)
rnd_pos = np.random.randint(len(swapped_text))
num_swaps = np.random.randint(1, 3)
for _ in range(num_swaps):
rnd_pos = np.random.randint(len(swapped_text))
swapped_text[rnd_pos] = cache[np.random.randint(len(cache))]
return swapped_text
else:
# Add random word (20%)
rnd_pos = np.random.randint(0, len(text))
return text[:rnd_pos] + [cache[np.random.randint(len(cache))]] + text[rnd_pos:]
def parse_monolingual_synthetic_edit_data(path: RichPath) -> Iterator[Edit]:
word_cache = []
for i, line in enumerate(path.read_as_jsonl()):
if len(line) < 3:
continue
word_cache.extend(line)
yield Edit(
input_sequence=make_random_edit(line, word_cache),
output_sequence=line,
provenance=f'L{i}',
edit_type=''
)
if np.random.rand() < .01:
# Clean pseudo-cache
np.random.shuffle(word_cache)
word_cache = word_cache[:2000]
def save_jsonl_edit_data(data: Iterator[Union[Edit, Dict]], path: RichPath) -> None:
def to_dict():
for edit in data:
if isinstance(edit, Edit):
yield edit._asdict()
elif isinstance(edit, dict):
yield edit
else:
raise ValueError('Unrecognized input data')
path.save_as_compressed_file(to_dict())
|
import difflib
from enum import Enum
from typing import NamedTuple, TypeVar, Optional, List, Dict
import enum
Edit = NamedTuple('Edit', [
('input_sequence', List[str]),
('output_sequence', List[str]),
('provenance', str),
    ('edit_type', str)
])
NLEdit = NamedTuple('NLEdit', [
('input_sequence', List[str]),
('output_sequence', List[str]),
('nl_sequence', List[str]),
('provenance', str),
    ('edit_type', str)
])
EditContext = NamedTuple('EditContext', [
('input_sequence', List[str]),
('output_sequence', List[str]),
('context_sequence', List[str]),
('provenance', str),
    ('edit_type', str)
])
# TODO: Make sure this doesn't get cut by BPE
CONTEXT_SEPERATOR = '%CONTEXT_SEP%'
@enum.unique
class ChangeType(Enum):
EQUAL = 0
INSERT = 1
REPLACE = 2
DELETE = 3
T = TypeVar('T')
AlignedDiffRepresentation = NamedTuple('AlignedDiffRepresentation', [
('change_type', List[ChangeType]),
('before_tokens', List[Optional[T]]),
('after_tokens', List[Optional[T]])
])
def sequence_diff(before: List[T], after: List[T]) -> AlignedDiffRepresentation:
"""
Return a linearized sequence diff as explained in Yin et al. 2019
"""
matcher = difflib.SequenceMatcher()
before_tokens = [] # type: List[Optional[T]]
after_tokens = [] # type: List[Optional[T]]
change_types = [] # type: List[ChangeType]
matcher.set_seqs(before, after)
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if tag == 'equal':
change_types.extend([ChangeType.EQUAL] * (i2 - i1))
before_tokens.extend(before[i1:i2])
after_tokens.extend(after[j1:j2])
elif tag == 'delete':
change_types.extend([ChangeType.DELETE] * (i2 - i1))
before_tokens.extend(before[i1:i2])
after_tokens.extend([None] * (i2 - i1))
elif tag == 'insert':
change_types.extend([ChangeType.INSERT] * (j2 - j1))
before_tokens.extend([None] * (j2 - j1))
after_tokens.extend(after[j1:j2])
elif tag == 'replace':
largest_span_size = max(i2-i1, j2-j1)
change_types.extend([ChangeType.REPLACE] * largest_span_size)
before_tokens.extend(before[i1:i2] + [None] * (largest_span_size - (i2-i1)))
after_tokens.extend(after[j1:j2] + [None] * (largest_span_size - (j2 - j1)))
else:
raise Exception('Unrecognized opcode %s' % tag)
assert len(change_types) == len(before_tokens) == len(after_tokens)
return AlignedDiffRepresentation(change_types, before_tokens, after_tokens)
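# Illustrative example (not executed): sequence_diff(['a', 'b', 'c'], ['a', 'x', 'c']) returns
#   change_type   = [EQUAL, REPLACE, EQUAL]
#   before_tokens = ['a', 'b', 'c']
#   after_tokens  = ['a', 'x', 'c']
# while a pure deletion, sequence_diff(['a', 'b', 'c'], ['a', 'c']), yields
#   change_type   = [EQUAL, DELETE, EQUAL]
#   before_tokens = ['a', 'b', 'c']
#   after_tokens  = ['a', None, 'c']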
|
import logging
from typing import Optional, Iterator, List
from dpu_utils.utils import RichPath
from data.edits import Edit
def clean_up_sentence(tokens: List[str]) -> List[str]:
# Remove empty spaces
return [t.strip() for t in tokens if len(t.strip()) > 0]
def load_data_from(file: RichPath, max_size_to_load: Optional[int]=None, remove_identical: bool=True) -> Iterator[Edit]:
num_removed = 0
with open(file.to_local_path().path, 'r') as f:
for i, row in enumerate(f):
if i == 0: continue # ignore header
if max_size_to_load is not None and i >= max_size_to_load: break
is_para, orig_id, target_id, original_sentence, target_sentence = row.split('\t')
input_sentence = original_sentence.strip().lower().split()
output_sentence = target_sentence.strip().lower().split()
input_sentence = clean_up_sentence(input_sentence)
output_sentence = clean_up_sentence(output_sentence)
if remove_identical and input_sentence == output_sentence:
num_removed += 1
continue
yield Edit(
input_sequence=input_sentence,
output_sequence=output_sentence,
edit_type=f'{is_para}',
provenance=f'{i}:{orig_id}->{target_id}'
)
if num_removed > 0:
        logging.info('Removed %s samples because input and output were identical after cleanup.', num_removed)
|
#!/usr/bin/env python
"""
Usage:
convertcnndmgraphs.py INPUTS_JSONL SUMMARIES_JSONL OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
from typing import Iterator, Dict, Union
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit, NLEdit
def parse_jsonl_edit_data(inputs_path: RichPath, target_path: RichPath) -> Iterator[Edit]:
for i, (input_graph, target_sequence) in enumerate(zip(inputs_path.read_as_jsonl(), target_path.read_as_jsonl())):
input_sequence = [input_graph['node_labels'][idx] for idx in input_graph['backbone_sequence']]
yield Edit(
input_sequence=input_sequence,
output_sequence=target_sequence,
provenance='L'+str(i),
edit_type=''
)
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
inputs_path = RichPath.create(arguments['INPUTS_JSONL'], azure_info_path)
summaries_path = RichPath.create(arguments['SUMMARIES_JSONL'], azure_info_path)
out_data_path = RichPath.create(arguments['OUTPUT_DATA_PATH'], azure_info_path)
data = parse_jsonl_edit_data(inputs_path, summaries_path)
def to_dict():
for edit in data:
yield edit._asdict()
out_data_path.save_as_compressed_file(to_dict())
if __name__ == '__main__':
args = docopt(__doc__)
run_and_debug(lambda: run(args), args.get('--debug', False))
|
from typing import List
import numpy as np
def get_copyable_spans(input: List[str], output: List[str]) -> np.ndarray:
"""
Return a 3D tensor copy_mask[k, i, j] that for a given location k shows the all the possible
spans that can be copied.
All valid start locations can be obtained at point k by diag(copy_masks[k])
All the possible end positions at position k, given a starting span i are given by copy_mask[k, i]
"""
copy_masks = np.zeros((len(output), len(input), len(input)), dtype=np.bool) # out-len x start_pos x end_pos
for k in range(len(output)-1, -1, -1):
for i in range(len(input)):
if input[i] == output[k]:
if k + 1 < len(output) and i + 1 < len(input):
                    # Every valid end point at k+1 is also valid here
copy_masks[k, i] = copy_masks[k+1, i+1]
copy_masks[k, i, i] = True
return copy_masks
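# Illustrative example (not executed): for input=['a', 'b', 'c'] and output=['a', 'b'],
# copy_masks[0, 0] == [True, True, False], i.e. at output position 0 a copy starting at input
# position 0 may end (inclusively) at position 0 ('a') or position 1 ('a b'), and
# copy_masks[1, 1, 1] == True, i.e. at output position 1 only the single token 'b' can be copied.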
|
#!/usr/bin/env python
"""
Usage:
paralleltoedit.py BEFORE AFTER OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
from typing import Iterator, Dict, Union
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit, NLEdit
def parse_jsonl_edit_data(inputs_path: RichPath, target_path: RichPath) -> Iterator[Edit]:
for i, (input_sequence, target_sequence) in enumerate(zip(inputs_path.read_as_text().splitlines(), target_path.read_as_text().splitlines())):
yield Edit(
input_sequence=input_sequence.strip().split(),
output_sequence=target_sequence.strip().split(),
provenance='L'+str(i),
edit_type=''
)
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
inputs_path = RichPath.create(arguments['BEFORE'], azure_info_path)
summaries_path = RichPath.create(arguments['AFTER'], azure_info_path)
out_data_path = RichPath.create(arguments['OUTPUT_DATA_PATH'], azure_info_path)
data = parse_jsonl_edit_data(inputs_path, summaries_path)
def to_dict():
for edit in data:
yield edit._asdict()
out_data_path.save_as_compressed_file(to_dict())
if __name__ == '__main__':
args = docopt(__doc__)
run_and_debug(lambda: run(args), args.get('--debug', False))
|
import json
import os
from typing import List, Callable, TypeVar, Generic, Optional
import numpy as np
from annoy import AnnoyIndex
from sklearn.manifold import TSNE
T = TypeVar('T')
class RepresentationsVisualizer(Generic[T]):
def __init__(self, labeler: Callable[[T], str], colorer: Callable[[T], str]=None, distance_metric: str='euclidean'):
self.__labeler = labeler
self.__colorer = colorer
self.__distance_metric = distance_metric
def print_nearest_neighbors(self, datapoints: List[T], representations: np.ndarray, num_neighbors: int=2,
num_items: Optional[int]=None, datapoint_to_string: Optional[Callable[[T], str]]=None):
assert len(datapoints) == representations.shape[0], 'Number of datapoints and representations do not match.'
for i, nns, distances in self.compute_nearest_neighbors(datapoints, representations, num_neighbors, num_items):
print('-------------------------------------------------------')
print(f'Target: {datapoints[i] if datapoint_to_string is None else datapoint_to_string(datapoints[i])}')
for j, (nn, dist) in enumerate(zip(nns, distances)):
print(f'Neighbor {j+1} (distance={dist:.2f}) {datapoints[nn] if datapoint_to_string is None else datapoint_to_string(datapoints[nn])}')
def nearest_neighbors_to_html(self, datapoints: List[T], representations: np.ndarray,
datapoint_to_html: Callable[[T], str], outfile: str,
num_neighbors: int=2, num_items: Optional[int]=None,
num_items_to_show: int=10000):
import pystache
assert len(datapoints) == representations.shape[0], 'Number of datapoints and representations do not match.'
nns_viz_data = []
for i, nns, distances in self.compute_nearest_neighbors(datapoints, representations, num_neighbors, num_items):
nns_viz_data.append({
'num': i,
'diff': datapoint_to_html(datapoints[i]),
'nns': [dict(diff=datapoint_to_html(datapoints[nn]),dist='%.3f' % dist) for nn, dist in zip(nns, distances)]
})
if i > num_items_to_show:
break
with open(os.path.join(os.path.dirname(__file__), 'diffviz.mustache')) as f:
diff_template = pystache.parse(f.read())
renderer = pystache.Renderer()
html = renderer.render(diff_template, dict(samples=nns_viz_data))
print('Writing output at %s' % outfile)
with open(outfile, 'w', encoding='utf-8') as f:
f.write(html)
@staticmethod
def square_to_condensed(i, j, n):
assert i != j, "no diagonal elements in condensed matrix"
if i < j:
i, j = j, i
return int(n * j - j * (j + 1) / 2 + i - 1 - j)
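    # Illustrative example (not executed): with n=4 points, the condensed (upper-triangular) distance
    # vector is ordered (0,1), (0,2), (0,3), (1,2), (1,3), (2,3); square_to_condensed(2, 0, 4) == 1
    # and square_to_condensed(3, 1, 4) == 4.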
def compute_nearest_neighbors(self, datapoints, representations, num_neighbors: int, num_items: Optional[int]=None, distance_threshold: float=.6):
index = AnnoyIndex(representations.shape[1], metric='angular')
for i in range(len(datapoints)):
index.add_item(i, representations[i])
index.build(50) # TODO: Fine tune this hyper
print('Nearest neighbor index built.')
num_items_shown = 0
for i, data in enumerate(datapoints):
if num_items is not None and i > num_items:
break
nns, distances = index.get_nns_by_item(i, num_neighbors+1, include_distances=True)
if nns[0] == i:
distance_of_first = distances[1]
else:
distance_of_first = distances[0]
if distance_of_first > distance_threshold:
continue
num_items_shown += 1
if nns[0] == i:
yield i, nns[1:], distances[1:]
else:
yield i, nns, distances
def save_tsne_as_json(self, datapoints, representations, save_file: str):
emb_2d = TSNE(n_components=2, verbose=1, metric=self.__distance_metric).fit_transform(representations)
out_dict = []
for i in range(len(datapoints)):
out_dict.append(
{
'xy': [float(emb_2d[i][0]), float(emb_2d[i][1])],
'before': datapoints[i]['input_sequence'],
'after': datapoints[i]['output_sequence'],
'label': self.__labeler(datapoints[i]),
'color': self.__colorer(datapoints[i]) if self.__colorer is not None else ''
}
)
with open(save_file, 'w') as f:
json.dump(out_dict, f)
def plot_tsne(self, datapoints, representations, save_file: Optional[str]=None):
emb_2d = TSNE(n_components=2, verbose=1, metric=self.__distance_metric).fit_transform(representations)
import matplotlib.pyplot as plt
plt.figure(figsize=(20,20))
plt.scatter(emb_2d[:, 0], emb_2d[:, 1])
for i in range(len(datapoints)):
            plt.annotate(self.__labeler(datapoints[i]), xy=(emb_2d[i, 0], emb_2d[i, 1]))
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
|
from typing import Iterator, List, Tuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def apply_edits(original_sentence: List[str], edits: List[Tuple[int, int, List[str]]]) -> List[str]:
edited_sentence = []
last_edit_idx = 0
for from_idx, to_idx, edit in edits:
edited_sentence += original_sentence[last_edit_idx:from_idx] + edit
last_edit_idx = to_idx
edited_sentence += original_sentence[last_edit_idx:]
return edited_sentence
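# Illustrative example (not executed): apply_edits(['a', 'b', 'c', 'd'], [(1, 2, ['x', 'y'])])
# returns ['a', 'x', 'y', 'c', 'd']: the span [1, 2) of the original is replaced by ['x', 'y'].
# Edits are assumed to be sorted by their start index and non-overlapping.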
def parse_m2_file(m2_file: RichPath) -> Iterator[Edit]:
original_sentence = None
edits = []
provenance = None
annotator_id = None
for i, line in enumerate(m2_file.read_as_text().splitlines()):
line = line.strip()
if len(line) == 0:
continue
if line.startswith('S'):
if original_sentence is not None and len(edits) > 0:
edited_sentence = apply_edits(original_sentence, edits)
yield Edit(input_sequence=original_sentence, output_sequence=edited_sentence,
provenance=provenance, edit_type='')
original_sentence = line.split(' ')[1:] # Remove " S"
edits = []
provenance = m2_file.path + 'L' + str(i)
annotator_id = None
elif line.startswith('A '):
            span, edit_type, replacement, _, _, next_annotator_id = line[2:].split('|||')
if edit_type == 'noop':
yield Edit(input_sequence=original_sentence, output_sequence=original_sentence,
provenance=provenance, edit_type='')
continue
if annotator_id != next_annotator_id and annotator_id is not None:
edited_sentence = apply_edits(original_sentence, edits)
yield Edit(input_sequence=original_sentence, output_sequence=edited_sentence,
provenance=provenance, edit_type='')
edits = []
annotator_id = next_annotator_id
            start_idx, end_idx = span.split()
start_idx, end_idx = int(start_idx), int(end_idx)
replacement = replacement.split()
if start_idx == end_idx and len(replacement) == 0:
continue
edits.append((start_idx, end_idx, replacement))
# Last edit.
if original_sentence is not None:
edited_sentence = apply_edits(original_sentence, edits)
yield Edit(input_sequence=original_sentence, output_sequence=edited_sentence,
provenance=provenance, edit_type='')
def parse_m2_folder(folder: RichPath) -> Iterator[Edit]:
for m2_file in folder.iterate_filtered_files_in_dir('*.m2'):
yield from parse_m2_file(m2_file)
|
from typing import List
import numpy as np
from data.edits import Edit
all_chars = [chr(65+i) for i in range(26)] + [chr(97+i) for i in range(26)]
def create_random_sequences(min_size: int, max_size: int, num_sequences_per_size: int):
for seq_size in range(min_size, max_size):
all_input_seqs = set()
while len(all_input_seqs) < num_sequences_per_size:
sample = np.random.choice(all_chars, size=seq_size, replace=False)
all_input_seqs.add(tuple(sample))
yield from all_input_seqs
##### Operations
def add_char(input_sequence: List[str]) -> List[str]:
pos = np.random.randint(len(input_sequence)+1)
char = np.random.choice(all_chars)
return input_sequence[:pos] + [char] + input_sequence[pos:]
def remove_char(input_sequence: List[str]) -> List[str]:
pos = np.random.randint(len(input_sequence))
return input_sequence[:pos] + input_sequence[pos+1:]
def swap_char(input_sequence: List[str]) -> List[str]:
pos = np.random.randint(len(input_sequence))
char = np.random.choice(all_chars)
return input_sequence[:pos] + [char] + input_sequence[pos+1:]
edit_choices = [add_char, remove_char, swap_char]
def apply_random(input_sequence: List[str]) -> Edit:
edit_op = np.random.choice(edit_choices)
return Edit(input_sequence=input_sequence, output_sequence=edit_op(input_sequence),
edit_type=edit_op.__name__, provenance='')
def get_dataset():
input_sequences = (list(s) for s in create_random_sequences(3, 10, 5000))
all_edits = [apply_random(s) for s in input_sequences]
return all_edits
|
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "<option2>")
option_b = os.getenv('OPTION_B', "<option1>")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
voter_id = request.cookies.get('voter_id')
if not voter_id:
        voter_id = hex(random.getrandbits(64))[2:]
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
resp.set_cookie('voter_id', voter_id)
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
|
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy import misc  # note: scipy.misc.imread requires an older SciPy/Pillow setup
os.chdir('data/images_part1')
x = 10  # index of the held-out test image (assumed; matches the 11-image variant of this script)
trainimages = []
for i in range(x + 1):
    A = misc.imread(str(i) + '.png', flatten=True)
    B, c, D = np.linalg.svd(A, full_matrices=False)
    trainimages.append({'original': A, 'singular': c[:x]})
testimage = trainimages[x]
recognisedimage = min(trainimages[:x], key=lambda e: sum((e['singular']-testimage['singular'])**2))
plt.imshow(recognisedimage['original'], interpolation='nearest', cmap=plt.cm.Greys_r)
plt.show()
|
import scipy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals._pilutil import imread
import os
os.chdir('data/images_part1')
trainimage = []
for i in range(11):
A = imread(str(i) + '.png', flatten = True)
B, c, D = np.linalg.svd(A)
trainimage.append({'original': A, 'singular': c[:10]})
testimage = trainimage[10]
recognisedimage = min(trainimage[:10], key=lambda e: sum((e['singular']-testimage['singular'])**2))
plt.imshow(recognisedimage['original'], interpolation='nearest', cmap=plt.cm.Greys_r)
plt.show()
|
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import accuracy_score
import os
os.chdir("data")
seed = 1234
#get data
forestation= pd.read_csv('forestation.csv')
forestation
y = forestation[['Fire']]  # predicting Fire
categoricals = pd.get_dummies(forestation[['Fuel', 'Slopes', 'Access', 'SB', 'Drought']])  # get dummies for categoricals
X = pd.concat([categoricals], axis=1)
clf = RandomForestClassifier(random_state=seed, max_depth=5)  # maximum depth of 5, use seed for repeatability
clf = clf.fit(X, y.values.ravel())# fit a model
y_pred = clf.predict(X)
accuracy = accuracy_score(y, y_pred, normalize=True)
Fuel = 0
Slopes = 0
Access = 0
SB = 0
Drought = 1
print ('Accuracy: ', accuracy)
print(clf.predict([[Fuel,Slopes,Access,SB,Drought]]))
##note - the print function below allows you to see the features as set out in the algorithm
##with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(X)
|
from pylab import *
x = [1,2,3,4,5,6,7,8,9,10,11]
y = [11,12,25,21,31,40,48,55,54,60,61]
scatter (x,y)
(m,c)=polyfit(x,y,1)
print ("Slope(m),", m)
print ("y-intercept (c),", c)
yp=polyval([m,c],x)
x2 = 12
y2 = m*x2 + c
print ("Predicted value of y in month 12,", y2)
plot(x2, y2, 'ro')
plot(x,yp)
grid (True)
xlabel('x')
ylabel('y')
show()
|
from sklearn import tree
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import graphviz
import os
os.chdir("data")
seed = 1234
power_investment = pd.read_csv('powergen.csv')
y= power_investment[['Profitable']]
X = pd.get_dummies(power_investment[['Sector', 'Hemisphere', 'Value']])
clf = tree.DecisionTreeClassifier(max_depth=4, random_state=seed)
clf = clf.fit(X, y)
# Export the fitted tree to Graphviz dot format
export_graphviz(clf, out_file='tree.dot',
feature_names=X.columns,
class_names=['Yes', 'No'],
filled=True)
# Convert tree.dot to tree.png (requires the Graphviz `dot` binary); uncomment if tree.png does not exist yet
#from subprocess import call
#call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])
# Show plot
image = mpimg.imread('tree.png')
plt.imshow(image)
plt.show()
|
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import accuracy_score
import os
os.chdir("data")
seed = 1234
forestation= pd.read_csv('forestation_1.csv')
forestation
y= forestation[['Fire']] # predicting fire
categoricals = pd.get_dummies(forestation[['Fuel', 'Slopes', 'Access','SB','Drought']])
X = pd.concat([categoricals], axis = 1)
clf = RandomForestClassifier(random_state=seed)
clf = clf.fit(X, y.values.ravel())
y_pred = clf.predict(X)
def plot_feature_importance(importance,names,model_type):
feature_importance = np.array(importance)
feature_names = np.array(names)
data={'feature_names':feature_names,'feature_importance':feature_importance}
fi_df = pd.DataFrame(data)
fi_df.sort_values(by=['feature_importance'], ascending=False,inplace=True)
plt.figure(figsize=(10,8))
sns.barplot(x=fi_df['feature_importance'], y=fi_df['feature_names'])
plt.title(model_type + 'FEATURE IMPORTANCE')
plt.xlabel('FEATURE IMPORTANCE')
plt.ylabel('FEATURE NAMES')
plot_feature_importance(clf.feature_importances_,X.columns,'RANDOM FOREST ')
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals._pilutil import imread
import os
os.chdir('data/images_part2')
trainimage = []
for i in range(22):
A = imread(str(i) + '.tif', flatten = True)
B, c, D = np.linalg.svd(A)
trainimage.append({'original': A, 'singular': c[:21]})
testimage = trainimage[21]
recognisedimage = min(trainimage[:21], key=lambda e: sum((e['singular']-testimage['singular'])**2))
plt.imshow(recognisedimage['original'], interpolation='nearest', cmap=plt.cm.Greys_r)
plt.show()
|
#!/usr/bin/python
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello)) |
import csv
import sys
from math import sin, cos, sqrt, atan2, radians
import datetime
import time
from azure.storage.blob import AppendBlobService
# Configure account name with the Azure Storage Account Name and the account Key from Storage Explorer
append_blob_service = AppendBlobService(
account_name='storage_account_name',
account_key='storage_account_key')
# Reads the start and stop index passed in through SLURM
start = int(sys.argv[1])
stop = int(sys.argv[2])
#Creates the blob for this batch.
append_blob_service.create_blob('distances', str(start) + "-" + str(stop) + '.csv')
#Logs the start time
append_blob_service.append_blob_from_text('distances', 'log.txt', "Starting " + str(start) + "-" + str(stop) + ":" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + "\n")
LatLongDict = {}
# radius of earth in miles
R = 3959.0
# Reads the airport data in to a list for easy access.
with open('airports-world.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
LatLongDict[row['LocationID']] = [row['Latitude'], row['Longitude']]
#Creates the column names for the distance table
fieldnames = "id"
for code1 in LatLongDict:
fieldnames+= "," + code1
fieldnames += "\n"
append_blob_service.append_blob_from_text('distances', str(start) + "-" + str(stop) + '.csv', fieldnames)
rowIdx = 0
count = 0
batchCount = 0
rows = ""
#This function appends to rows to the Append Blob in Azures Storage.
def appendBlob(rowStr):
while True:
try:
append_blob_service.append_blob_from_text('distances', str(start) + "-" + str(stop) + '.csv', rowStr)
break
        except Exception:
            print("error posting rows -- trying again")
for code1 in LatLongDict:
if(rowIdx >= start and rowIdx <= stop):
lat1 = radians(float(LatLongDict[code1][0]))
lon1 = radians(float(LatLongDict[code1][1]))
rows += code1
#outputs progress.
if count % 10 == 0:
print(str(start) + "-" + str(stop) + ": Processing " + str(count) + " of " + str(stop - start) + " airports.")
count += 1
# Selects the destination airport, then calculates the distance between it
# and the origin using the distance over sphere based on the latitude and longitude.
for code2 in LatLongDict:
#calculates the distances between two airports over the surface of the earth.
lat2 = radians(float(LatLongDict[code2][0]))
lon2 = radians(float(LatLongDict[code2][1]))
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = R * c
rows += "," + str(round(distance, 2))
rows+="\n"
#Appends 100 rows at a time to the storage blob.
if count % 100 == 0:
appendBlob(rows)
rows=""
rowIdx += 1
#Appends any final rows if not already appended.
if count % 100 != 0:
appendBlob(rows)
#Logs the finish time
append_blob_service.append_blob_from_text('distances', 'log.txt', "Finishing " + str(start) + "-" + str(stop) + ":" + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + "\n")
|
# Usage: Call python3 controller.py X, where X is the number of SLURM
# jobs you want to spawn on the SLURM nodes
import csv
import sys
import subprocess
import datetime
import time
from azure.storage.blob import AppendBlobService
# Configure account name with the Azure Storage Account Name and the account Key from Storage Explorer
append_blob_service = AppendBlobService(
account_name='storage_account_name',
account_key='storage_account_key')
# Creates an append blob for this app.
append_blob_service.create_container('distances')
append_blob_service.create_blob('distances', 'log.txt')
append_blob_service.append_blob_from_text('distances', 'log.txt', "Starting: " + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') + "\n")
LatLongDict = {}
# Reads the number of jobs from the command line.
jobCount = int(sys.argv[1])
# Reads the airport data in to a list for easy access.
with open('airports-world.csv') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
LatLongDict[row['LocationID']] = [row['Latitude'], row['Longitude']]
# Configures the job size based on the job count passed in.
jobSize = int(len(LatLongDict) / jobCount) + 1
#Creates the slurm worker processes.
for i in range(0,len(LatLongDict),jobSize):
start = i
    stop = i + jobSize - 1
if (stop >= len(LatLongDict)):
stop = len(LatLongDict) -1
#calls SLURM
subprocess.Popen(["sbatch","worker.sh",str(start),str(stop)],close_fds=True)
#alternately, run these locally
#subprocess.Popen(["python","worker.py",str(start),str(stop)],close_fds=True)
|
import os, socket, sys, json
from base64 import b64encode, b64decode
from hashlib import sha256
from time import time
from urllib.parse import quote_plus, urlencode
from hmac import HMAC
import paho.mqtt.client as mqtt
conn_str = os.getenv("conn_str")
osname = ""
rid = 0
if sys.platform == "linux":
osname = str(os.uname().release + " " + os.uname().version + " " + os.uname().machine)
else:
osname = str("Windows build " + str(sys.getwindowsversion().build) + " " + os.environ["PROCESSOR_IDENTIFIER"])
if conn_str is None:
    print("Please set the environment variable conn_str to the value of a device connection string")
print(" example: export conn_str=\"HostName=ksaye.azure-devices.net;DeviceId=python;SharedAccessKey=4sDfmCBS1MnfVsQxUv/rEksRlzOctcOU=\"")
print("exiting now")
quit()
conn_str = conn_str.replace("\"","")
print("Connection String: " + conn_str)
print()
def generate_sas_token(uri, key, expiry=3600):
ttl = time() + expiry
sign_key = "%s\n%d" % ((quote_plus(uri)), int(ttl))
sign_key = sign_key.encode('utf-8')
signature = b64encode(HMAC(b64decode(key), sign_key, sha256).digest())
return 'SharedAccessSignature ' + urlencode({'sr' : uri,'sig': signature, 'se' : str(int(ttl))})
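# Illustrative example (placeholder values, not executed):
#   generate_sas_token("myhub.azure-devices.net/devices/mydevice", "<base64-device-key>")
# returns a string of the form
#   'SharedAccessSignature sr=myhub.azure-devices.net%2Fdevices%2Fmydevice&sig=<signature>&se=<expiry>'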
def message_handler(client, userdata, msg):
global rid
msgpayload = msg.payload.decode("utf-8")
if "$iothub/methods/POST/" in msg.topic:
# responding to a direct method
print("Received Direct Method: " + str(msg.topic).split("/")[3] + " with payload: " + str(msgpayload))
print()
rid = int(msg.topic.split("=")[1])
# acknowledging the direct method
payload = {"result": True, "data": "some data"}
status = 200
client.publish("$iothub/methods/res/" + str(status) + "/?$rid=" + str(rid), str(payload))
elif "$iothub/twin/res/" in msg.topic:
# received a twin
print("Received twin status " + str(msg.topic).split("/")[3] + " with payload: "+str(msgpayload))
print()
int(msg.topic.split("=")[1].split("&")[0])
elif "devices/" + deviceID + "/messages/devicebound/" in msg.topic:
# we have a cloud to device message
print("Received C2D with payload: "+str(msgpayload))
def on_connect(client, userdata, flags, rc):
client.subscribe("devices/" + deviceID + "/messages/devicebound/#") # C2D
client.subscribe("$iothub/methods/POST/#") # direct methods
client.subscribe("$iothub/twin/res/#") # twins
client.publish("$iothub/twin/GET/?$rid=" + str(rid), "") # must send a message to get the initial twin
hostname = conn_str.split(";")[0].split("=")[1]
deviceID = conn_str.split(";")[1].split("=")[1]
deviceKey = conn_str.split(";")[2].replace("SharedAccessKey=", "")
password = generate_sas_token(hostname + "/devices/" + deviceID, deviceKey)
client = mqtt.Client(client_id=deviceID)
client.on_message = message_handler
client.on_connect = on_connect
client.username_pw_set(hostname + "/" + deviceID + "/api-version=2016-11-14", password)
client.tls_set_context(context=None)
client.connect(hostname, 8883)
while not client.is_connected():
client.loop() # waiting for connection
# updating a twin property
reported_properties = {"OS": osname, "CurrentUser": str(os.environ["LOGNAME"]), "IP": str(socket.gethostbyname(socket.gethostname()))}
client.publish("$iothub/twin/PATCH/properties/reported/?$rid=" + str(rid), json.dumps(reported_properties))
print("sent twin: " + str(reported_properties))
# send a message with properties and encoding
iotmessage = json.dumps({"Message": "Hello World"})
client.publish("devices/" + deviceID + "/messages/events/alert=False$.ct=application%2Fjson&$.ce=utf-8", iotmessage)
print("Message " + str(iotmessage) + " and twin sent, waiting to reveive messages/command/twins, Control+C to exit")
client.loop_forever()
|
import os, socket, sys, json
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
conn_str = os.getenv("conn_str")
osname = ""
if sys.platform == "linux":
osname = str(os.uname().release + " " + os.uname().version + " " + os.uname().machine)
else:
osname = str("Windows build " + str(sys.getwindowsversion().build) + " " + os.environ["PROCESSOR_IDENTIFIER"])
if conn_str is None:
    print("Please set the environment variable conn_str to the value of a device connection string")
print(" example: export conn_str=\"HostName=ksaye.azure-devices.net;DeviceId=python;SharedAccessKey=4sDfmCBS1MnfVsQxUv/rEksRlzOctcOU=\"")
print("exiting now")
quit()
conn_str = conn_str.replace("\"","")
print("Connection String: " + conn_str)
print()
# reference: https://github.com/Azure/azure-iot-sdk-python/blob/main/samples/sync-samples/receive_direct_method.py
def method_request_handler(method_request):
# Determine how to respond to the method request based on the method name
if method_request.name == "method1":
payload = {"result": True, "data": "some data"} # set response payload
status = 200 # set return status code
print("executed method1")
else:
payload = {"result": False, "data": "unknown method"} # set response payload
status = 400 # set return status code
print("executed unknown method: " + method_request.name)
# Send the response
method_response = MethodResponse.create_from_method_request(method_request, status, payload)
device_client.send_method_response(method_response)
# reference: https://github.com/Azure/azure-iot-sdk-python/blob/main/samples/sync-samples/receive_message.py
def message_handler(message):
print("the data in the message received was ")
print(message.data)
print("custom properties are")
print(message.custom_properties)
# reference: https://github.com/Azure/azure-iot-sdk-python/blob/main/samples/sync-samples/receive_twin_desired_properties_patch.py
def twin_patch_handler(patch):
print("the data in the desired properties patch was: {}".format(patch))
device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
device_client.connect()
device_client.on_message_received = message_handler
device_client.on_method_request_received = method_request_handler
device_client.on_twin_desired_properties_patch_received = twin_patch_handler
# get the initial twin, reference: https://github.com/Azure/azure-iot-sdk-python/blob/main/samples/sync-samples/get_twin.py
twin = device_client.get_twin()
print("Initial twin document received:")
print(" {}".format(twin))
print()
# setting a reported twin, reference: https://github.com/Azure/azure-iot-sdk-python/blob/main/samples/sync-samples/update_twin_reported_properties.py
reported_properties = {"OS": osname, "CurrentUser": str(os.environ["LOGNAME"]), "IP": str(socket.gethostbyname(socket.gethostname()))}
print("Sending reported twin properties:")
print(" {}".format(reported_properties))
print()
device_client.patch_twin_reported_properties(reported_properties)
#finally sending a message with properties and encoding
iotmessage = Message(json.dumps({"Message": "Hello World"}))
iotmessage.custom_properties["alert"] = False
iotmessage.content_encoding = "utf-8"
iotmessage.content_type = "application/json"
device_client.send_message(iotmessage)
input("Message " + str(iotmessage) + " and twin sent, waiting to reveive messages/command/twins, Press any key to quit\n")
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import time
import uuid
from azure.iot.device import IoTHubDeviceClient, Message, X509
# The connection string for a device should never be stored in code.
# For the sake of simplicity we are creating the X509 connection string
# containing Hostname and Device Id in the following format:
# "HostName=<iothub_host_name>;DeviceId=<device_id>;x509=true"
hostname = os.getenv("HOSTNAME")
# The device that has been created on the portal using X509 CA signing or Self signing capabilities
device_id = os.getenv("DEVICE_ID")
x509 = X509(
cert_file=os.getenv("X509_CERT_FILE"),
key_file=os.getenv("X509_KEY_FILE"),
pass_phrase=os.getenv("X509_PASS_PHRASE"),
)
# The client object is used to interact with your Azure IoT hub.
device_client = IoTHubDeviceClient.create_from_x509_certificate(
hostname=hostname, device_id=device_id, x509=x509
)
# Connect the client.
device_client.connect()
# send 5 messages with a 1 second pause between each message
for i in range(1, 6):
print("sending message #" + str(i))
msg = Message("test wind speed " + str(i))
msg.message_id = uuid.uuid4()
msg.correlation_id = "correlation-1234"
msg.custom_properties["tornado-warning"] = "yes"
msg.content_encoding = "utf-8"
msg.content_type = "application/json"
device_client.send_message(msg)
time.sleep(1)
# send only string messages
for i in range(6, 11):
print("sending message #" + str(i))
device_client.send_message("test payload message " + str(i))
time.sleep(1)
# finally, shut down the client
device_client.shutdown()
|