import math
import torch
import torch.nn as nn
import torch.nn.functional as F


class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.ntoken = ntoken
        self.drop = nn.Dropout(dropout)
        # Encoder maps token ids to ninp-dimensional embeddings.
        self.encoder = nn.Embedding(ntoken, ninp)
        # batch_first=True: inputs and outputs are (batch, seq_len, feature).
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
        # Decoder projects hidden states back onto the vocabulary.
        self.decoder = nn.Linear(nhid, ntoken)
        self.criterion = nn.CrossEntropyLoss()

        # Optionally tie the input embedding and output projection weights
        # (weight tying); this requires nhid == ninp so the shapes match.
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()

        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        # Zero the decoder bias; the weight gets its own uniform init below.
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden=None, labels=None):
        # input: (batch, seq_len) token ids; logits come back as (batch, seq_len, ntoken).
        emb = self.encoder(input)
        if hidden is not None:
            output, hidden = self.rnn(emb, hidden)
        else:
            output, hidden = self.rnn(emb)
        output = self.drop(output)
        output = self.decoder(output)

        if labels is not None:
            # Shift so that the logits at position i are scored against the token at position i + 1.
            shift_logits = output[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            return loss, output, hidden
        else:
            return output, hidden

    def init_hidden(self, bsz):
        weight = next(self.parameters())
        # The recurrent module is hard-coded to nn.LSTM above, so the initial
        # state is always an (h_0, c_0) pair of shape (nlayers, bsz, nhid).
        return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid))
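

# Minimal usage sketch: exercises the training-style and inference-style calls
# described above. The hyperparameters, batch size, and sequence length are
# illustrative assumptions, not values taken from the surrounding project.
if __name__ == "__main__":
    ntoken, ninp, nhid, nlayers = 1000, 128, 128, 2
    model = RNNModel(ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=True)

    batch_size, seq_len = 4, 16
    tokens = torch.randint(0, ntoken, (batch_size, seq_len))  # (batch, seq_len) token ids

    # Training-style call: passing labels returns the shifted next-token loss.
    loss, logits, hidden = model(tokens, labels=tokens)
    print(loss.item(), logits.shape)  # logits: (batch, seq_len, ntoken)

    # Inference-style call: reuse the returned hidden state across chunks.
    hidden = model.init_hidden(batch_size)
    logits, hidden = model(tokens, hidden)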