# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        super(RNNModel, self).__init__()
        self.ntoken = ntoken
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
        self.decoder = nn.Linear(nhid, ntoken)
        self.criterion = nn.CrossEntropyLoss()

        # Optionally tie the encoder and decoder weights, as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to ninp')
            self.decoder.weight = self.encoder.weight

        self.init_weights()

        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden=None, labels=None):
        emb = self.encoder(input)          # (batch, seq_len, ninp)
        if hidden is not None:
            output, hidden = self.rnn(emb, hidden)
        else:
            output, hidden = self.rnn(emb)
        output = self.drop(output)
        output = self.decoder(output)      # logits: (batch, seq_len, ntoken)
        if labels is not None:
            # Shift so that the logits at position i are scored against the token at position i + 1.
            shift_logits = output[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)),
                                  shift_labels.view(-1))
            return loss, output, hidden
        else:
            return output, hidden

    def init_hidden(self, bsz):
        # The recurrent module is always an LSTM here, so the hidden state is an
        # (h_0, c_0) tuple; each tensor is (nlayers, bsz, nhid) even with batch_first=True.
        weight = next(self.parameters())
        return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                weight.new_zeros(self.nlayers, bsz, self.nhid))
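

# Usage sketch (illustrative, not part of the original training code): shows how the
# model might be instantiated and run for one forward/loss computation. The vocabulary
# size, dimensions, batch size, and sequence length below are assumed placeholder values.
if __name__ == "__main__":
    ntoken, ninp, nhid, nlayers = 1000, 128, 128, 2
    model = RNNModel(ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=True)

    # Dummy token ids in batch_first layout: (batch, seq_len).
    tokens = torch.randint(0, ntoken, (4, 35))

    # With labels, forward returns (loss, logits, hidden).
    loss, logits, hidden = model(tokens, labels=tokens)
    print(loss.item(), logits.shape)  # logits: (4, 35, ntoken)

    # Without labels, forward returns (logits, hidden); an explicit initial state
    # can be built with init_hidden.
    hidden0 = model.init_hidden(bsz=4)
    logits, hidden = model(tokens, hidden=hidden0)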