|
import math
from math import sqrt

import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
|
|
class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional embedding."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()

        # Precompute the sinusoidal table once; it is a constant (non-trainable) buffer.
        pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False

        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (torch.arange(0, d_model, 2).float()
                    * -(math.log(10000.0) / d_model)).exp()

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        pe = pe.unsqueeze(0)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # Return the embeddings for the first x.size(1) positions: [1, L, d_model]
        return self.pe[:, :x.size(1)]
|
|
|
class PatchEmbedding(nn.Module):
    def __init__(self, d_model, patch_len, stride, padding, dropout):
        super(PatchEmbedding, self).__init__()

        # Patching parameters: pad the end of the series, then slice into patches
        self.patch_len = patch_len
        self.stride = stride
        self.padding_patch_layer = nn.ReplicationPad1d((0, padding))

        # Project each patch of length patch_len into d_model
        self.value_embedding = nn.Linear(patch_len, d_model, bias=False)

        # Positional embedding over the patch dimension
        self.position_embedding = PositionalEmbedding(d_model)

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch, n_vars, seq_len]
        n_vars = x.shape[1]
        x = self.padding_patch_layer(x)
        # unfold -> [batch, n_vars, n_patches, patch_len]
        x = x.unfold(dimension=-1, size=self.patch_len, step=self.stride)
        # Fold the variable dimension into the batch: [batch * n_vars, n_patches, patch_len]
        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))

        # Patch embedding + positional embedding
        x = self.value_embedding(x) + self.position_embedding(x)
        return self.dropout(x), n_vars
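
# Patch-count sanity check (an illustrative note; the sizes below are assumed
# example values, not anything fixed by this module): with seq_len=96,
# patch_len=16, stride=8 and ReplicationPad1d((0, 8)), unfold yields
# (96 + 8 - 16) // 8 + 1 = 12 patches per variable, which matches
# head_nf = d_model * int((seq_len - patch_len) / stride + 2) computed in
# PatchTST below.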
|
|
|
class TriangularCausalMask():
    def __init__(self, B, L, device="cpu"):
        mask_shape = [B, 1, L, L]
        with torch.no_grad():
            # True above the diagonal -> positions that must not attend to the future
            self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)

    @property
    def mask(self):
        return self._mask
|
|
|
class FullAttention(nn.Module):
    def __init__(self, mask_flag=True, factor=5, scale=None, attention_dropout=0.1, output_attention=False):
        super(FullAttention, self).__init__()
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        # queries: [B, L, H, E]; keys: [B, S, H, E]; values: [B, S, H, D]
        B, L, H, E = queries.shape
        _, S, _, D = values.shape
        scale = self.scale or 1. / sqrt(E)

        scores = torch.einsum("blhe,bshe->bhls", queries, keys)

        if self.mask_flag:
            if attn_mask is None:
                attn_mask = TriangularCausalMask(B, L, device=queries.device)

            scores.masked_fill_(attn_mask.mask, -np.inf)

        A = self.dropout(torch.softmax(scale * scores, dim=-1))
        V = torch.einsum("bhls,bshd->blhd", A, values)

        if self.output_attention:
            return V.contiguous(), A
        else:
            return V.contiguous(), None
|
|
|
class AttentionLayer(nn.Module):
    def __init__(self, attention, d_model, n_heads, d_keys=None,
                 d_values=None):
        super(AttentionLayer, self).__init__()

        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)

        self.inner_attention = attention
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        B, L, _ = queries.shape
        _, S, _ = keys.shape
        H = self.n_heads

        # Project and split into heads: [B, L/S, H, d_keys or d_values]
        queries = self.query_projection(queries).view(B, L, H, -1)
        keys = self.key_projection(keys).view(B, S, H, -1)
        values = self.value_projection(values).view(B, S, H, -1)

        out, attn = self.inner_attention(
            queries,
            keys,
            values,
            attn_mask,
            tau=tau,
            delta=delta
        )
        # Merge heads back: [B, L, H * d_values]
        out = out.view(B, L, -1)

        return self.out_projection(out), attn
|
|
|
class EncoderLayer(nn.Module):
    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        # Self-attention sub-layer with residual connection
        new_x, attn = self.attention(
            x, x, x,
            attn_mask=attn_mask,
            tau=tau, delta=delta
        )
        x = x + self.dropout(new_x)

        # Position-wise feed-forward sub-layer (1x1 convolutions) with residual connection
        y = x = self.norm1(x)
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))

        return self.norm2(x + y), attn
|
|
|
|
|
class Encoder(nn.Module):
    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        # x: [B, L, d_model]
        attns = []
        if self.conv_layers is not None:
            for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):
                delta = delta if i == 0 else None
                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
            attns.append(attn)
        else:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
                attns.append(attn)

        if self.norm is not None:
            x = self.norm(x)

        return x, attns
|
|
|
class Transpose(nn.Module):
    def __init__(self, *dims, contiguous=False):
        super().__init__()
        self.dims, self.contiguous = dims, contiguous

    def forward(self, x):
        if self.contiguous:
            return x.transpose(*self.dims).contiguous()
        else:
            return x.transpose(*self.dims)
|
|
|
|
|
class FlattenHead(nn.Module):
    def __init__(self, n_vars, nf, target_window, head_dropout=0):
        super().__init__()
        self.n_vars = n_vars
        self.flatten = nn.Flatten(start_dim=-2)
        self.linear = nn.Linear(nf, target_window)
        self.dropout = nn.Dropout(head_dropout)

    def forward(self, x):
        # x: [B, n_vars, d_model, n_patches] -> [B, n_vars, target_window]
        x = self.flatten(x)
        x = self.linear(x)
        x = self.dropout(x)
        return x
|
|
|
|
|
class PatchTST(nn.Module):
    """
    PatchTST: patch-based Transformer for time-series forecasting.
    Paper link: https://arxiv.org/pdf/2211.14730.pdf
    """

    def __init__(
        self,
        enc_in,
        dec_in,
        c_out,
        pred_len,
        seq_len,
        d_model=64,
        patch_len=16,
        stride=8,
        data_idx=[0, 3, 4, 5, 6, 7],
        time_idx=[1, 2],
        output_attention=False,
        factor=3,
        n_heads=4,
        d_ff=512,
        e_layers=3,
        activation='gelu',
        dropout=0.1
    ):
        """
        patch_len: int, patch length for patch_embedding
        stride: int, stride for patch_embedding
        data_idx: list of int, input channels holding observed values
        time_idx: list of int, input channels holding time features
        """
        super().__init__()
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.data_idx = data_idx
        self.time_idx = time_idx
        self.dec_in = dec_in
        padding = stride

        # Patching and embedding
        self.patch_embedding = PatchEmbedding(
            d_model, patch_len, stride, padding, dropout)

        # Encoder
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(False, factor, attention_dropout=dropout,
                                      output_attention=output_attention), d_model, n_heads),
                    d_model,
                    d_ff,
                    dropout=dropout,
                    activation=activation
                ) for _ in range(e_layers)
            ],
            norm_layer=nn.Sequential(Transpose(1, 2), nn.BatchNorm1d(d_model), Transpose(1, 2))
        )

        # Prediction head: flatten the per-variable patch representations and
        # project them to the forecast horizon
        self.head_nf = d_model * \
            int((seq_len - patch_len) / stride + 2)
        self.head = FlattenHead(enc_in, self.head_nf, pred_len,
                                head_dropout=dropout)
|
    def forecast(self, x_enc, x_mark_enc, x_dec, x_mark_dec):
        # Instance normalization: standardize each series over time (undone after the head)
        means = x_enc.mean(1, keepdim=True).detach()
        x_enc = x_enc - means
        stdev = torch.sqrt(
            torch.var(x_enc, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x_enc /= stdev

        # Patching and embedding: [B, seq_len, n_vars] -> [B, n_vars, seq_len]
        x_enc = x_enc.permute(0, 2, 1)
        # enc_out: [B * n_vars, n_patches, d_model]
        enc_out, n_vars = self.patch_embedding(x_enc)

        # Encoder
        enc_out, attns = self.encoder(enc_out)
        # Restore the variable dimension: [B, n_vars, n_patches, d_model]
        enc_out = torch.reshape(
            enc_out, (-1, n_vars, enc_out.shape[-2], enc_out.shape[-1]))
        # [B, n_vars, d_model, n_patches]
        enc_out = enc_out.permute(0, 1, 3, 2)

        # Prediction head: [B, n_vars, pred_len] -> [B, pred_len, n_vars]
        dec_out = self.head(enc_out)
        dec_out = dec_out.permute(0, 2, 1)

        # De-normalization
        dec_out = dec_out * \
            (stdev[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
        dec_out = dec_out + \
            (means[:, 0, :].unsqueeze(1).repeat(1, self.pred_len, 1))
        return dec_out

    def forward(self, x, fut_time):
        # Split the input into value channels and time-feature channels
        x_enc = x[:, :, self.data_idx]
        x_mark_enc = x[:, :, self.time_idx]
        # Placeholder decoder input (unused by PatchTST, kept for interface compatibility)
        x_dec = torch.zeros((fut_time.shape[0], fut_time.shape[1], self.dec_in),
                            dtype=fut_time.dtype, device=fut_time.device)
        x_mark_dec = fut_time

        # Return the last forecast step of the first target variable: [B, 1]
        return self.forecast(x_enc, x_mark_enc, x_dec, x_mark_dec)[:, -1, [0]]
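

# Minimal usage sketch: the sizes and hyper-parameters below are illustrative
# assumptions chosen only to exercise the forward pass and show the expected
# tensor shapes; they are not values prescribed by the paper or by any
# surrounding training pipeline.
if __name__ == "__main__":
    # 8 input channels: 6 value channels (data_idx) + 2 time-feature channels (time_idx)
    model = PatchTST(enc_in=6, dec_in=6, c_out=1, pred_len=24, seq_len=96)
    x = torch.randn(2, 96, 8)          # [batch, seq_len, channels]
    fut_time = torch.randn(2, 24, 2)   # future time features over the horizon
    y = model(x, fut_time)             # last step of the first target variable
    print(y.shape)                     # expected: torch.Size([2, 1])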