HUANGYIFEI committed
add lib
Graph/GraphMAE_MQ9/lib/__pycache__/metrics.cpython-38.pyc
ADDED
Binary file (2.42 kB)
Graph/GraphMAE_MQ9/lib/metrics.py
ADDED
@@ -0,0 +1,95 @@
# -*- coding:utf-8 -*-
import numpy as np
import torch
import torch.nn.functional as F


def masked_mape_np(y_true, y_pred, null_val=np.nan):
    # MAPE over the entries where y_true is valid; dividing the mask by its
    # mean makes the final np.mean average over the valid entries only.
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            mask = ~np.isnan(y_true)
        else:
            mask = np.not_equal(y_true, null_val)
        mask = mask.astype('float32')
        mask /= np.mean(mask)
        mape = np.abs(np.divide(np.subtract(y_pred, y_true).astype('float32'),
                                y_true))
        mape = np.nan_to_num(mask * mape)
        return np.mean(mape)


def masked_mse(preds, labels, null_val=np.nan):
    # MSE over the entries where labels are valid (non-NaN, or != null_val)
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    mask /= torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = (preds - labels) ** 2
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)


def masked_rmse(preds, labels, null_val=np.nan):
    return torch.sqrt(masked_mse(preds=preds, labels=labels,
                                 null_val=null_val))


def masked_mae(preds, labels, null_val=np.nan):
    # MAE over the entries where labels are valid (non-NaN, or != null_val)
    if np.isnan(null_val):
        mask = ~torch.isnan(labels)
    else:
        mask = (labels != null_val)
    mask = mask.float()
    mask /= torch.mean(mask)
    mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
    loss = torch.abs(preds - labels)
    loss = loss * mask
    loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
    return torch.mean(loss)


def masked_mae_test(y_true, y_pred, null_val=np.nan):
    # NumPy counterpart of masked_mae, used at test time
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            mask = ~np.isnan(y_true)
        else:
            mask = np.not_equal(y_true, null_val)
        mask = mask.astype('float32')
        mask /= np.mean(mask)
        mae = np.abs(np.subtract(y_pred, y_true).astype('float32'))
        mae = np.nan_to_num(mask * mae)
        return np.mean(mae)


def masked_rmse_test(y_true, y_pred, null_val=np.nan):
    # NumPy counterpart of masked_rmse, used at test time
    with np.errstate(divide='ignore', invalid='ignore'):
        if np.isnan(null_val):
            mask = ~np.isnan(y_true)
        else:
            mask = np.not_equal(y_true, null_val)
        mask = mask.astype('float32')
        mask /= np.mean(mask)
        mse = ((y_pred - y_true) ** 2)
        mse = np.nan_to_num(mask * mse)
        return np.sqrt(np.mean(mse))


def sce_loss(x, y, alpha=3):
    # scaled cosine error: (1 - cos(x, y)) ** alpha, averaged over the batch
    x = F.normalize(x, p=2, dim=-1)
    y = F.normalize(y, p=2, dim=-1)

    # loss = - (x * y).sum(dim=-1)
    # loss = (x_h - y_h).norm(dim=1).pow(alpha)

    loss = (1 - (x * y).sum(dim=-1)).pow_(alpha)

    loss = loss.mean()
    return loss
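For context, a minimal usage sketch of these metrics (not part of the committed file; the tensors are made-up examples, and the import assumes the script runs from the lib directory):

import numpy as np
import torch

from metrics import masked_mae, masked_rmse, sce_loss

# labels with one missing entry encoded as NaN; with null_val=np.nan the
# masked metrics ignore that position and average over the valid entries
preds = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
labels = torch.tensor([[1.5, float('nan')], [3.0, 5.0]])
print(masked_mae(preds, labels, null_val=np.nan))   # tensor(0.5000): mean of |0.5|, |0|, |1|
print(masked_rmse(preds, labels, null_val=np.nan))

# sce_loss compares two batches of embeddings; identical inputs give ~0
z = torch.randn(8, 16)
print(sce_loss(z, z.clone()).item())

Because the mask is divided by its own mean, the final torch.mean over all positions equals the average error over the valid positions only.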
Graph/GraphMAE_MQ9/lib/utils.py
ADDED
@@ -0,0 +1,397 @@
import os
import sys
import csv

import numpy as np
import torch
import torch.utils.data
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from scipy.sparse.linalg import eigs

project_path = "/content/gdrive//My Drive/CS5248_project"
sys.path.append(project_path + '/lib')
from metrics import masked_mape_np, masked_mae, masked_mse, masked_rmse, masked_mae_test, masked_rmse_test


def re_normalization(x, mean, std):
    x = x * std + mean
    return x


def max_min_normalization(x, _max, _min):
    x = 1. * (x - _min) / (_max - _min)
    x = x * 2. - 1.
    return x


def re_max_min_normalization(x, _max, _min):
    x = (x + 1.) / 2.
    x = 1. * x * (_max - _min) + _min
    return x


def get_adjacency_matrix(distance_df_filename, num_of_vertices, id_filename=None):
    '''
    Parameters
    ----------
    distance_df_filename: str, path of the csv file that contains the edge information

    num_of_vertices: int, the number of vertices

    Returns
    ----------
    A: np.ndarray, adjacency matrix

    '''
    if 'npy' in distance_df_filename:

        adj_mx = np.load(distance_df_filename)

        return adj_mx, None

    else:

        A = np.zeros((int(num_of_vertices), int(num_of_vertices)),
                     dtype=np.float32)

        distanceA = np.zeros((int(num_of_vertices), int(num_of_vertices)),
                             dtype=np.float32)

        if id_filename:

            with open(id_filename, 'r') as f:
                # map the raw node ids onto indices starting from 0
                id_dict = {int(i): idx for idx, i in enumerate(f.read().strip().split('\n'))}

            with open(distance_df_filename, 'r') as f:
                f.readline()  # skip the header line
                reader = csv.reader(f)
                for row in reader:
                    if len(row) != 3:
                        continue
                    i, j, distance = int(row[0]), int(row[1]), float(row[2])
                    A[id_dict[i], id_dict[j]] = 1
                    distanceA[id_dict[i], id_dict[j]] = distance
            return A, distanceA

        else:

            with open(distance_df_filename, 'r') as f:
                f.readline()  # skip the header line
                reader = csv.reader(f)
                for row in reader:
                    if len(row) != 3:
                        continue
                    i, j, distance = int(row[0]), int(row[1]), float(row[2])
                    A[i, j] = 1
                    distanceA[i, j] = distance
            return A, distanceA

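A quick sketch of get_adjacency_matrix (not part of the committed file), using a throwaway edge list in the from,to,cost CSV layout the function expects; the file path and edges are hypothetical:

from utils import get_adjacency_matrix  # assumes lib/ is on the path

# hypothetical edge list; the first line is a header that the function skips
with open('/tmp/edges.csv', 'w') as f:
    f.write('from,to,cost\n')
    f.write('0,1,3.5\n')
    f.write('1,2,1.2\n')

A, distanceA = get_adjacency_matrix('/tmp/edges.csv', num_of_vertices=3)
print(A)          # binary adjacency: A[0, 1] == 1 and A[1, 2] == 1
print(distanceA)  # same sparsity pattern, holding the edge costs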
def scaled_Laplacian(W):
    '''
    compute \tilde{L} = (2 * L) / lambda_max - I

    Parameters
    ----------
    W: np.ndarray, shape is (N, N), N is the num of vertices

    Returns
    ----------
    scaled_Laplacian: np.ndarray, shape (N, N)

    '''

    assert W.shape[0] == W.shape[1]

    D = np.diag(np.sum(W, axis=1))

    L = D - W

    lambda_max = eigs(L, k=1, which='LR')[0].real

    return (2 * L) / lambda_max - np.identity(W.shape[0])


def cheb_polynomial(L_tilde, K):
    '''
    compute a list of Chebyshev polynomials from T_0 to T_{K-1}

    Parameters
    ----------
    L_tilde: scaled Laplacian, np.ndarray, shape (N, N)

    K: the maximum order of Chebyshev polynomials

    Returns
    ----------
    cheb_polynomials: list(np.ndarray), length: K, from T_0 to T_{K-1}

    '''

    N = L_tilde.shape[0]

    cheb_polynomials = [np.identity(N), L_tilde.copy()]

    for i in range(2, K):
        # note: `*` is NumPy's element-wise product here, not a matrix product
        cheb_polynomials.append(2 * L_tilde * cheb_polynomials[i - 1] - cheb_polynomials[i - 2])

    return cheb_polynomials

+
def load_graphdata_channel1(graph_signal_matrix_filename, num_of_indices, DEVICE, batch_size, shuffle=True):
|
143 |
+
'''
|
144 |
+
这个是为PEMS的数据准备的函数
|
145 |
+
将x,y都处理成归一化到[-1,1]之前的数据;
|
146 |
+
每个样本同时包含所有监测点的数据,所以本函数构造的数据输入时空序列预测模型;
|
147 |
+
该函数会把hour, day, week的时间串起来;
|
148 |
+
注: 从文件读入的数据,x是最大最小归一化的,但是y是真实值
|
149 |
+
这个函数转为mstgcn,astgcn设计,返回的数据x都是通过减均值除方差进行归一化的,y都是真实值
|
150 |
+
:param graph_signal_matrix_filename: str
|
151 |
+
:param num_of_hours: int
|
152 |
+
:param num_of_days: int
|
153 |
+
:param num_of_weeks: int
|
154 |
+
:param DEVICE:
|
155 |
+
:param batch_size: int
|
156 |
+
:return:
|
157 |
+
three DataLoaders, each dataloader contains:
|
158 |
+
test_x_tensor: (B, N_nodes, in_feature, T_input)
|
159 |
+
test_decoder_input_tensor: (B, N_nodes, T_output)
|
160 |
+
test_target_tensor: (B, N_nodes, T_output)
|
161 |
+
|
162 |
+
'''
|
163 |
+
|
164 |
+
file = os.path.basename(graph_signal_matrix_filename).split('.')[0]
|
165 |
+
|
166 |
+
dirpath = os.path.dirname(graph_signal_matrix_filename)
|
167 |
+
|
168 |
+
filename = os.path.join(dirpath,
|
169 |
+
file) +'_astcgn'
|
170 |
+
|
171 |
+
print('load file:', filename)
|
172 |
+
|
173 |
+
file_data = np.load(filename + '.npz')
|
174 |
+
train_x = file_data['train_x'] # (10181, 307, 3, 12)
|
175 |
+
train_x = train_x[:, :, 0:5, :]
|
176 |
+
train_target = file_data['train_target'] # (10181, 307, 12)
|
177 |
+
|
178 |
+
val_x = file_data['val_x']
|
179 |
+
val_x = val_x[:, :, 0:5, :]
|
180 |
+
val_target = file_data['val_target']
|
181 |
+
|
182 |
+
test_x = file_data['test_x']
|
183 |
+
test_x = test_x[:, :, 0:5, :]
|
184 |
+
test_target = file_data['test_target']
|
185 |
+
|
186 |
+
mean = file_data['mean'][:, :, 0:5, :] # (1, 1, 3, 1)
|
187 |
+
std = file_data['std'][:, :, 0:5, :] # (1, 1, 3, 1)
|
188 |
+
|
189 |
+
# ------- train_loader -------
|
190 |
+
train_x_tensor = torch.from_numpy(train_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)
|
191 |
+
train_target_tensor = torch.from_numpy(train_target).type(torch.FloatTensor).to(DEVICE) # (B, N, T)
|
192 |
+
|
193 |
+
train_dataset = torch.utils.data.TensorDataset(train_x_tensor, train_target_tensor)
|
194 |
+
|
195 |
+
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle)
|
196 |
+
|
197 |
+
# ------- val_loader -------
|
198 |
+
val_x_tensor = torch.from_numpy(val_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)
|
199 |
+
val_target_tensor = torch.from_numpy(val_target).type(torch.FloatTensor).to(DEVICE) # (B, N, T)
|
200 |
+
|
201 |
+
val_dataset = torch.utils.data.TensorDataset(val_x_tensor, val_target_tensor)
|
202 |
+
|
203 |
+
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)
|
204 |
+
|
205 |
+
# ------- test_loader -------
|
206 |
+
test_x_tensor = torch.from_numpy(test_x).type(torch.FloatTensor).to(DEVICE) # (B, N, F, T)
|
207 |
+
test_target_tensor = torch.from_numpy(test_target).type(torch.FloatTensor).to(DEVICE) # (B, N, T)
|
208 |
+
|
209 |
+
test_dataset = torch.utils.data.TensorDataset(test_x_tensor, test_target_tensor)
|
210 |
+
|
211 |
+
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
|
212 |
+
|
213 |
+
# print
|
214 |
+
print('train:', train_x_tensor.size(), train_target_tensor.size())
|
215 |
+
print('val:', val_x_tensor.size(), val_target_tensor.size())
|
216 |
+
print('test:', test_x_tensor.size(), test_target_tensor.size())
|
217 |
+
|
218 |
+
return train_loader, train_target_tensor, val_loader, val_target_tensor, test_loader, test_target_tensor, mean, std
|
219 |
+
|
220 |
+
|
221 |
+
def compute_val_loss_mstgcn(net, val_loader, criterion, masked_flag,missing_value,sw, epoch, limit=None):
|
222 |
+
'''
|
223 |
+
for rnn, compute mean loss on validation set
|
224 |
+
:param net: model
|
225 |
+
:param val_loader: torch.utils.data.utils.DataLoader
|
226 |
+
:param criterion: torch.nn.MSELoss
|
227 |
+
:param sw: tensorboardX.SummaryWriter
|
228 |
+
:param global_step: int, current global_step
|
229 |
+
:param limit: int,
|
230 |
+
:return: val_loss
|
231 |
+
'''
|
232 |
+
|
233 |
+
net.train(False) # ensure dropout layers are in evaluation mode
|
234 |
+
|
235 |
+
with torch.no_grad():
|
236 |
+
|
237 |
+
val_loader_length = len(val_loader) # nb of batch
|
238 |
+
|
239 |
+
tmp = [] # 记录了所有batch的loss
|
240 |
+
|
241 |
+
for batch_index, batch_data in enumerate(val_loader):
|
242 |
+
encoder_inputs, labels = batch_data
|
243 |
+
outputs = net(encoder_inputs)
|
244 |
+
if masked_flag:
|
245 |
+
loss = criterion(outputs, labels, missing_value)
|
246 |
+
else:
|
247 |
+
loss = criterion(outputs, labels)
|
248 |
+
|
249 |
+
tmp.append(loss.item())
|
250 |
+
if batch_index % 100 == 0:
|
251 |
+
print('validation batch %s / %s, loss: %.2f' % (batch_index + 1, val_loader_length, loss.item()))
|
252 |
+
if (limit is not None) and batch_index >= limit:
|
253 |
+
break
|
254 |
+
|
255 |
+
validation_loss = sum(tmp) / len(tmp)
|
256 |
+
sw.add_scalar('validation_loss', validation_loss, epoch)
|
257 |
+
return validation_loss
|
258 |
+
|
259 |
+
|
260 |
+
# def evaluate_on_test_mstgcn(net, test_loader, test_target_tensor, sw, epoch, _mean, _std):
|
261 |
+
# '''
|
262 |
+
# for rnn, compute MAE, RMSE, MAPE scores of the prediction for every time step on testing set.
|
263 |
+
#
|
264 |
+
# :param net: model
|
265 |
+
# :param test_loader: torch.utils.data.utils.DataLoader
|
266 |
+
# :param test_target_tensor: torch.tensor (B, N_nodes, T_output, out_feature)=(B, N_nodes, T_output, 1)
|
267 |
+
# :param sw:
|
268 |
+
# :param epoch: int, current epoch
|
269 |
+
# :param _mean: (1, 1, 3(features), 1)
|
270 |
+
# :param _std: (1, 1, 3(features), 1)
|
271 |
+
# '''
|
272 |
+
#
|
273 |
+
# net.train(False) # ensure dropout layers are in test mode
|
274 |
+
#
|
275 |
+
# with torch.no_grad():
|
276 |
+
#
|
277 |
+
# test_loader_length = len(test_loader)
|
278 |
+
#
|
279 |
+
# test_target_tensor = test_target_tensor.cpu().numpy()
|
280 |
+
#
|
281 |
+
# prediction = [] # 存储所有batch的output
|
282 |
+
#
|
283 |
+
# for batch_index, batch_data in enumerate(test_loader):
|
284 |
+
#
|
285 |
+
# encoder_inputs, labels = batch_data
|
286 |
+
#
|
287 |
+
# outputs = net(encoder_inputs)
|
288 |
+
#
|
289 |
+
# prediction.append(outputs.detach().cpu().numpy())
|
290 |
+
#
|
291 |
+
# if batch_index % 100 == 0:
|
292 |
+
# print('predicting testing set batch %s / %s' % (batch_index + 1, test_loader_length))
|
293 |
+
#
|
294 |
+
# prediction = np.concatenate(prediction, 0) # (batch, T', 1)
|
295 |
+
# prediction_length = prediction.shape[2]
|
296 |
+
#
|
297 |
+
# for i in range(prediction_length):
|
298 |
+
# assert test_target_tensor.shape[0] == prediction.shape[0]
|
299 |
+
# print('current epoch: %s, predict %s points' % (epoch, i))
|
300 |
+
# mae = mean_absolute_error(test_target_tensor[:, :, i], prediction[:, :, i])
|
301 |
+
# rmse = mean_squared_error(test_target_tensor[:, :, i], prediction[:, :, i]) ** 0.5
|
302 |
+
# mape = masked_mape_np(test_target_tensor[:, :, i], prediction[:, :, i], 0)
|
303 |
+
# print('MAE: %.2f' % (mae))
|
304 |
+
# print('RMSE: %.2f' % (rmse))
|
305 |
+
# print('MAPE: %.2f' % (mape))
|
306 |
+
# print()
|
307 |
+
# if sw:
|
308 |
+
# sw.add_scalar('MAE_%s_points' % (i), mae, epoch)
|
309 |
+
# sw.add_scalar('RMSE_%s_points' % (i), rmse, epoch)
|
310 |
+
# sw.add_scalar('MAPE_%s_points' % (i), mape, epoch)
|
311 |
+
|
312 |
+
|
313 |
+
def predict_and_save_results_mstgcn(net, data_loader, data_target_tensor, global_step, metric_method,_mean, _std, params_path, type):
|
314 |
+
'''
|
315 |
+
|
316 |
+
:param net: nn.Module
|
317 |
+
:param data_loader: torch.utils.data.utils.DataLoader
|
318 |
+
:param data_target_tensor: tensor
|
319 |
+
:param epoch: int
|
320 |
+
:param _mean: (1, 1, 3, 1)
|
321 |
+
:param _std: (1, 1, 3, 1)
|
322 |
+
:param params_path: the path for saving the results
|
323 |
+
:return:
|
324 |
+
'''
|
325 |
+
net.train(False) # ensure dropout layers are in test mode
|
326 |
+
|
327 |
+
with torch.no_grad():
|
328 |
+
|
329 |
+
data_target_tensor = data_target_tensor.cpu().numpy()
|
330 |
+
|
331 |
+
loader_length = len(data_loader) # nb of batch
|
332 |
+
|
333 |
+
prediction = [] # 存储所有batch的output
|
334 |
+
|
335 |
+
input = [] # 存储所有batch的input
|
336 |
+
|
337 |
+
for batch_index, batch_data in enumerate(data_loader):
|
338 |
+
|
339 |
+
encoder_inputs, labels = batch_data
|
340 |
+
|
341 |
+
input.append(encoder_inputs[:, :, 0:1].cpu().numpy()) # (batch, T', 1)
|
342 |
+
|
343 |
+
outputs = net(encoder_inputs)
|
344 |
+
|
345 |
+
prediction.append(outputs.detach().cpu().numpy())
|
346 |
+
|
347 |
+
if batch_index % 100 == 0:
|
348 |
+
print('predicting data set batch %s / %s' % (batch_index + 1, loader_length))
|
349 |
+
|
350 |
+
input = np.concatenate(input, 0)
|
351 |
+
|
352 |
+
input = re_normalization(input, _mean, _std)
|
353 |
+
|
354 |
+
prediction = np.concatenate(prediction, 0) # (batch, T', 1)
|
355 |
+
|
356 |
+
print('input:', input.shape)
|
357 |
+
print('prediction:', prediction.shape)
|
358 |
+
print('data_target_tensor:', data_target_tensor.shape)
|
359 |
+
output_filename = os.path.join(params_path, 'output_epoch_%s_%s' % (global_step, type))
|
360 |
+
np.savez(output_filename, input=input, prediction=prediction, data_target_tensor=data_target_tensor)
|
361 |
+
|
362 |
+
# 计算误差
|
363 |
+
excel_list = []
|
364 |
+
prediction_length = prediction.shape[2]
|
365 |
+
|
366 |
+
for i in range(prediction_length):
|
367 |
+
assert data_target_tensor.shape[0] == prediction.shape[0]
|
368 |
+
print('current epoch: %s, predict %s points' % (global_step, i))
|
369 |
+
if metric_method == 'mask':
|
370 |
+
mae = masked_mae_test(data_target_tensor[:, :, i], prediction[:, :, i],0.0)
|
371 |
+
rmse = masked_rmse_test(data_target_tensor[:, :, i], prediction[:, :, i],0.0)
|
372 |
+
mape = masked_mape_np(data_target_tensor[:, :, i], prediction[:, :, i], 0)
|
373 |
+
else :
|
374 |
+
mae = mean_absolute_error(data_target_tensor[:, :, i], prediction[:, :, i])
|
375 |
+
rmse = mean_squared_error(data_target_tensor[:, :, i], prediction[:, :, i]) ** 0.5
|
376 |
+
mape = masked_mape_np(data_target_tensor[:, :, i], prediction[:, :, i], 0)
|
377 |
+
print('MAE: %.2f' % (mae))
|
378 |
+
print('RMSE: %.2f' % (rmse))
|
379 |
+
print('MAPE: %.2f' % (mape))
|
380 |
+
excel_list.extend([mae, rmse, mape])
|
381 |
+
|
382 |
+
# print overall results
|
383 |
+
if metric_method == 'mask':
|
384 |
+
mae = masked_mae_test(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0.0)
|
385 |
+
rmse = masked_rmse_test(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0.0)
|
386 |
+
mape = masked_mape_np(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0)
|
387 |
+
else :
|
388 |
+
mae = mean_absolute_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1))
|
389 |
+
rmse = mean_squared_error(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1)) ** 0.5
|
390 |
+
mape = masked_mape_np(data_target_tensor.reshape(-1, 1), prediction.reshape(-1, 1), 0)
|
391 |
+
print('all MAE: %.2f' % (mae))
|
392 |
+
print('all RMSE: %.2f' % (rmse))
|
393 |
+
print('all MAPE: %.2f' % (mape))
|
394 |
+
excel_list.extend([mae, rmse, mape])
|
395 |
+
print(excel_list)
|
396 |
+
|
397 |
+
|