# Illinois_load_datasets / example_dataset.py
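"""Example usage of the Illinois load datasets (IL-HET and IL-HOM).

Builds train/val/test splits for the multivariate and univariate dataset
variants and, when run as a script, prints the shapes of the tensors that
each DataLoader yields.
"""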
import torch
import numpy as np
# import the dataset generation functions
from custom_dataset import get_data_and_generate_train_val_test_sets as multivariate_dataset
from custom_dataset_univariate import get_data_and_generate_train_val_test_sets as univariate_dataset
# names of features
feat_names = [
    'energy consumption (kWh)',
    '15-min interval of day [0..95]',
    'day of week [0..6]',
    'temperature (Celsius)',
    'windspeed (m/s)',
    'floor area (ft2)',
    'wall area (m2)',
    'window area (m2)',
]
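# The first feature (energy consumption) is the forecast target; the univariate
# variant below appears to use only this feature (see the Case 3/6 printouts).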
# load raw numpy data
heterogenous_data = np.load('./IllinoisHeterogenous.npz')['data']
homogenous_data = np.load('./IllinoisHomogenous.npz')['data']
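# Axis 0 of each array indexes buildings (it is passed as `num_bldg` below); the
# remaining axes are assumed to be (time, features), one column per entry of feat_names.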
# generate train-val-test datasets
# CASE 1: multivariate, with the time indices also normalized. Heterogeneous dataset (IL-HET).
train_1, val_1, test_1, mean_1, std_1 = multivariate_dataset(
    data_array=heterogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': False  # time indices are normalized along with the other features - use in non-Transformer scenarios where index embedding is not needed
    }
)
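# With 15-minute data, lookback=512 covers roughly 5.3 days of history and
# lookahead=48 a 12-hour forecast horizon; the same windows are used in every case below.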
# CASE 2: multivariate, with the time indices not normalized. Heterogeneous dataset (IL-HET).
train_2, val_2, test_2, mean_2, std_2 = multivariate_dataset(
    data_array=heterogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': True  # time indices are kept as raw integers - use in Transformer scenarios where the index is embedded
    }
)
# CASE 3: univariate. Heterogeneous dataset (IL-HET).
train_3, val_3, test_3, mean_3, std_3 = univariate_dataset(
    data_array=heterogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
    }
)
# CASE 4: multivariate, with the time indices also normalized. Homogeneous dataset (IL-HOM).
train_4, val_4, test_4, mean_4, std_4 = multivariate_dataset(
    data_array=homogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': False  # time indices are normalized along with the other features - use in non-Transformer scenarios where index embedding is not needed
    }
)
# CASE 5: multivariate, with the time indices not normalized. Homogeneous dataset (IL-HOM).
train_5, val_5, test_5, mean_5, std_5 = multivariate_dataset(
    data_array=homogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': True  # time indices are kept as raw integers - use in Transformer scenarios where the index is embedded
    }
)
# CASE 6: univariate. Homogeneous dataset (IL-HOM).
train_6, val_6, test_6, mean_6, std_6 = univariate_dataset(
    data_array=homogenous_data,  # choose the appropriate array - homogeneous or heterogeneous
    split_ratios=[0.8, 0.1, 0.1],  # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
    }
)
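
# The (mean, std) statistics returned above can be used to map model outputs
# back to physical units. A minimal sketch, assuming the standard z-score
# normalization x_norm = (x - mean) / std implied by `normalize=True`;
# `denormalize` is an illustrative helper, not part of the dataset API.
def denormalize(x, mean, std, feature_idx=0):
    """Undo z-score normalization for one feature (0 = energy consumption)."""
    m = torch.as_tensor(mean).flatten()[feature_idx]
    s = torch.as_tensor(std).flatten()[feature_idx]
    return x * s + m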
if __name__ == "__main__":
# Create dataloaders
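    # shuffle=False keeps this walkthrough deterministic; for actual training
    # you would typically pass shuffle=True to the training DataLoader.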
dl_1 = torch.utils.data.DataLoader(train_1, batch_size=32, shuffle=False)
dl_2 = torch.utils.data.DataLoader(train_2, batch_size=32, shuffle=False)
dl_3 = torch.utils.data.DataLoader(train_3, batch_size=32, shuffle=False)
dl_4 = torch.utils.data.DataLoader(train_4, batch_size=32, shuffle=False)
dl_5 = torch.utils.data.DataLoader(train_5, batch_size=32, shuffle=False)
dl_6 = torch.utils.data.DataLoader(train_6, batch_size=32, shuffle=False)
    # print the shapes of the elements yielded by the first dataloader
    for inp, label, future_time in dl_1:
        print("Case 1: Each dataloader item contains input, label, future_time. Here time indices are normalized. Dataset is IL-HET.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.")
        print(f"Future time shape (including batch size of 32): {future_time.shape}.\n")
        for m, s, n, i in zip(mean_1.flatten().tolist(), std_1.flatten().tolist(), feat_names, range(1, len(feat_names) + 1)):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break
    # print the shapes of the elements yielded by the second dataloader
    for inp, label, future_time in dl_2:
        print("Case 2: Each dataloader item contains input, label, future_time. Here time indices are not normalized, to allow embedding. Dataset is IL-HET.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.")
        print(f"Future time shape (including batch size of 32): {future_time.shape}.\n")
        for m, s, n, i in zip(mean_2.flatten().tolist(), std_2.flatten().tolist(), feat_names, range(1, len(feat_names) + 1)):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break
    # print the shapes of the elements yielded by the third dataloader
    for inp, label in dl_3:
        print("Case 3: Each dataloader item contains input, label. Dataset is IL-HET.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.\n")
        print(f"Feature number: 1, name: {feat_names[0]}, mean: {mean_3.item()}, std: {std_3.item()}.")
        print('----------------\n')
        break
    # print the shapes of the elements yielded by the fourth dataloader
    for inp, label, future_time in dl_4:
        print("Case 4: Each dataloader item contains input, label, future_time. Here time indices are normalized. Dataset is IL-HOM.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.")
        print(f"Future time shape (including batch size of 32): {future_time.shape}.\n")
        for m, s, n, i in zip(mean_4.flatten().tolist(), std_4.flatten().tolist(), feat_names, range(1, len(feat_names) + 1)):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break
    # print the shapes of the elements yielded by the fifth dataloader
    for inp, label, future_time in dl_5:
        print("Case 5: Each dataloader item contains input, label, future_time. Here time indices are not normalized, to allow embedding. Dataset is IL-HOM.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.")
        print(f"Future time shape (including batch size of 32): {future_time.shape}.\n")
        for m, s, n, i in zip(mean_5.flatten().tolist(), std_5.flatten().tolist(), feat_names, range(1, len(feat_names) + 1)):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break
    # print the shapes of the elements yielded by the sixth dataloader
    for inp, label in dl_6:
        print("Case 6: Each dataloader item contains input, label. Dataset is IL-HOM.")
        print(f"Input shape (including batch size of 32): {inp.shape}.")
        print(f"Label shape (including batch size of 32): {label.shape}.\n")
        print(f"Feature number: 1, name: {feat_names[0]}, mean: {mean_6.item()}, std: {std_6.item()}.")
        print('----------------\n')
        break
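
    # Example use of the `denormalize` sketch above: map a normalized output
    # back to kWh. Here `label` from the last loop stands in for a hypothetical
    # model prediction with the same scaling.
    pred_kwh = denormalize(label, mean_6, std_6)
    print(f"Denormalized prediction shape: {pred_kwh.shape}")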