import torch
import numpy as np

# import the dataset generation functions
from custom_dataset import get_data_and_generate_train_val_test_sets as multivariate_dataset
from custom_dataset_univariate import get_data_and_generate_train_val_test_sets as univariate_dataset
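# both generators return (train_set, val_set, test_set, mean, std); mean/std are
# the normalization statistics applied when 'normalize' is True (per-feature
# arrays for the multivariate variant, scalars for the univariate one)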

# names of features
feat_names = [
    'energy consumption (kwh)',
    '15-min interval of day [0..96]',
    'day of week [0..6]',
    'temperature (celsius)',
    'windspeed (m/s)',
    'floor area (ft2)',
    'wall area (m2)',
    'window area (m2)',
]

# load raw numpy data
heterogenous_data = np.load('./IllinoisHeterogenous.npz')['data']
homogenous_data = np.load('./IllinoisHomogenous.npz')['data']
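# assumed array layout (inferred from the kwargs below, not documented by the
# .npz files themselves): axis 0 indexes buildings (num_bldg = data.shape[0]),
# and the remaining axes are presumed to be (time, features), with one feature
# per entry of feat_names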

# generate train-val-test datasets
# CASE 1: multivariate, with the time indices also normalized. heterogenous dataset
train_1, val_1, test_1, mean_1, std_1 = multivariate_dataset(
    data_array=heterogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': False # time indices are normalized along with the other features - use in non-Transformer scenarios where index embedding is not needed
    }
)
# CASE 2: multivariate, with the time indices not normalized. heterogenous dataset
train_2, val_2, test_2, mean_2, std_2 = multivariate_dataset(
    data_array=heterogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': True # time indices are left as raw (unnormalized) integers - use in Transformer scenarios where the index is embedded
    }
)
# CASE 3: univariate. heterogenous dataset
train_3, val_3, test_3, mean_3, std_3 = univariate_dataset(
    data_array=heterogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': heterogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
    }
)
# CASE 4: multivariate, with the time indices also normalized. homogenous dataset
train_4, val_4, test_4, mean_4, std_4 = multivariate_dataset(
    data_array=homogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': False # time indices are normalized along with the other features - use in non-Transformer scenarios where index embedding is not needed
    }
)
# CASE 5: multivariate, with the time indices not normalized. homogenous dataset
train_5, val_5, test_5, mean_5, std_5 = multivariate_dataset(
    data_array=homogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
        'transformer': True # time indices are left as raw (unnormalized) integers - use in Transformer scenarios where the index is embedded
    }
)
# CASE 6: univariate. homogenous dataset
train_6, val_6, test_6, mean_6, std_6 = univariate_dataset(
    data_array=homogenous_data, # choose the appropriate file - homogenous or heterogenous
    split_ratios=[0.8,0.1,0.1], # ratios that add up to 1 - the split is made along all buildings' time axis
    dataset_kwargs={
        'num_bldg': homogenous_data.shape[0],
        'lookback': 512,
        'lookahead': 48,
        'normalize': True,
        'dtype': torch.float32,
    }
)
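
# NOTE: all six dataset variants above are built eagerly at module import time,
# so importing this file loads both .npz archives and materializes every split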


if __name__ == "__main__":

    # Create dataloaders
    dl_1 = torch.utils.data.DataLoader(train_1, batch_size=32, shuffle=False)
    dl_2 = torch.utils.data.DataLoader(train_2, batch_size=32, shuffle=False)
    dl_3 = torch.utils.data.DataLoader(train_3, batch_size=32, shuffle=False)
    dl_4 = torch.utils.data.DataLoader(train_4, batch_size=32, shuffle=False)
    dl_5 = torch.utils.data.DataLoader(train_5, batch_size=32, shuffle=False)
    dl_6 = torch.utils.data.DataLoader(train_6, batch_size=32, shuffle=False)

    # print the shapes of the elements in the first dataloader
    for inp, label, future_time in dl_1:
        print("Case 1: Each dataloader item contains input, label, future_time. Here time indices are normalized. Dataset is IL-HET.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.")
        print(f"Future time shape is (including batch size of 32): {future_time.shape}.\n")
        for i, (m, s, n) in enumerate(zip(mean_1.flatten().tolist(), std_1.flatten().tolist(), feat_names), start=1):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break

    # print the shapes of the elements in the second dataloader
    for inp, label, future_time in dl_2:
        print("Case 2: Each dataloader item contains input, label, future_time. Here time indices are not normalized to allow embedding. Dataset is IL-HET.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.")
        print(f"Future time shape is (including batch size of 32): {future_time.shape}.\n")
        for i, (m, s, n) in enumerate(zip(mean_2.flatten().tolist(), std_2.flatten().tolist(), feat_names), start=1):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break

    # print the shapes of the elements in the third dataloader
    for inp, label in dl_3:
        print("Case 3: Each dataloader item contains input, label. Dataset is IL-HET.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.\n")
        print(f"Feature number: 1, name: {feat_names[0]}, mean: {mean_3.item()}, std: {std_3.item()}.")
        print('----------------\n')
        break

    # print the shapes of the elements in the fourth dataloader
    for inp, label, future_time in dl_4:
        print("Case 4: Each dataloader item contains input, label, future_time. Here time indices are normalized. Dataset is IL-HOM.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.")
        print(f"Future time shape is (including batch size of 32): {future_time.shape}.\n")
        for i, (m, s, n) in enumerate(zip(mean_4.flatten().tolist(), std_4.flatten().tolist(), feat_names), start=1):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break

    # print the shapes of the elements in the fifth dataloader
    for inp, label, future_time in dl_5:
        print("Case 5: Each dataloader item contains input, label, future_time. Here time indices are not normalized to allow embedding. Dataset is IL-HOM.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.")
        print(f"Future time shape is (including batch size of 32): {future_time.shape}.\n")
        for i, (m, s, n) in enumerate(zip(mean_5.flatten().tolist(), std_5.flatten().tolist(), feat_names), start=1):
            print(f"Feature number: {i}, name: {n}, mean: {m}, std: {s}." + (" (unnormalized)" if m == 0 and s == 1 else ""))
        print('----------------\n')
        break

    # print the shapes of the elements in the sixth dataloader
    for inp, label in dl_6:
        print("Case 6: Each dataloader item contains input, label. Dataset is IL-HOM.")
        print(f"Input shape is (including batch size of 32): {inp.shape}.")
        print(f"Label shape is (including batch size of 32): {label.shape}.")
        print(f"Feature number: 1, name: {feat_names[0]}, mean: {mean_6.item()}, std: {std_6.item()}.")
        print('----------------\n')
        break
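
    # Hedged usage sketch: push one batch through a throwaway linear baseline to
    # show how the (input, label, future_time) tuples flow into a model. The
    # baseline, the flattening, and the MSE objective are illustrative
    # assumptions, not part of this repository's modeling code.
    inp, label, _ = next(iter(dl_1))
    baseline = torch.nn.Linear(inp.flatten(1).shape[1], label.flatten(1).shape[1])
    pred = baseline(inp.flatten(1))  # (batch, lookahead * label_features)
    loss = torch.nn.functional.mse_loss(pred, label.flatten(1))
    print(f"One-batch MSE of an untrained linear baseline: {loss.item():.4f}")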