TobiTob committed
Commit
743e3bb
1 Parent(s): ea37106

Delete evaluate_transformer.py

Files changed (1)
  1. evaluate_transformer.py +0 -217
evaluate_transformer.py DELETED
@@ -1,217 +0,0 @@
- import numpy as np
- import time
- import torch
- from MyDecisionTransformer import MyDecisionTransformer
- from citylearn.citylearn import CityLearnEnv
-
- """
- This file is used to evaluate a decision transformer loaded from https://huggingface.co/TobiTob/model_name
- """
-
-
- class Constants:
-     """Environment Constants"""
-     episodes = 1  # number of environment resets
-     state_dim = 28  # size of the state space
-     action_dim = 1  # size of the action space
-     schema_path = './data/citylearn_challenge_2022_phase_1/schema.json'
-
-     """Model Constants"""
-     load_model = "TobiTob/decision_transformer_2"
-     force_download = False
-     device = "cpu"
-     TARGET_RETURN = -2500  # maybe a vector of 5 values
-     # Mean and std are computed from the training dataset; they are available in the model card of each model.
-
-     state_mean = np.array(
-         [6.525973284621532, 3.9928073981048064, 12.498801233017467, 16.836990550577212, 16.837287388159297,
-          16.83684213167729, 16.837161803003287, 73.00388172165772, 73.00331088023746, 73.00445256307798,
-          73.00331088023746, 208.30597100125584, 208.30597100125584, 208.20287704075807, 208.30597100125584,
-          201.25448110514898, 201.25448110514898, 201.16189062678387, 201.25448110514898, 0.15652765849893777,
-          1.0663012570140091, 0.6994348432433195, 0.5023924181838172, 0.49339119658209996, 0.2731373418679261,
-          0.2731373418679261, 0.2731373418679261, 0.2731373418679261])
-     state_std = np.array(
-         [3.448045414453991, 2.0032677368929734, 6.921673394725967, 3.564552828057008, 3.5647828974724476,
-          3.5643565817901974, 3.564711987899257, 16.480221141108398, 16.480030755727572, 16.480238315742053,
-          16.480030755727565, 292.79094956097464, 292.79094956097464, 292.70528837855596, 292.79094956097543,
-          296.18549714910006, 296.18549714910023, 296.1216266457902, 296.18549714910006, 0.035369600587780235,
-          0.8889958578862672, 1.0171468928300462, 0.40202104980478576, 2.6674362928093682, 0.11780233435944305,
-          0.11780233435944333, 0.11780233435944351, 0.11780233435944402])
-
-
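- # Per-feature z-score normalization: each state dimension is shifted and scaled with the
- # training-set statistics above so inputs match the preprocessing the model was trained with.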
- def preprocess_states(state_list_of_lists, amount_buildings):
-     for bi in range(amount_buildings):
-         for si in range(Constants.state_dim):
-             state_list_of_lists[bi][si] = (state_list_of_lists[bi][si] - Constants.state_mean[si]) / Constants.state_std[si]
-
-     return state_list_of_lists
-
-
- def evaluate():
-     print("========================= Start Evaluation ========================")
-     print("==> Model:", Constants.load_model)
-     print()
-
-     env = CityLearnEnv(schema=Constants.schema_path)
-
-     agent = MyDecisionTransformer(load_from=Constants.load_model, force_download=Constants.force_download,
-                                   device=Constants.device)
-
-     context_length = agent.model.config.max_length
-     amount_buildings = len(env.buildings)
-
-     scale = 1000.0  # normalization for rewards/returns
-     target_return = Constants.TARGET_RETURN / scale
-
-     print("Target Return:", Constants.TARGET_RETURN)
-     print("Context Length:", context_length)
-
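-     # One context (states, actions, rewards, returns-to-go) is kept per building;
-     # the same model is queried independently for each building at every step.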
-     # Initialize tensors
-     episode_return = np.zeros(amount_buildings)
-     state_list_of_lists = env.reset()
-     state_list_of_lists = preprocess_states(state_list_of_lists, amount_buildings)
-
-     state_list_of_tensors = []
-     target_return_list_of_tensors = []
-     action_list_of_tensors = []
-     reward_list_of_tensors = []
-
-     for bi in range(amount_buildings):
-         state_bi = torch.from_numpy(np.array(state_list_of_lists[bi])).reshape(1, Constants.state_dim).to(
-             device=Constants.device, dtype=torch.float32)
-         target_return_bi = torch.tensor(target_return, device=Constants.device, dtype=torch.float32).reshape(1, 1)
-         action_bi = torch.zeros((0, Constants.action_dim), device=Constants.device, dtype=torch.float32)
-         reward_bi = torch.zeros(0, device=Constants.device, dtype=torch.float32)
-
-         state_list_of_tensors.append(state_bi)
-         target_return_list_of_tensors.append(target_return_bi)
-         action_list_of_tensors.append(action_bi)
-         reward_list_of_tensors.append(reward_bi)
-
-     timesteps = torch.tensor(0, device=Constants.device, dtype=torch.long).reshape(1, 1)
-     # print(state_list_of_tensors)  list of 5 tensors, each containing one state s of length 28
-     # print(action_list_of_tensors)  list of 5 empty tensors of size (0, 1)
-     # print(reward_list_of_tensors)  list of 5 empty tensors with no size
-     # print(target_return_list_of_tensors)  list of 5 tensors, each containing target_return / scale
-     # print(timesteps)  contains a tensor with 0: tensor([[0]])
-
-     episodes_completed = 0
-     num_steps = 0
-     t = 0
-     agent_time_elapsed = 0
-     episode_metrics = []
-
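-     # Autoregressive control loop: each step appends a placeholder action/reward per building,
-     # queries the model conditioned on the recent history, then steps the environment.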
-     while True:
-
-         next_actions = []
-         for bi in range(amount_buildings):
-             action_list_of_tensors[bi] = torch.cat(
-                 [action_list_of_tensors[bi], torch.zeros((1, Constants.action_dim), device=Constants.device)], dim=0)
-             reward_list_of_tensors[bi] = torch.cat(
-                 [reward_list_of_tensors[bi], torch.zeros(1, device=Constants.device)])
-
-             # get actions for all buildings
-             step_start = time.perf_counter()
-             action_bi = agent.get_action(
-                 state_list_of_tensors[bi],
-                 action_list_of_tensors[bi],
-                 reward_list_of_tensors[bi],
-                 target_return_list_of_tensors[bi],
-                 timesteps,
-             )
-             agent_time_elapsed += time.perf_counter() - step_start
-
-             action_list_of_tensors[bi][-1] = action_bi
-             action_bi = action_bi.detach().cpu().numpy()
-             next_actions.append(action_bi)
-
-         # Interaction with the environment
-         state_list_of_lists, reward_list_of_lists, done, _ = env.step(next_actions)
-         state_list_of_lists = preprocess_states(state_list_of_lists, amount_buildings)
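-         # env.step returns one observation list and one reward per building; done marks the end of the episode.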
-
-         if done:
-             episodes_completed += 1
-             metrics_t = env.evaluate()
-             metrics = {"price_cost": metrics_t[0], "emission_cost": metrics_t[1], "grid_cost": metrics_t[2]}
-             if np.any(np.isnan(metrics_t)):
-                 raise ValueError("Episode metrics are nan, please contact organizers")
-             episode_metrics.append(metrics)
-             print(f"Episode complete: {episodes_completed} | Latest episode metrics: {metrics}")
-             print("Episode Return:", episode_return)
-
-             # New initialization and env reset
-             t = 0
-             episode_return = np.zeros(amount_buildings)
-             state_list_of_lists = env.reset()
-             state_list_of_lists = preprocess_states(state_list_of_lists, amount_buildings)
-
-             state_list_of_tensors = []
-             target_return_list_of_tensors = []
-             action_list_of_tensors = []
-             reward_list_of_tensors = []
-
-             for bi in range(amount_buildings):
-                 state_bi = torch.from_numpy(np.array(state_list_of_lists[bi])).reshape(1, Constants.state_dim).to(
-                     device=Constants.device, dtype=torch.float32)
-                 target_return_bi = torch.tensor(target_return, device=Constants.device,
-                                                 dtype=torch.float32).reshape(1, 1)
-                 action_bi = torch.zeros((0, Constants.action_dim), device=Constants.device, dtype=torch.float32)
-                 reward_bi = torch.zeros(0, device=Constants.device, dtype=torch.float32)
-
-                 state_list_of_tensors.append(state_bi)
-                 target_return_list_of_tensors.append(target_return_bi)
-                 action_list_of_tensors.append(action_bi)
-                 reward_list_of_tensors.append(reward_bi)
-
-             timesteps = torch.tensor(0, device=Constants.device, dtype=torch.long).reshape(1, 1)
-
-         else:
-             # Process data for the next step
-             for bi in range(amount_buildings):
-                 cur_state = torch.from_numpy(np.array(state_list_of_lists[bi])).to(
-                     device=Constants.device).reshape(1, Constants.state_dim)
-                 state_list_of_tensors[bi] = torch.cat([state_list_of_tensors[bi], cur_state], dim=0)
-                 reward_list_of_tensors[bi][-1] = reward_list_of_lists[bi]
-
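-                 # Return-to-go update: subtract the newly received (scaled) reward from the previous target return.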
-                 pred_return = target_return_list_of_tensors[bi][0, -1] - (reward_list_of_lists[bi] / scale)
-                 target_return_list_of_tensors[bi] = torch.cat(
-                     [target_return_list_of_tensors[bi], pred_return.reshape(1, 1)], dim=1)
-
-                 episode_return[bi] += reward_list_of_lists[bi]
-
-             timesteps = torch.cat(
-                 [timesteps, torch.ones((1, 1), device=Constants.device, dtype=torch.long) * (t + 1)], dim=1)
-
-             if timesteps.size(dim=1) > context_length:
-                 # Keep only the last context_length entries (sliding context window)
-                 timesteps = timesteps[:, -context_length:]
-                 for bi in range(amount_buildings):
-                     state_list_of_tensors[bi] = state_list_of_tensors[bi][-context_length:]
-                     action_list_of_tensors[bi] = action_list_of_tensors[bi][-context_length:]
-                     reward_list_of_tensors[bi] = reward_list_of_tensors[bi][-context_length:]
-                     target_return_list_of_tensors[bi] = target_return_list_of_tensors[bi][:, -context_length:]
-
-         num_steps += 1
-         t += 1
-         if num_steps % 100 == 0:
-             print(f"Num Steps: {num_steps}, Num episodes: {episodes_completed}")
-
-         if episodes_completed >= Constants.episodes:
-             break
-
-     print("========================= Evaluation Done ========================")
-     print("Total number of steps:", num_steps)
-     if len(episode_metrics) > 0:
-         price_cost = np.mean([e['price_cost'] for e in episode_metrics])
-         emission_cost = np.mean([e['emission_cost'] for e in episode_metrics])
-         grid_cost = np.mean([e['grid_cost'] for e in episode_metrics])
-         print("Average Price Cost:", price_cost)
-         print("Average Emission Cost:", emission_cost)
-         print("Average Grid Cost:", grid_cost)
-         print("==>", (price_cost + emission_cost + grid_cost) / 3)
-     print(f"Total time taken by agent: {agent_time_elapsed}s")
-
-
- if __name__ == '__main__':
-     evaluate()