import os

import gym
import numpy as np
import pandas as pd

from citylearn.citylearn import CityLearnEnv


class Constants:
    episodes = 3
    schema_path = '/home/aicrowd/data/citylearn_challenge_2022_phase_1/schema.json'
    variables_to_forecast = ['solar_generation', 'non_shiftable_load', 'electricity_pricing',
                             'carbon_intensity', 'electricity_consumption_crude',
                             'hour', 'month']

    additional_variable = ['hour', 'month']



def action_space_to_dict(aspace):
    """Serialize a gym Box space; used for both action and observation spaces."""
    return {"high": aspace.high,
            "low": aspace.low,
            "shape": aspace.shape,
            "dtype": str(aspace.dtype)}
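
# Illustrative output for a Box space like the action space defined below,
# Box(low=[-0.2], high=[0.2]): {"high": array([0.2], dtype=float32),
# "low": array([-0.2], dtype=float32), "shape": (1,), "dtype": "float32"}.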

def env_reset(env):
    """Reset the environment and gather its spaces and metadata into one dict."""
    observations = env.reset()
    action_space = env.action_space
    observation_space = env.observation_space
    building_info = env.get_building_information()
    building_info = list(building_info.values())
    action_space_dicts = [action_space_to_dict(asp) for asp in action_space]
    # observation spaces are Box spaces too, so the same serializer applies
    observation_space_dicts = [action_space_to_dict(osp) for osp in observation_space]
    obs_dict = {"action_space": action_space_dicts,
                "observation_space": observation_space_dicts,
                "building_info": building_info,
                "observation": observations}
    return obs_dict

## env wrapper for stable baselines
class EnvCityGym(gym.Env):
    """
    Gym wrapper around the CityLearn environment.
    """
    def __init__(self, env):
        self.env = env

        # get the number of buildings
        self.num_buildings = len(env.action_space)
        print("num_buildings: ", self.num_buildings)

        # a single continuous storage action in [-0.2, 0.2]
        self.action_space = gym.spaces.Box(low=np.array([-0.2]), high=np.array([0.2]), dtype=np.float32)

        # discrete (hour, month) observation: hour in 0..24, month in 0..12
        self.observation_space = gym.spaces.MultiDiscrete(np.array([25, 13]))

    def reset(self):
        obs = self.env.reset()
        return list(obs)

    def step(self, action):
        """
        Forward the list of per-building actions to the wrapped environment.
        """
        obs, reward, done, info = self.env.step(action)
        return list(obs), reward, done, info
        
    def render(self, mode='human'):
        return self.env.render(mode)
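
# A minimal usage sketch for the wrapper above (defined but not executed on
# import). It assumes Constants.schema_path is valid on this machine; the
# zero action is an illustrative no-op for every building.
def _example_wrapper_usage():
    wrapped = EnvCityGym(CityLearnEnv(schema=Constants.schema_path))
    obs = wrapped.reset()
    # one sub-action per building, all 0.0 (no battery charge/discharge)
    action = [[0.0] for _ in range(wrapped.num_buildings)]
    obs, reward, done, info = wrapped.step(action)
    print("reward per building:", reward)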




def env_run_without_action(actions_all=None):
    """
    Run the environment, with zero actions by default or with the flat
    `actions_all` vector if provided, and return the dataset of observations.
    """
    # create env from citylearn
    env = CityLearnEnv(schema=Constants.schema_path)

    # get the number of buildings
    num_buildings = len(env.action_space)
    print("num_buildings: ", num_buildings)

    # create env wrapper
    env = EnvCityGym(env)

    infos = []

    for id_building in range(num_buildings):
        # restart the episode for each building
        obs = env.reset()

        # loop over the hourly timesteps of the episode
        for i in range(8759):

            # raw observations dict for this building, before stepping
            info_tmp = env.env.buildings[id_building].observations.copy()

            if actions_all is not None:
                # actions_all is flat: 8759 consecutive timesteps per building
                action = [[actions_all[i + 8759 * b]] for b in range(num_buildings)]
            else:
                # default: one zero action per building
                action = np.zeros((num_buildings,))

                # reshape action into the form [[0], [0], ..., [0]]
                action = [[a] for a in action]

            obs, reward, done, info = env.step(action)

            info_tmp['reward'] = reward[id_building]
            info_tmp['building_id'] = id_building
            infos.append(info_tmp)

            if done:
                obs = env.reset()

    # build the dataset, one row per (building, timestep)
    data = pd.DataFrame(infos)

    return data
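
# Hedged sketch of the expected `actions_all` layout: a flat vector of
# length 8759 * num_buildings, where index i + 8759 * b selects timestep i
# of building b. The 5-building default matches the phase 1 schema; the
# zero values are purely illustrative.
def _example_run_with_actions(num_buildings=5):
    actions_all = np.zeros(8759 * num_buildings)  # all no-op actions
    return env_run_without_action(actions_all=actions_all)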

if __name__ == "__main__":

    # data generation
    data = env_run_without_action()

    # we only normalize month and hour
    data['hour'] = data['hour'] / 24
    data['month'] = data['month'] / 12

    # save the data into the data_histo folder in parquet format
    os.makedirs("data_histo", exist_ok=True)
    data.to_parquet("data_histo/data.parquet")
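
# Hedged sketch of reading the generated dataset back for a later
# forecasting step. The column selection is an assumption: only the entries
# of Constants.variables_to_forecast that actually appear in each building's
# observations dict will be present in the saved frame.
def _example_load_histo():
    data = pd.read_parquet("data_histo/data.parquet")
    cols = [c for c in Constants.variables_to_forecast if c in data.columns]
    return data[cols + ['building_id']]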