import pickle
import datasets
import numpy as np
_DESCRIPTION = """The dataset consists of tuples of (observations, actions, rewards, dones) sampled by agents
interacting with the CityLearn 2022 Phase 1 environment (only first 5 buildings)"""
_BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"
_URLS = {
"random_230": f"{_BASE_URL}/random_230x5x38.pkl",
"f_230": f"{_BASE_URL}/f_230x5x38.pkl",
"f_24": f"{_BASE_URL}/f_24x5x364.pkl",
"fr_24": f"{_BASE_URL}/fr_24x5x364.pkl",
"fn_24": f"{_BASE_URL}/fn_24x5x3649.pkl",
"fn_230": f"{_BASE_URL}/fnn_230x5x380.pkl",
"rb_24": f"{_BASE_URL}/rb_24x5x364.pkl",
"rb_50": f"{_BASE_URL}/rb_50x5x175.pkl",
"rb_108": f"{_BASE_URL}/rb_108x5x81.pkl",
"rb_230": f"{_BASE_URL}/rb_230x5x38.pkl",
"rb_461": f"{_BASE_URL}/rb_461x5x19.pkl",
"rb_973": f"{_BASE_URL}/rb_973x5x9.pkl",
"rb_2189": f"{_BASE_URL}/rb_2189x5x4.pkl",
"rbn_24": f"{_BASE_URL}/rb_24x5x18247.pkl",
}
class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
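    """Trajectories of (observations, actions, rewards, dones) from the
    CityLearn 2022 Phase 1 environment, for Decision Transformer training."""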
    # Any configuration in the list below can be loaded with
    # data = datasets.load_dataset('TobiTob/CityLearn', 'data_name')
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="random_230",
description="Random environment interactions. Sequence length = 230, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="f_230",
description="Data sampled from an expert LSTM policy. Sequence length = 230, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="f_24",
description="Data sampled from an expert LSTM policy. Used the old reward function. Sequence length = 24, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="fr_24",
description="Data sampled from an expert LSTM policy. Used the new reward function. Sequence length = 24, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="fn_24",
description="Data sampled from an expert LSTM policy, extended with noise. Sequence length = 24, Buildings = 5, Episodes = 10 ",
),
datasets.BuilderConfig(
name="fn_230",
description="Data sampled from an expert LSTM policy, extended with noise. Sequence length = 230, Buildings = 5, Episodes = 10 ",
),
datasets.BuilderConfig(
name="rb_24",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 24, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_50",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 50, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_108",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 108, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_230",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 230, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_461",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 461, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_973",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 973, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rb_2189",
description="Data sampled from a simple rule based policy. Used the new reward function. Sequence length = 2189, Buildings = 5, Episodes = 1 ",
),
datasets.BuilderConfig(
name="rbn_24",
description="Data sampled from a simple rule based policy. Used the new reward function and changed some interactions with noise. Sequence length = 24, Buildings = 5, Episodes = 50 ",
),
]
def _info(self):
features = datasets.Features(
{
"observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
"rewards": datasets.Sequence(datasets.Value("float32")),
"dones": datasets.Sequence(datasets.Value("bool")),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
)
def _split_generators(self, dl_manager):
urls = _URLS[self.config.name]
data_dir = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": data_dir,
"split": "train",
},
)
]
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
def _generate_examples(self, filepath, split):
with open(filepath, "rb") as f:
trajectories = pickle.load(f)
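            # Each pickled trajectory is a dict of per-timestep arrays:
            # "observations", "actions", "rewards", and "dones" (some files
            # store the episode-end flags under the key "terminals" instead).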
for idx, traj in enumerate(trajectories):
yield idx, {
"observations": traj["observations"],
"actions": traj["actions"],
"rewards": np.expand_dims(traj["rewards"], axis=1),
"dones": np.expand_dims(traj.get("dones", traj.get("terminals")), axis=1),
}
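
# A minimal usage sketch, assuming network access to the Hugging Face Hub;
# "rb_230" is one of the configuration names defined in BUILDER_CONFIGS above,
# and newer versions of the datasets library may also require trust_remote_code=True.
if __name__ == "__main__":
    data = datasets.load_dataset("TobiTob/CityLearn", "rb_230")
    first = data["train"][0]
    # observations/actions hold one inner list per timestep
    print(len(first["observations"]), len(first["observations"][0]))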