import pickle
import datasets
import numpy as np
_DESCRIPTION = """The dataset consists of tuples of (observations, actions, rewards, dones) sampled by agents
interacting with the CityLearn 2022 Phase 1 environment"""
_BASE_URL = "https://huggingface.co/datasets/TobiTob/CityLearn/resolve/main"
_URLS = {
"s_test": f"{_BASE_URL}/s_test.pkl",
"s_week": f"{_BASE_URL}/s_week.pkl",
"s_month": f"{_BASE_URL}/s_month.pkl",
"s_random": f"{_BASE_URL}/s_random.pkl",
"s_random2": f"{_BASE_URL}/s_random2.pkl",
"s_random3": f"{_BASE_URL}/s_random3.pkl",
"s_random4": f"{_BASE_URL}/s_random4.pkl",
"f_50": f"{_BASE_URL}/f_50x5x1750.pkl",
"fr_24": f"{_BASE_URL}/fr_24x5x364.pkl",
"fn_24": f"{_BASE_URL}/fn_24x5x3649.pkl",
}
class DecisionTransformerCityLearnDataset(datasets.GeneratorBasedBuilder):
    """Expose pickled CityLearn trajectory files as a Hugging Face dataset.

    Each example is one trajectory: per-step observation and action vectors
    plus per-step reward and done flags (see `_info` for the exact schema).
    """

    # You will be able to load one configuration in the following list with
    # data = datasets.load_dataset('TobiTob/CityLearn', 'data_name')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="s_test",
            description="Small dataset sampled from an expert policy in CityLearn environment. Data size 10x8",
        ),
        datasets.BuilderConfig(
            name="s_week",
            description="Data sampled from an expert policy in CityLearn environment. Data size 260x168",
        ),
        datasets.BuilderConfig(
            name="s_month",
            description="Data sampled from an expert policy in CityLearn environment. Data size 60x720",
        ),
        datasets.BuilderConfig(
            name="s_random",
            description="Random environment interactions in CityLearn environment. Data size 950x461",
        ),
        datasets.BuilderConfig(
            name="s_random2",
            description="Random environment interactions in CityLearn environment. Data size 43795x10",
        ),
        datasets.BuilderConfig(
            name="s_random3",
            description="Random environment interactions in CityLearn environment. Data size 23050x19",
        ),
        datasets.BuilderConfig(
            name="s_random4",
            description="Random environment interactions in CityLearn environment. Data size 437950x1",
        ),
        datasets.BuilderConfig(
            name="f_50",
            description="Data sampled from an expert policy in CityLearn environment. Sequence length = 50, Buildings = 5, Episodes = 10 ",
        ),
        datasets.BuilderConfig(
            name="fr_24",
            description="Data sampled from an expert policy in CityLearn environment. Used the new reward function. Sequence length = 24, Buildings = 5, Episodes = 1 ",
        ),
        datasets.BuilderConfig(
            name="fn_24",
            description="Data sampled from an expert policy in CityLearn environment. Used the new reward function and changed some interactions with noise. Sequence length = 24, Buildings = 5, Episodes = 10 ",
        ),
    ]

    def _info(self):
        """Return the `DatasetInfo` describing the feature schema.

        Observations and actions are sequences of float32 vectors; rewards
        and dones are sequences of scalars (float32 / bool respectively).
        """
        features = datasets.Features(
            {
                "observations": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "actions": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                "rewards": datasets.Sequence(datasets.Value("float32")),
                "dones": datasets.Sequence(datasets.Value("bool")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download the pickle for the selected configuration and expose it
        as a single TRAIN split."""
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            )
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        """Yield `(index, example)` pairs read from the pickled trajectory list.

        Args:
            filepath: Local path of the downloaded pickle file.
            split: Split name (always "train" here; unused).

        Raises:
            KeyError: If a trajectory carries neither a "dones" nor a
                "terminals" key.
        """
        # SECURITY NOTE: pickle.load can execute arbitrary code embedded in
        # the file — only load dataset files from a trusted source.
        with open(filepath, "rb") as f:
            trajectories = pickle.load(f)
        for idx, traj in enumerate(trajectories):
            # Some files store the termination flags under "terminals" rather
            # than "dones". The previous `traj.get("dones", traj.get("terminals"))`
            # silently produced None when both keys were absent, which then
            # failed deep inside np.expand_dims with an opaque error — fail
            # loudly with a clear message instead.
            if "dones" in traj:
                dones = traj["dones"]
            elif "terminals" in traj:
                dones = traj["terminals"]
            else:
                raise KeyError(
                    f"Trajectory {idx} has neither a 'dones' nor a 'terminals' key"
                )
            yield idx, {
                "observations": traj["observations"],
                "actions": traj["actions"],
                # reshape (T,) -> (T, 1) column vectors, as the original did
                "rewards": np.expand_dims(traj["rewards"], axis=1),
                "dones": np.expand_dims(dones, axis=1),
            }