import os

from easydict import EasyDict

module_path = os.path.dirname(__file__)

collector_env_num = 8
evaluator_env_num = 8
expert_replay_buffer_size = int(5e3)
"""agent config"""
lunarlander_r2d3_config = dict(
    exp_name='lunarlander_r2d3_ppoexpert_seed0',
    env=dict(
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        env_id='LunarLander-v2',
        n_evaluator_episode=8,
        stop_value=200,
    ),
    policy=dict(
        cuda=True,
        on_policy=False,
        priority=True,
        priority_IS_weight=True,
        model=dict(
            obs_shape=8,
            action_shape=4,
            encoder_hidden_size_list=[128, 128, 512],
        ),
        discount_factor=0.997,
        nstep=5,
        burnin_step=2,
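        # The full RNN training sequence length is <burnin_step> + <learn_unroll_len>,
        # i.e. 2 + 40 = 42 timesteps per learning sample.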
        learn_unroll_len=40,
        learn=dict(
            value_rescale=True,
            update_per_collect=8,
            batch_size=64,
            learning_rate=0.0005,
            target_update_theta=0.001,
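            # DQfD-style loss terms used by R2D3: lambda1 weights the n-step TD loss,
            # lambda2 the supervised large-margin loss on expert data, lambda3 the L2
            # regularization, and lambda_one_step_td the one-step TD loss.
            # margin_function is the constant margin of the supervised loss, and
            # per_train_iter_k is the number of pre-training iterations on expert data only.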
            lambda1=1.0,
            lambda2=1.0,
            lambda3=1e-5,
            lambda_one_step_td=1,
            margin_function=0.8,
            per_train_iter_k=0,
        ),
        collect=dict(
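            # Each collect phase gathers n_sample trajectory sequences. traj_len_inf=True
            # lets every collector env run until the episode terminates, as required by
            # the sequence-based (RNN) R2D3 policy.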
            n_sample=32,
            traj_len_inf=True,
            env_num=collector_env_num,
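            # pho is the demo ratio: the fraction of each training batch sampled from
            # expert demonstrations rather than the agent's own experience.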
            pho=1 / 4.,
        ),
        eval=dict(env_num=evaluator_env_num, ),
        other=dict(
            eps=dict(
                type='exp',
                start=0.95,
                end=0.05,
                decay=100000,
            ),
            replay_buffer=dict(
                replay_buffer_size=int(1e5),
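                # (float) How much prioritization is used: 0 means uniform sampling, 1 means full prioritization.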
                alpha=0.6,
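                # (float) How much importance-sampling correction is used: 0 means none, 1 means full correction.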
                beta=0.4,
            )
        ),
    ),
)
lunarlander_r2d3_config = EasyDict(lunarlander_r2d3_config)
main_config = lunarlander_r2d3_config
lunarlander_r2d3_create_config = dict(
    env=dict(
        type='lunarlander',
        import_names=['dizoo.box2d.lunarlander.envs.lunarlander_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='r2d3'),
)
lunarlander_r2d3_create_config = EasyDict(lunarlander_r2d3_create_config)
create_config = lunarlander_r2d3_create_config

"""expert config"""
expert_lunarlander_r2d3_config = dict(
    exp_name='expert_lunarlander_r2d3_ppoexpert_seed0',
    env=dict(
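        # shared_memory only takes effect when the env manager type is 'subprocess'.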
        manager=dict(shared_memory=True, reset_inplace=True),
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        n_evaluator_episode=5,
        stop_value=200,
    ),
    policy=dict(
        cuda=True,
        on_policy=False,
        priority=True,
        model=dict(
            obs_shape=8,
            action_shape=4,
            encoder_hidden_size_list=[128, 128, 64],
        ),
        discount_factor=0.997,
        burnin_step=2,
        nstep=5,
        learn=dict(expert_replay_buffer_size=expert_replay_buffer_size, ),
        collect=dict(
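            # Path of the pre-trained expert (PPO) checkpoint used to collect demonstration
            # trajectories; replace the placeholder with a real checkpoint path before running.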
            model_path='model_path_placeholder',
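            # Expert trajectories are cut into pieces of length unroll_len, which should
            # match the R2D3 learning sequence length above (burnin_step + learn_unroll_len = 42).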
            unroll_len=42,
            env_num=collector_env_num,
        ),
        eval=dict(env_num=evaluator_env_num, ),
        other=dict(
            replay_buffer=dict(
                replay_buffer_size=expert_replay_buffer_size,
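                # (float) How much prioritization is used: 0 means uniform sampling, 1 means full prioritization.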
                alpha=0.9,
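                # (float) How much importance-sampling correction is used: 0 means none, 1 means full correction.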
                beta=0.4,
            )
        ),
    ),
)
expert_lunarlander_r2d3_config = EasyDict(expert_lunarlander_r2d3_config)
expert_main_config = expert_lunarlander_r2d3_config
expert_lunarlander_r2d3_create_config = dict(
    env=dict(
        type='lunarlander',
        import_names=['dizoo.box2d.lunarlander.envs.lunarlander_env'],
    ),
    env_manager=dict(type='subprocess'),
    policy=dict(type='offppo_collect_traj'),
)
expert_lunarlander_r2d3_create_config = EasyDict(expert_lunarlander_r2d3_create_config)
expert_create_config = expert_lunarlander_r2d3_create_config


if __name__ == "__main__":
    from ding.entry import serial_pipeline_r2d3
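    # serial_pipeline_r2d3 first collects demonstration data with the expert configs,
    # then trains the R2D3 agent defined by main_config/create_config.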
    serial_pipeline_r2d3([main_config, create_config], [expert_main_config, expert_create_config], seed=0)