|
from easydict import EasyDict |
|
|
|
# Scenario / run-scale constants shared by the env and policy sections below.
agent_num = 10  # number of controllable agents on the 'MMM' map
collector_env_num = 8  # parallel envs used for data collection
evaluator_env_num = 8  # parallel envs used for evaluation

# Bug fix: a stray trailing comma previously made this the 1-tuple (True,)
# rather than the boolean True. The tuple happens to be truthy, but any
# identity/equality check against True (or serialization of the config)
# would misbehave.
special_global_state = True
|
|
|
# Main experiment configuration for multi-agent PPO (MAPPO) on the SMAC
# map 'MMM'. Wrapped into an EasyDict below for attribute-style access.
main_config = dict(
    exp_name='smac_MMM_mappo_seed0',
    env=dict(
        map_name='MMM',
        difficulty=7,  # built-in opponent AI difficulty level
        reward_only_positive=True,
        mirror_opponent=False,
        agent_num=agent_num,
        collector_env_num=collector_env_num,
        evaluator_env_num=evaluator_env_num,
        n_evaluator_episode=32,  # episodes per evaluation round
        # NOTE(review): presumably an evaluation win-rate threshold that ends
        # training once reached — confirm against the SMAC env's return.
        stop_value=0.99,
        death_mask=False,
        special_global_state=special_global_state,
        manager=dict(
            shared_memory=False,
            # Generous timeout — SC2 environment resets can be slow.
            reset_timeout=6000,
        ),
    ),
    policy=dict(
        cuda=True,
        multi_agent=True,
        action_space='discrete',
        model=dict(
            agent_num=agent_num,
            # Observation/action sizes are specific to the 'MMM' map;
            # they must match what the SMAC env actually emits.
            agent_obs_shape=186,
            global_obs_shape=389,
            action_shape=16,
            action_space='discrete',
        ),
        learn=dict(
            epoch_per_collect=5,  # PPO epochs per collected batch
            batch_size=320,
            learning_rate=5e-4,
            value_weight=0.5,  # weight of the value loss term
            entropy_weight=0.01,  # entropy bonus weight (exploration)
            clip_ratio=0.2,  # PPO clipping epsilon
            adv_norm=False,
            value_norm=True,
            ppo_param_init=True,
            grad_clip_type='clip_norm',
            grad_clip_value=10,
            ignore_done=False,
        ),
        collect=dict(env_num=collector_env_num, n_sample=3200),
        eval=dict(env_num=evaluator_env_num, evaluator=dict(eval_freq=50, )),
    ),
)
main_config = EasyDict(main_config)
|
# Component-registry hints for DI-engine: which env class, env manager,
# and policy type the serial pipeline should instantiate.
create_config = EasyDict(
    dict(
        env=dict(
            type='smac',
            import_names=['dizoo.smac.envs.smac_env'],
        ),
        env_manager=dict(type='base'),
        policy=dict(type='ppo'),
    )
)
|
|
|
if __name__ == '__main__':
    # Entry point: hand the (main_config, create_config) pair to DI-engine's
    # on-policy serial training pipeline with a fixed seed for reproducibility.
    from ding.entry import serial_pipeline_onpolicy
    serial_pipeline_onpolicy((main_config, create_config), seed=0)
|
|