# walker2d-v3-PPO / policy_config.py
exp_config = {
    'type': 'ppo',                 # algorithm: Proximal Policy Optimization
    'on_policy': True,             # train only on freshly collected data
    'cuda': True,                  # use GPU when available
    'action_space': 'continuous',  # Walker2d-v3 has a continuous action space
    'discount_factor': 0.99,       # reward discount (gamma)
    'gae_lambda': 0.95,            # lambda for Generalized Advantage Estimation
    'epoch_per_collect': 10,       # optimization epochs per collected batch
    'batch_size': 320,             # minibatch size per gradient step
    'learning_rate': 0.0003,       # optimizer step size (3e-4)
    'lr_scheduler': None,          # constant learning rate
    'weight_decay': 0,             # no L2 regularization
    'value_weight': 0.5,           # coefficient of the value (critic) loss
    'entropy_weight': 0.01,        # coefficient of the entropy bonus
    'clip_ratio': 0.2,             # PPO clipping epsilon
    'adv_norm': True,              # normalize advantages within each batch
    'value_norm': 'baseline',      # value/return normalization scheme
    'ppo_param_init': True,        # PPO-style network parameter initialization
    'grad_norm': 0.5,              # gradient-norm clipping threshold
    'n_sample': 3200,              # environment transitions collected per iteration
    'unroll_len': 1,               # no sequence unrolling (single-step samples)
    'deterministic_eval': True,    # evaluate with the mean (deterministic) action
    'model': {},                   # use the policy's default model settings
    'cfg_type': 'PPOFPolicyDict',  # DI-engine PPOF policy config type
}
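
# Usage note (an assumption, not stated in this file): the cfg_type
# 'PPOFPolicyDict' suggests this config targets DI-engine's high-level PPOF
# agent (ding.bonus.PPOF), e.g. PPOF(env_id='Walker2d-v3', cfg=exp_config).
# Verify the constructor signature against the installed DI-engine version.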
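
# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original upload): how the key
# hyperparameters above typically enter the clipped PPO objective. All
# tensors below (logp_old, logp_new, adv, values, returns, entropy) are
# hypothetical random stand-ins, not identifiers from this repository.
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    cfg = exp_config
    n = cfg['batch_size']
    # Random placeholder batch purely for demonstration.
    logp_old = torch.randn(n)
    logp_new = logp_old + 0.1 * torch.randn(n)
    adv = torch.randn(n)
    values = torch.randn(n)
    returns = torch.randn(n)
    entropy = torch.rand(n)

    if cfg['adv_norm']:
        # 'adv_norm': normalize advantages within the batch.
        adv = (adv - adv.mean()) / (adv.std() + 1e-8)

    # 'clip_ratio': clip the importance ratio to [1 - eps, 1 + eps] and take
    # the pessimistic (minimum) surrogate, as in standard clipped PPO.
    ratio = torch.exp(logp_new - logp_old)
    clipped = torch.clamp(ratio, 1 - cfg['clip_ratio'], 1 + cfg['clip_ratio'])
    policy_loss = -torch.min(ratio * adv, clipped * adv).mean()

    # 'value_weight' and 'entropy_weight' balance the three loss terms.
    value_loss = F.mse_loss(values, returns)
    total_loss = (policy_loss
                  + cfg['value_weight'] * value_loss
                  - cfg['entropy_weight'] * entropy.mean())
    print(f'demo PPO loss: {total_loss.item():.4f}')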