# ppo-SnowballTarget / configuration.yaml
# Author: neatbullshit — "First training of SnowballTarget"
# Commit: 6175d24
# Unity ML-Agents trainer configuration (PPO) for the SnowballTarget environment.
# Indentation restored: the source paste had all nesting flattened to column 0,
# which produced duplicate top-level keys (e.g. two `network_settings` blocks)
# and invalid YAML. Values are unchanged; only structure/comments were added.
default_settings: null
behaviors:
  SnowballTarget:
    trainer_type: ppo
    # PPO optimizer hyperparameters.
    hyperparameters:
      batch_size: 128
      buffer_size: 2048
      learning_rate: 0.0003
      beta: 0.005          # entropy regularization strength
      epsilon: 0.2         # PPO clipping range
      lambd: 0.95          # GAE lambda
      num_epoch: 3
      learning_rate_schedule: linear
      beta_schedule: linear
      epsilon_schedule: linear
    # Policy/value network architecture.
    network_settings:
      normalize: false
      hidden_units: 256
      num_layers: 2
      vis_encode_type: simple
      memory: null
      goal_conditioning_type: hyper
      deterministic: false
    reward_signals:
      extrinsic:
        gamma: 0.99
        strength: 0.99
        # Separate (smaller) network for the extrinsic reward signal.
        network_settings:
          normalize: false
          hidden_units: 128
          num_layers: 2
          vis_encode_type: simple
          memory: null
          goal_conditioning_type: hyper
          deterministic: false
    init_path: null
    keep_checkpoints: 10
    checkpoint_interval: 50000
    max_steps: 100000
    time_horizon: 64
    summary_freq: 10000
    threaded: true
    self_play: null
    behavioral_cloning: null
# Environment executable and launch settings.
env_settings:
  env_path: ./training-envs-executables/linux/SnowballTarget/SnowballTarget
  env_args: null
  base_port: 5005
  num_envs: 1
  num_areas: 1
  seed: -1
  max_lifetime_restarts: 10
  restarts_rate_limit_n: 1
  restarts_rate_limit_period_s: 60
# Unity engine rendering/time settings for training.
engine_settings:
  width: 84
  height: 84
  quality_level: 5
  time_scale: 20
  target_frame_rate: -1
  capture_frame_rate: 60
  no_graphics: true
environment_parameters: null
# Run identity and resume/checkpoint behavior.
checkpoint_settings:
  run_id: SnowballTarget1
  initialize_from: null
  load_model: false
  resume: false
  force: false
  train_model: false
  inference: false
  results_dir: results
torch_settings:
  device: null  # null → ML-Agents auto-selects CPU/GPU
  debug: false