{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8018127083778381,
"min": 0.8018127083778381,
"max": 2.8752317428588867,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7655.7080078125,
"min": 7655.7080078125,
"max": 29476.875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.912261962890625,
"min": 0.20996831357479095,
"max": 12.912261962890625,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2517.89111328125,
"min": 40.73385238647461,
"max": 2621.166015625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06110212280573786,
"min": 0.06110212280573786,
"max": 0.0767654090756753,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24440849122295144,
"min": 0.24440849122295144,
"max": 0.3736472436359298,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20481103087556274,
"min": 0.08439051947675133,
"max": 0.2766444039695403,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.819244123502251,
"min": 0.3375620779070053,
"max": 1.3832220198477017,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.106097298000005e-06,
"min": 8.106097298000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.242438919200002e-05,
"min": 3.242438919200002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10270200000000002,
"min": 0.10270200000000002,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41080800000000006,
"min": 0.41080800000000006,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014482980000000007,
"min": 0.00014482980000000007,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005793192000000003,
"min": 0.0005793192000000003,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.363636363636363,
"min": 2.3863636363636362,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1116.0,
"min": 105.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.363636363636363,
"min": 2.3863636363636362,
"max": 25.545454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1116.0,
"min": 105.0,
"max": 1405.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718885042",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718885520"
},
"total": 477.68522344300004,
"count": 1,
"self": 0.44284610100010013,
"children": {
"run_training.setup": {
"total": 0.058045656999979656,
"count": 1,
"self": 0.058045656999979656
},
"TrainerController.start_learning": {
"total": 477.18433168499996,
"count": 1,
"self": 0.5995798739963902,
"children": {
"TrainerController._reset_env": {
"total": 2.9844894799999793,
"count": 1,
"self": 2.9844894799999793
},
"TrainerController.advance": {
"total": 473.50162696400366,
"count": 18201,
"self": 0.28672123501473834,
"children": {
"env_step": {
"total": 473.2149057289889,
"count": 18201,
"self": 307.27699667399315,
"children": {
"SubprocessEnvManager._take_step": {
"total": 165.6441674020001,
"count": 18201,
"self": 1.5570199299951355,
"children": {
"TorchPolicy.evaluate": {
"total": 164.08714747200497,
"count": 18201,
"self": 164.08714747200497
}
}
},
"workers": {
"total": 0.29374165299566357,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 475.9074375439899,
"count": 18201,
"is_parallel": true,
"self": 241.28477054398445,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007964906000097471,
"count": 1,
"is_parallel": true,
"self": 0.006195148000074369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017697580000231028,
"count": 10,
"is_parallel": true,
"self": 0.0017697580000231028
}
}
},
"UnityEnvironment.step": {
"total": 0.0419054110000161,
"count": 1,
"is_parallel": true,
"self": 0.0007316600000422113,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004371290000335648,
"count": 1,
"is_parallel": true,
"self": 0.0004371290000335648
},
"communicator.exchange": {
"total": 0.0387422640000068,
"count": 1,
"is_parallel": true,
"self": 0.0387422640000068
},
"steps_from_proto": {
"total": 0.001994357999933527,
"count": 1,
"is_parallel": true,
"self": 0.000381039999638233,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001613318000295294,
"count": 10,
"is_parallel": true,
"self": 0.001613318000295294
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 234.62266700000544,
"count": 18200,
"is_parallel": true,
"self": 10.747182185038469,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.727377361982917,
"count": 18200,
"is_parallel": true,
"self": 5.727377361982917
},
"communicator.exchange": {
"total": 182.35019312999611,
"count": 18200,
"is_parallel": true,
"self": 182.35019312999611
},
"steps_from_proto": {
"total": 35.797914322987936,
"count": 18200,
"is_parallel": true,
"self": 6.840094086995919,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.957820235992017,
"count": 182000,
"is_parallel": true,
"self": 28.957820235992017
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00022942499992950616,
"count": 1,
"self": 0.00022942499992950616,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 467.5247330800488,
"count": 699289,
"is_parallel": true,
"self": 15.494626534036229,
"children": {
"process_trajectory": {
"total": 258.39921885701233,
"count": 699289,
"is_parallel": true,
"self": 257.45391789101234,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9453009659999907,
"count": 4,
"is_parallel": true,
"self": 0.9453009659999907
}
}
},
"_update_policy": {
"total": 193.63088768900025,
"count": 90,
"is_parallel": true,
"self": 61.5901347340033,
"children": {
"TorchPPOOptimizer.update": {
"total": 132.04075295499695,
"count": 4581,
"is_parallel": true,
"self": 132.04075295499695
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09840594199999941,
"count": 1,
"self": 0.0010247200000321754,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09738122199996724,
"count": 1,
"self": 0.09738122199996724
}
}
}
}
}
}
}