{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9829342365264893,
"min": 0.9829342365264893,
"max": 2.865917205810547,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9395.8681640625,
"min": 9395.8681640625,
"max": 29381.3828125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.623763084411621,
"min": 0.36927610635757446,
"max": 12.623763084411621,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2461.6337890625,
"min": 71.63956451416016,
"max": 2525.593017578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07280002598246724,
"min": 0.06108100912971685,
"max": 0.07529431707956188,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29120010392986895,
"min": 0.25601398065405456,
"max": 0.36138179990880687,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21106607992859447,
"min": 0.12309441370569973,
"max": 0.2818194264874738,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8442643197143779,
"min": 0.49237765482279894,
"max": 1.4090971324373691,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.113636363636363,
"min": 3.25,
"max": 25.29090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1105.0,
"min": 143.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.113636363636363,
"min": 3.25,
"max": 25.29090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1105.0,
"min": 143.0,
"max": 1391.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1686342282",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget11 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1686342783"
},
"total": 500.689434057,
"count": 1,
"self": 0.42702736600000435,
"children": {
"run_training.setup": {
"total": 0.04125100900000689,
"count": 1,
"self": 0.04125100900000689
},
"TrainerController.start_learning": {
"total": 500.221155682,
"count": 1,
"self": 0.5725422419995994,
"children": {
"TrainerController._reset_env": {
"total": 3.908960798999999,
"count": 1,
"self": 3.908960798999999
},
"TrainerController.advance": {
"total": 495.5872944550004,
"count": 18204,
"self": 0.28807763799363784,
"children": {
"env_step": {
"total": 495.29921681700677,
"count": 18204,
"self": 360.16837258400165,
"children": {
"SubprocessEnvManager._take_step": {
"total": 134.84929339200482,
"count": 18204,
"self": 1.8046705460168369,
"children": {
"TorchPolicy.evaluate": {
"total": 133.044622845988,
"count": 18204,
"self": 133.044622845988
}
}
},
"workers": {
"total": 0.28155084100029626,
"count": 18204,
"self": 0.0,
"children": {
"worker_root": {
"total": 498.527873404001,
"count": 18204,
"is_parallel": true,
"self": 236.90754103900332,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0027379279999877326,
"count": 1,
"is_parallel": true,
"self": 0.0007037219999688205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002034206000018912,
"count": 10,
"is_parallel": true,
"self": 0.002034206000018912
}
}
},
"UnityEnvironment.step": {
"total": 0.049086267000006956,
"count": 1,
"is_parallel": true,
"self": 0.000500081999973645,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003881110000065746,
"count": 1,
"is_parallel": true,
"self": 0.0003881110000065746
},
"communicator.exchange": {
"total": 0.0459134449999965,
"count": 1,
"is_parallel": true,
"self": 0.0459134449999965
},
"steps_from_proto": {
"total": 0.002284629000030236,
"count": 1,
"is_parallel": true,
"self": 0.0005715790001090681,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001713049999921168,
"count": 10,
"is_parallel": true,
"self": 0.001713049999921168
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 261.6203323649977,
"count": 18203,
"is_parallel": true,
"self": 10.724119716005873,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.7101396779913784,
"count": 18203,
"is_parallel": true,
"self": 5.7101396779913784
},
"communicator.exchange": {
"total": 206.99653442599777,
"count": 18203,
"is_parallel": true,
"self": 206.99653442599777
},
"steps_from_proto": {
"total": 38.18953854500268,
"count": 18203,
"is_parallel": true,
"self": 7.022838214011188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.16670033099149,
"count": 182030,
"is_parallel": true,
"self": 31.16670033099149
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.224300004007091e-05,
"count": 1,
"self": 9.224300004007091e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 491.62902670200634,
"count": 466500,
"is_parallel": true,
"self": 10.759211416984726,
"children": {
"process_trajectory": {
"total": 268.79954234602116,
"count": 466500,
"is_parallel": true,
"self": 267.48715303802123,
"children": {
"RLTrainer._checkpoint": {
"total": 1.312389307999922,
"count": 4,
"is_parallel": true,
"self": 1.312389307999922
}
}
},
"_update_policy": {
"total": 212.07027293900046,
"count": 90,
"is_parallel": true,
"self": 85.47357697099812,
"children": {
"TorchPPOOptimizer.update": {
"total": 126.59669596800234,
"count": 4587,
"is_parallel": true,
"self": 126.59669596800234
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.15226594299997487,
"count": 1,
"self": 0.0009300809999785997,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15133586199999627,
"count": 1,
"self": 0.15133586199999627
}
}
}
}
}
}
}