{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6226257681846619,
"min": 0.620304524898529,
"max": 2.8372557163238525,
"count": 21
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6396.857421875,
"min": 5977.5458984375,
"max": 29056.3359375,
"count": 21
},
"SnowballTarget.Step.mean": {
"value": 209960.0,
"min": 9952.0,
"max": 209960.0,
"count": 21
},
"SnowballTarget.Step.sum": {
"value": 209960.0,
"min": 9952.0,
"max": 209960.0,
"count": 21
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.263633728027344,
"min": 0.39488837122917175,
"max": 13.263633728027344,
"count": 21
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2705.78125,
"min": 76.60834503173828,
"max": 2705.78125,
"count": 21
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 21
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 21
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06997738431490885,
"min": 0.059487532345833725,
"max": 0.07472340965554503,
"count": 21
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34988692157454426,
"min": 0.2441071335072361,
"max": 0.37361704827772513,
"count": 21
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17841658849342196,
"min": 0.12847362314898741,
"max": 0.27699856014520513,
"count": 21
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8920829424671097,
"min": 0.5138944925959497,
"max": 1.3512257483660006,
"count": 21
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.84009738666666e-06,
"min": 7.84009738666666e-06,
"max": 0.0002922685740057142,
"count": 21
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.9200486933333306e-05,
"min": 3.9200486933333306e-05,
"max": 0.001390628607885714,
"count": 21
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10522666666666666,
"min": 0.10522666666666666,
"max": 0.2948457142857142,
"count": 21
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5261333333333333,
"min": 0.4586209523809524,
"max": 1.427085714285714,
"count": 21
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001404053333333333,
"min": 0.0001404053333333333,
"max": 0.004871400571428571,
"count": 21
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0007020266666666664,
"min": 0.0007020266666666664,
"max": 0.023180788571428572,
"count": 21
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.036363636363635,
"min": 3.8636363636363638,
"max": 26.2,
"count": 21
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1432.0,
"min": 170.0,
"max": 1441.0,
"count": 21
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.036363636363635,
"min": 3.8636363636363638,
"max": 26.2,
"count": 21
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1432.0,
"min": 170.0,
"max": 1441.0,
"count": 21
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 21
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690129996",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowBallTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690130510"
},
"total": 514.1412236760002,
"count": 1,
"self": 0.4237673900001937,
"children": {
"run_training.setup": {
"total": 0.031407599999965896,
"count": 1,
"self": 0.031407599999965896
},
"TrainerController.start_learning": {
"total": 513.686048686,
"count": 1,
"self": 0.6228016099885281,
"children": {
"TrainerController._reset_env": {
"total": 3.819593683999983,
"count": 1,
"self": 3.819593683999983
},
"TrainerController.advance": {
"total": 509.1037372320118,
"count": 19132,
"self": 0.3237350810177304,
"children": {
"env_step": {
"total": 508.78000215099405,
"count": 19132,
"self": 370.07295090698835,
"children": {
"SubprocessEnvManager._take_step": {
"total": 138.40103954301026,
"count": 19132,
"self": 1.8614811930137876,
"children": {
"TorchPolicy.evaluate": {
"total": 136.53955834999647,
"count": 19132,
"self": 136.53955834999647
}
}
},
"workers": {
"total": 0.30601170099544106,
"count": 19132,
"self": 0.0,
"children": {
"worker_root": {
"total": 511.9925751229921,
"count": 19132,
"is_parallel": true,
"self": 242.29485541897157,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002013957999906779,
"count": 1,
"is_parallel": true,
"self": 0.0005863250000857079,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014276329998210713,
"count": 10,
"is_parallel": true,
"self": 0.0014276329998210713
}
}
},
"UnityEnvironment.step": {
"total": 0.03691802100001951,
"count": 1,
"is_parallel": true,
"self": 0.0006617259998620284,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004418729999997595,
"count": 1,
"is_parallel": true,
"self": 0.0004418729999997595
},
"communicator.exchange": {
"total": 0.033350719000054596,
"count": 1,
"is_parallel": true,
"self": 0.033350719000054596
},
"steps_from_proto": {
"total": 0.0024637030001031235,
"count": 1,
"is_parallel": true,
"self": 0.0005211100001361046,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019425929999670188,
"count": 10,
"is_parallel": true,
"self": 0.0019425929999670188
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 269.6977197040205,
"count": 19131,
"is_parallel": true,
"self": 11.380331342014642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.857023839993872,
"count": 19131,
"is_parallel": true,
"self": 5.857023839993872
},
"communicator.exchange": {
"total": 212.90137025000763,
"count": 19131,
"is_parallel": true,
"self": 212.90137025000763
},
"steps_from_proto": {
"total": 39.558994272004384,
"count": 19131,
"is_parallel": true,
"self": 7.518606220011634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.04038805199275,
"count": 191310,
"is_parallel": true,
"self": 32.04038805199275
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0003203819999271218,
"count": 1,
"self": 0.0003203819999271218,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 504.694393257942,
"count": 471296,
"is_parallel": true,
"self": 11.167094038973573,
"children": {
"process_trajectory": {
"total": 277.3305322769686,
"count": 471296,
"is_parallel": true,
"self": 275.8505188519687,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4800134249999246,
"count": 4,
"is_parallel": true,
"self": 1.4800134249999246
}
}
},
"_update_policy": {
"total": 216.19676694199984,
"count": 95,
"is_parallel": true,
"self": 84.72941203900609,
"children": {
"TorchPPOOptimizer.update": {
"total": 131.46735490299375,
"count": 4842,
"is_parallel": true,
"self": 131.46735490299375
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1395957779998298,
"count": 1,
"self": 0.0008726839998871583,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13872309399994265,
"count": 1,
"self": 0.13872309399994265
}
}
}
}
}
}
}