{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9762190580368042,
"min": 0.9755625128746033,
"max": 2.8792080879211426,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9320.939453125,
"min": 9320.939453125,
"max": 29485.970703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.823467254638672,
"min": 0.28154247999191284,
"max": 12.823467254638672,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2500.576171875,
"min": 54.619239807128906,
"max": 2591.286865234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0720371722924813,
"min": 0.06407953511887028,
"max": 0.07371681388073793,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2881486891699252,
"min": 0.2697323913778425,
"max": 0.36671964720410277,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21963737905025482,
"min": 0.1026726399454763,
"max": 0.3016506288127572,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8785495162010193,
"min": 0.4106905597819052,
"max": 1.3419327104792875,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.204545454545453,
"min": 3.0454545454545454,
"max": 25.345454545454544,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1109.0,
"min": 134.0,
"max": 1394.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.204545454545453,
"min": 3.0454545454545454,
"max": 25.345454545454544,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1109.0,
"min": 134.0,
"max": 1394.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713144784",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=../training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713145214"
},
"total": 430.1065551070001,
"count": 1,
"self": 0.4352247830001943,
"children": {
"run_training.setup": {
"total": 0.051813315999879705,
"count": 1,
"self": 0.051813315999879705
},
"TrainerController.start_learning": {
"total": 429.619517008,
"count": 1,
"self": 0.5234175960317771,
"children": {
"TrainerController._reset_env": {
"total": 1.9357260090000636,
"count": 1,
"self": 1.9357260090000636
},
"TrainerController.advance": {
"total": 427.0710663899681,
"count": 18199,
"self": 0.250295360977816,
"children": {
"env_step": {
"total": 426.8207710289903,
"count": 18199,
"self": 272.7054329159773,
"children": {
"SubprocessEnvManager._take_step": {
"total": 153.8506171009908,
"count": 18199,
"self": 1.4067258389686685,
"children": {
"TorchPolicy.evaluate": {
"total": 152.44389126202213,
"count": 18199,
"self": 152.44389126202213
}
}
},
"workers": {
"total": 0.26472101202216436,
"count": 18199,
"self": 0.0,
"children": {
"worker_root": {
"total": 428.52180088099794,
"count": 18199,
"is_parallel": true,
"self": 215.78107422199082,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021773650000795897,
"count": 1,
"is_parallel": true,
"self": 0.0006145360002847156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001562828999794874,
"count": 10,
"is_parallel": true,
"self": 0.001562828999794874
}
}
},
"UnityEnvironment.step": {
"total": 0.037762431999908586,
"count": 1,
"is_parallel": true,
"self": 0.0006618439997509995,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004125340001337463,
"count": 1,
"is_parallel": true,
"self": 0.0004125340001337463
},
"communicator.exchange": {
"total": 0.03474637299996175,
"count": 1,
"is_parallel": true,
"self": 0.03474637299996175
},
"steps_from_proto": {
"total": 0.0019416810000620899,
"count": 1,
"is_parallel": true,
"self": 0.00036929600082658,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00157238499923551,
"count": 10,
"is_parallel": true,
"self": 0.00157238499923551
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 212.74072665900712,
"count": 18198,
"is_parallel": true,
"self": 9.94486589698181,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.1811700920452495,
"count": 18198,
"is_parallel": true,
"self": 5.1811700920452495
},
"communicator.exchange": {
"total": 165.21308734798095,
"count": 18198,
"is_parallel": true,
"self": 165.21308734798095
},
"steps_from_proto": {
"total": 32.40160332199912,
"count": 18198,
"is_parallel": true,
"self": 6.003392198945221,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.398211123053898,
"count": 181980,
"is_parallel": true,
"self": 26.398211123053898
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00018660700015971088,
"count": 1,
"self": 0.00018660700015971088,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 421.5347157050387,
"count": 661566,
"is_parallel": true,
"self": 13.932437474222752,
"children": {
"process_trajectory": {
"total": 232.94877017981617,
"count": 661566,
"is_parallel": true,
"self": 232.1817329988162,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7670371809999779,
"count": 4,
"is_parallel": true,
"self": 0.7670371809999779
}
}
},
"_update_policy": {
"total": 174.6535080509998,
"count": 90,
"is_parallel": true,
"self": 49.58315617201333,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.07035187898646,
"count": 4587,
"is_parallel": true,
"self": 125.07035187898646
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08912040599989268,
"count": 1,
"self": 0.0011585830002331932,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08796182299965949,
"count": 1,
"self": 0.08796182299965949
}
}
}
}
}
}
}