{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.836672842502594,
"min": 0.836672842502594,
"max": 1.8354339599609375,
"count": 14
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8043.77294921875,
"min": 8043.77294921875,
"max": 17563.78515625,
"count": 14
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 69968.0,
"max": 199960.0,
"count": 14
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 69968.0,
"max": 199960.0,
"count": 14
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.781302452087402,
"min": 7.125820159912109,
"max": 12.781302452087402,
"count": 14
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2492.35400390625,
"min": 833.720947265625,
"max": 2593.813720703125,
"count": 14
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 14
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 4378.0,
"max": 10945.0,
"count": 14
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06581170070064984,
"min": 0.065043459268054,
"max": 0.08029949526946224,
"count": 14
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2632468028025994,
"min": 0.1312331338658445,
"max": 0.4014974763473112,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1939256189792764,
"min": 0.18633982268910781,
"max": 0.25311718063027255,
"count": 14
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7757024759171056,
"min": 0.5029456494558675,
"max": 1.2505602845374275,
"count": 14
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.214097262000005e-06,
"min": 8.214097262000005e-06,
"max": 0.00019961403346200002,
"count": 14
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.285638904800002e-05,
"min": 3.285638904800002e-05,
"max": 0.00094032018656,
"count": 14
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10273800000000001,
"min": 0.10273800000000001,
"max": 0.16653800000000002,
"count": 14
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41095200000000004,
"min": 0.33307600000000004,
"max": 0.8134400000000002,
"count": 14
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001466262000000001,
"min": 0.0001466262000000001,
"max": 0.0033302462000000007,
"count": 14
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005865048000000004,
"min": 0.0005865048000000004,
"max": 0.015690656000000004,
"count": 14
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.09090909090909,
"min": 17.727272727272727,
"max": 25.431818181818183,
"count": 14
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1104.0,
"min": 390.0,
"max": 1388.0,
"count": 14
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.09090909090909,
"min": 17.727272727272727,
"max": 25.431818181818183,
"count": 14
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1104.0,
"min": 390.0,
"max": 1388.0,
"count": 14
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692545544",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/snowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692545861"
},
"total": 317.6313503980001,
"count": 1,
"self": 0.7840919160000794,
"children": {
"run_training.setup": {
"total": 0.04170700500003477,
"count": 1,
"self": 0.04170700500003477
},
"TrainerController.start_learning": {
"total": 316.805551477,
"count": 1,
"self": 0.38119174001519696,
"children": {
"TrainerController._reset_env": {
"total": 3.9177380400000175,
"count": 1,
"self": 3.9177380400000175
},
"TrainerController.advance": {
"total": 312.27118551198475,
"count": 12424,
"self": 0.18747485698986566,
"children": {
"env_step": {
"total": 312.0837106549949,
"count": 12424,
"self": 228.62752408799213,
"children": {
"SubprocessEnvManager._take_step": {
"total": 83.26534969100692,
"count": 12424,
"self": 1.2040676890179611,
"children": {
"TorchPolicy.evaluate": {
"total": 82.06128200198896,
"count": 12424,
"self": 82.06128200198896
}
}
},
"workers": {
"total": 0.1908368759958421,
"count": 12424,
"self": 0.0,
"children": {
"worker_root": {
"total": 315.55860512299535,
"count": 12424,
"is_parallel": true,
"self": 147.2086571580037,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018325449999565535,
"count": 1,
"is_parallel": true,
"self": 0.0005223669999168123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013101780000397412,
"count": 10,
"is_parallel": true,
"self": 0.0013101780000397412
}
}
},
"UnityEnvironment.step": {
"total": 0.03423124400001143,
"count": 1,
"is_parallel": true,
"self": 0.0005997139998044076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004083150000724345,
"count": 1,
"is_parallel": true,
"self": 0.0004083150000724345
},
"communicator.exchange": {
"total": 0.031070490000047357,
"count": 1,
"is_parallel": true,
"self": 0.031070490000047357
},
"steps_from_proto": {
"total": 0.00215272500008723,
"count": 1,
"is_parallel": true,
"self": 0.0005045410002821882,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016481839998050418,
"count": 10,
"is_parallel": true,
"self": 0.0016481839998050418
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 168.34994796499166,
"count": 12423,
"is_parallel": true,
"self": 7.057714106998219,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.5819989389972307,
"count": 12423,
"is_parallel": true,
"self": 3.5819989389972307
},
"communicator.exchange": {
"total": 133.4748783099982,
"count": 12423,
"is_parallel": true,
"self": 133.4748783099982
},
"steps_from_proto": {
"total": 24.235356608998018,
"count": 12423,
"is_parallel": true,
"self": 4.414643972016165,
"children": {
"_process_rank_one_or_two_observation": {
"total": 19.820712636981852,
"count": 124230,
"is_parallel": true,
"self": 19.820712636981852
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00011547899998731737,
"count": 1,
"self": 0.00011547899998731737,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 309.8994989799986,
"count": 283728,
"is_parallel": true,
"self": 6.091263030016762,
"children": {
"process_trajectory": {
"total": 169.19533811698216,
"count": 283728,
"is_parallel": true,
"self": 167.65719715898217,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5381409579999854,
"count": 3,
"is_parallel": true,
"self": 1.5381409579999854
}
}
},
"_update_policy": {
"total": 134.6128978329997,
"count": 61,
"is_parallel": true,
"self": 53.76158820299975,
"children": {
"TorchPPOOptimizer.update": {
"total": 80.85130962999995,
"count": 3108,
"is_parallel": true,
"self": 80.85130962999995
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23532070600003863,
"count": 1,
"self": 0.0019433209999988321,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2333773850000398,
"count": 1,
"self": 0.2333773850000398
}
}
}
}
}
}
}