{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8785006999969482,
"min": 0.8760251402854919,
"max": 2.2489287853240967,
"count": 46
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8474.896484375,
"min": 8474.896484375,
"max": 20444.947265625,
"count": 46
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 49936.0,
"max": 499952.0,
"count": 46
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 49936.0,
"max": 499952.0,
"count": 46
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.292645454406738,
"min": 5.827279567718506,
"max": 14.420307159423828,
"count": 46
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2787.06591796875,
"min": 844.9555053710938,
"max": 2940.49462890625,
"count": 46
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 46
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 6567.0,
"max": 10945.0,
"count": 46
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07369226362860383,
"min": 0.05860852830057197,
"max": 0.07806807999212668,
"count": 46
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2947690545144153,
"min": 0.19623087732607553,
"max": 0.3746745030375143,
"count": 46
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12998476065695286,
"min": 0.12520532667929052,
"max": 0.24657660112077115,
"count": 46
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5199390426278114,
"min": 0.5008213067171621,
"max": 1.2328830056038558,
"count": 46
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.193698903200001e-06,
"min": 2.193698903200001e-06,
"max": 0.0001812736093632,
"count": 46
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.774795612800005e-06,
"min": 8.774795612800005e-06,
"max": 0.0008711680644160002,
"count": 46
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1010968,
"min": 0.1010968,
"max": 0.19063680000000005,
"count": 46
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4043872,
"min": 0.4043872,
"max": 0.9355840000000001,
"count": 46
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.473032000000003e-05,
"min": 6.473032000000003e-05,
"max": 0.00453277632,
"count": 46
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00025892128000000014,
"min": 0.00025892128000000014,
"max": 0.0217856416,
"count": 46
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.272727272727273,
"min": 15.393939393939394,
"max": 28.509090909090908,
"count": 46
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1244.0,
"min": 508.0,
"max": 1568.0,
"count": 46
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.272727272727273,
"min": 15.393939393939394,
"max": 28.509090909090908,
"count": 46
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1244.0,
"min": 508.0,
"max": 1568.0,
"count": 46
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713899334",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713900236"
},
"total": 902.4398860050001,
"count": 1,
"self": 0.32148621200008165,
"children": {
"run_training.setup": {
"total": 0.052702590999956556,
"count": 1,
"self": 0.052702590999956556
},
"TrainerController.start_learning": {
"total": 902.065697202,
"count": 1,
"self": 1.047607644012487,
"children": {
"TrainerController._reset_env": {
"total": 1.8630781790000128,
"count": 1,
"self": 1.8630781790000128
},
"TrainerController.advance": {
"total": 899.0180193389875,
"count": 41612,
"self": 0.5070902269683302,
"children": {
"env_step": {
"total": 898.5109291120192,
"count": 41612,
"self": 619.6688795220143,
"children": {
"SubprocessEnvManager._take_step": {
"total": 278.32785056801345,
"count": 41612,
"self": 2.8185278930254754,
"children": {
"TorchPolicy.evaluate": {
"total": 275.50932267498797,
"count": 41612,
"self": 275.50932267498797
}
}
},
"workers": {
"total": 0.5141990219914305,
"count": 41612,
"self": 0.0,
"children": {
"worker_root": {
"total": 900.6625719539838,
"count": 41612,
"is_parallel": true,
"self": 473.5189549909878,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022677320000639156,
"count": 1,
"is_parallel": true,
"self": 0.000670738999929199,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015969930001347166,
"count": 10,
"is_parallel": true,
"self": 0.0015969930001347166
}
}
},
"UnityEnvironment.step": {
"total": 0.028552360000048793,
"count": 1,
"is_parallel": true,
"self": 0.00045093900018855493,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004014699999288496,
"count": 1,
"is_parallel": true,
"self": 0.0004014699999288496
},
"communicator.exchange": {
"total": 0.02644259599992438,
"count": 1,
"is_parallel": true,
"self": 0.02644259599992438
},
"steps_from_proto": {
"total": 0.00125735500000701,
"count": 1,
"is_parallel": true,
"self": 0.0002572660001760596,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010000889998309503,
"count": 10,
"is_parallel": true,
"self": 0.0010000889998309503
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 427.143616962996,
"count": 41611,
"is_parallel": true,
"self": 15.177523103978729,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.136478518004083,
"count": 41611,
"is_parallel": true,
"self": 8.136478518004083
},
"communicator.exchange": {
"total": 354.27928298599454,
"count": 41611,
"is_parallel": true,
"self": 354.27928298599454
},
"steps_from_proto": {
"total": 49.55033235501867,
"count": 41611,
"is_parallel": true,
"self": 9.535304745025655,
"children": {
"_process_rank_one_or_two_observation": {
"total": 40.01502760999301,
"count": 416110,
"is_parallel": true,
"self": 40.01502760999301
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012238500016792386,
"count": 1,
"self": 0.00012238500016792386,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 897.7540536789697,
"count": 426923,
"is_parallel": true,
"self": 5.43576744600341,
"children": {
"process_trajectory": {
"total": 538.2202289099657,
"count": 426923,
"is_parallel": true,
"self": 536.4171077899658,
"children": {
"RLTrainer._checkpoint": {
"total": 1.8031211199999007,
"count": 10,
"is_parallel": true,
"self": 1.8031211199999007
}
}
},
"_update_policy": {
"total": 354.0980573230006,
"count": 207,
"is_parallel": true,
"self": 82.3815759209972,
"children": {
"TorchPPOOptimizer.update": {
"total": 271.7164814020034,
"count": 10548,
"is_parallel": true,
"self": 271.7164814020034
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13686965499982762,
"count": 1,
"self": 0.0015136479999000585,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13535600699992756,
"count": 1,
"self": 0.13535600699992756
}
}
}
}
}
}
}