{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.9315381050109863,
"min": 1.9315381050109863,
"max": 2.8702282905578613,
"count": 7
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 19504.671875,
"min": 19504.671875,
"max": 29551.87109375,
"count": 7
},
"SnowballTarget.Step.mean": {
"value": 69992.0,
"min": 9952.0,
"max": 69992.0,
"count": 7
},
"SnowballTarget.Step.sum": {
"value": 69992.0,
"min": 9952.0,
"max": 69992.0,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.592403888702393,
"min": 0.3914852440357208,
"max": 6.592403888702393,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1285.518798828125,
"min": 75.94813537597656,
"max": 1285.518798828125,
"count": 7
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 7
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 7
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06854227489398623,
"min": 0.06675874077543495,
"max": 0.07348860295247042,
"count": 7
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27416909957594493,
"min": 0.2670349631017398,
"max": 0.35985593154684603,
"count": 7
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.28034259451954974,
"min": 0.1218956601805985,
"max": 0.28531403934546545,
"count": 7
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.121370378078199,
"min": 0.487582640722394,
"max": 1.3624117485448426,
"count": 7
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00020278203240600001,
"min": 0.00020278203240600001,
"max": 0.000291882002706,
"count": 7
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0008111281296240001,
"min": 0.0008111281296240001,
"max": 0.00138516003828,
"count": 7
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.16759400000000002,
"min": 0.16759400000000002,
"max": 0.19729400000000002,
"count": 7
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.6703760000000001,
"min": 0.6703760000000001,
"max": 0.96172,
"count": 7
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.003382940600000001,
"min": 0.003382940600000001,
"max": 0.0048649706,
"count": 7
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.013531762400000005,
"min": 0.013531762400000005,
"max": 0.023089828,
"count": 7
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 15.931818181818182,
"min": 3.1136363636363638,
"max": 15.931818181818182,
"count": 7
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 701.0,
"min": 137.0,
"max": 774.0,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 15.931818181818182,
"min": 3.1136363636363638,
"max": 15.931818181818182,
"count": 7
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 701.0,
"min": 137.0,
"max": 774.0,
"count": 7
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 7
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719350783",
"python_version": "3.10.12 (main, Jun 25 2024, 18:12:47) [GCC 14.1.1 20240522]",
"command_line_arguments": "/home/matt/docs/howTo/ai/u5/hf-env/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719350846"
},
"total": 62.97646521600018,
"count": 1,
"self": 0.1737509720005619,
"children": {
"run_training.setup": {
"total": 0.012011805999463832,
"count": 1,
"self": 0.012011805999463832
},
"TrainerController.start_learning": {
"total": 62.79070243800015,
"count": 1,
"self": 0.09770760198171047,
"children": {
"TrainerController._reset_env": {
"total": 0.8941725790000419,
"count": 1,
"self": 0.8941725790000419
},
"TrainerController.advance": {
"total": 61.75479988301777,
"count": 6556,
"self": 0.04473474206406536,
"children": {
"env_step": {
"total": 61.7100651409537,
"count": 6556,
"self": 44.44529195897121,
"children": {
"SubprocessEnvManager._take_step": {
"total": 17.216592010026034,
"count": 6556,
"self": 0.2523318360263147,
"children": {
"TorchPolicy.evaluate": {
"total": 16.96426017399972,
"count": 6556,
"self": 16.96426017399972
}
}
},
"workers": {
"total": 0.04818117195645755,
"count": 6555,
"self": 0.0,
"children": {
"worker_root": {
"total": 62.655246266022914,
"count": 6555,
"is_parallel": true,
"self": 33.381031464014086,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0009580320001987275,
"count": 1,
"is_parallel": true,
"self": 0.00031027200111566344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000647759999083064,
"count": 10,
"is_parallel": true,
"self": 0.000647759999083064
}
}
},
"UnityEnvironment.step": {
"total": 0.014263753000705037,
"count": 1,
"is_parallel": true,
"self": 0.00024890900022001006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00029059400003461633,
"count": 1,
"is_parallel": true,
"self": 0.00029059400003461633
},
"communicator.exchange": {
"total": 0.01266807800038805,
"count": 1,
"is_parallel": true,
"self": 0.01266807800038805
},
"steps_from_proto": {
"total": 0.001056172000062361,
"count": 1,
"is_parallel": true,
"self": 0.0002604080000310205,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007957640000313404,
"count": 10,
"is_parallel": true,
"self": 0.0007957640000313404
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 29.274214802008828,
"count": 6554,
"is_parallel": true,
"self": 1.1769603450593422,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.6377088219951474,
"count": 6554,
"is_parallel": true,
"self": 0.6377088219951474
},
"communicator.exchange": {
"total": 23.746717012966656,
"count": 6554,
"is_parallel": true,
"self": 23.746717012966656
},
"steps_from_proto": {
"total": 3.7128286219876827,
"count": 6554,
"is_parallel": true,
"self": 0.7385545441675276,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.974274077820155,
"count": 65540,
"is_parallel": true,
"self": 2.974274077820155
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.6499000341573264e-05,
"count": 1,
"self": 2.6499000341573264e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 61.67237820009723,
"count": 37409,
"is_parallel": true,
"self": 0.4198227440856499,
"children": {
"process_trajectory": {
"total": 35.119402948012976,
"count": 37409,
"is_parallel": true,
"self": 35.01569295801255,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10370999000042502,
"count": 1,
"is_parallel": true,
"self": 0.10370999000042502
}
}
},
"_update_policy": {
"total": 26.133152507998602,
"count": 32,
"is_parallel": true,
"self": 7.7936852750335675,
"children": {
"TorchPPOOptimizer.update": {
"total": 18.339467232965035,
"count": 1629,
"is_parallel": true,
"self": 18.339467232965035
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.04399587500029156,
"count": 1,
"self": 0.00040369700036535505,
"children": {
"RLTrainer._checkpoint": {
"total": 0.04359217799992621,
"count": 1,
"self": 0.04359217799992621
}
}
}
}
}
}
}