{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5326029062271118,
"min": 0.5227048397064209,
"max": 0.9090038537979126,
"count": 80
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5061.85791015625,
"min": 4967.78662109375,
"max": 9239.115234375,
"count": 80
},
"SnowballTarget.Step.mean": {
"value": 999992.0,
"min": 209936.0,
"max": 999992.0,
"count": 80
},
"SnowballTarget.Step.sum": {
"value": 999992.0,
"min": 209936.0,
"max": 999992.0,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.173223495483398,
"min": 13.16577434539795,
"max": 14.405982971191406,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2763.778564453125,
"min": 2567.235595703125,
"max": 2953.2265625,
"count": 80
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 80
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 80
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06899481696722004,
"min": 0.06391147776853091,
"max": 0.0725305397531723,
"count": 80
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27597926786888016,
"min": 0.26089074859860645,
"max": 0.3626526987658615,
"count": 80
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14936638727565021,
"min": 0.13474033800337243,
"max": 0.20730669712921718,
"count": 80
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.5974655491026009,
"min": 0.5846467292593682,
"max": 1.0152903094011196,
"count": 80
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.4028995324000038e-06,
"min": 1.4028995324000038e-06,
"max": 0.0002383428205524,
"count": 80
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.611598129600015e-06,
"min": 5.611598129600015e-06,
"max": 0.001176864107712,
"count": 80
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10046760000000002,
"min": 0.10046760000000002,
"max": 0.17944760000000004,
"count": 80
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40187040000000007,
"min": 0.40187040000000007,
"max": 0.8922880000000001,
"count": 80
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.3333240000000065e-05,
"min": 3.3333240000000065e-05,
"max": 0.003974435239999999,
"count": 80
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00013333296000000026,
"min": 0.00013333296000000026,
"max": 0.0196251712,
"count": 80
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.863636363636363,
"min": 25.10909090909091,
"max": 28.381818181818183,
"count": 80
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1226.0,
"min": 1108.0,
"max": 1561.0,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.863636363636363,
"min": 25.10909090909091,
"max": 28.381818181818183,
"count": 80
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1226.0,
"min": 1108.0,
"max": 1561.0,
"count": 80
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 80
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1726854827",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1726857174"
},
"total": 2346.2864499879997,
"count": 1,
"self": 0.4358599130005132,
"children": {
"run_training.setup": {
"total": 0.07445213099981629,
"count": 1,
"self": 0.07445213099981629
},
"TrainerController.start_learning": {
"total": 2345.7761379439994,
"count": 1,
"self": 2.251796450945676,
"children": {
"TrainerController._reset_env": {
"total": 2.3781630909998057,
"count": 1,
"self": 2.3781630909998057
},
"TrainerController.advance": {
"total": 2341.061490449054,
"count": 72736,
"self": 1.0147861170685246,
"children": {
"env_step": {
"total": 2340.0467043319854,
"count": 72736,
"self": 1657.531445365043,
"children": {
"SubprocessEnvManager._take_step": {
"total": 681.2458301628749,
"count": 72736,
"self": 5.593772210789666,
"children": {
"TorchPolicy.evaluate": {
"total": 675.6520579520852,
"count": 72736,
"self": 675.6520579520852
}
}
},
"workers": {
"total": 1.269428804067502,
"count": 72736,
"self": 0.0,
"children": {
"worker_root": {
"total": 2341.6077854111395,
"count": 72736,
"is_parallel": true,
"self": 1505.3031205361635,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0030896049997863884,
"count": 1,
"is_parallel": true,
"self": 0.0008106050004244025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002278999999361986,
"count": 10,
"is_parallel": true,
"self": 0.002278999999361986
}
}
},
"UnityEnvironment.step": {
"total": 0.05363281099971573,
"count": 1,
"is_parallel": true,
"self": 0.0006811249995735125,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004004070001428772,
"count": 1,
"is_parallel": true,
"self": 0.0004004070001428772
},
"communicator.exchange": {
"total": 0.04846765399997821,
"count": 1,
"is_parallel": true,
"self": 0.04846765399997821
},
"steps_from_proto": {
"total": 0.004083625000021129,
"count": 1,
"is_parallel": true,
"self": 0.0003947330001210503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003688891999900079,
"count": 10,
"is_parallel": true,
"self": 0.003688891999900079
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 836.304664874976,
"count": 72735,
"is_parallel": true,
"self": 38.80943300871513,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 20.712948599077208,
"count": 72735,
"is_parallel": true,
"self": 20.712948599077208
},
"communicator.exchange": {
"total": 651.1329285991415,
"count": 72735,
"is_parallel": true,
"self": 651.1329285991415
},
"steps_from_proto": {
"total": 125.64935466804218,
"count": 72735,
"is_parallel": true,
"self": 23.57902727997498,
"children": {
"_process_rank_one_or_two_observation": {
"total": 102.0703273880672,
"count": 727350,
"is_parallel": true,
"self": 102.0703273880672
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001593429997228668,
"count": 1,
"self": 0.0001593429997228668,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 2318.9619156790004,
"count": 2749667,
"is_parallel": true,
"self": 57.1321107081294,
"children": {
"process_trajectory": {
"total": 954.4814090508658,
"count": 2749667,
"is_parallel": true,
"self": 952.0762051158654,
"children": {
"RLTrainer._checkpoint": {
"total": 2.4052039350003724,
"count": 16,
"is_parallel": true,
"self": 2.4052039350003724
}
}
},
"_update_policy": {
"total": 1307.3483959200053,
"count": 363,
"is_parallel": true,
"self": 508.36442521995286,
"children": {
"TorchPPOOptimizer.update": {
"total": 798.9839707000524,
"count": 49360,
"is_parallel": true,
"self": 798.9839707000524
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08452861000023404,
"count": 1,
"self": 0.001199008000185131,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08332960200004891,
"count": 1,
"self": 0.08332960200004891
}
}
}
}
}
}
}