namedotpg
First push - no custom parameters
3f058e7
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.879950761795044,
"min": 0.8770568370819092,
"max": 2.8611738681793213,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8421.12890625,
"min": 8421.12890625,
"max": 29364.2265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.181851387023926,
"min": 0.35812652111053467,
"max": 12.181851387023926,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2375.4609375,
"min": 69.47654724121094,
"max": 2458.21630859375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06578473363690279,
"min": 0.06104409383154359,
"max": 0.07204656235733316,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26313893454761117,
"min": 0.24996326636206573,
"max": 0.3521124338246106,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18599631746902184,
"min": 0.12493811619659775,
"max": 0.29603005747000377,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7439852698760874,
"min": 0.499752464786391,
"max": 1.4801502873500187,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.454545454545453,
"min": 3.659090909090909,
"max": 24.454545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1076.0,
"min": 161.0,
"max": 1326.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.454545454545453,
"min": 3.659090909090909,
"max": 24.454545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1076.0,
"min": 161.0,
"max": 1326.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688562288",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688562865"
},
"total": 577.340425889,
"count": 1,
"self": 1.3032689899999923,
"children": {
"run_training.setup": {
"total": 0.05394492400000672,
"count": 1,
"self": 0.05394492400000672
},
"TrainerController.start_learning": {
"total": 575.983211975,
"count": 1,
"self": 0.7291718290081235,
"children": {
"TrainerController._reset_env": {
"total": 8.083745072000056,
"count": 1,
"self": 8.083745072000056
},
"TrainerController.advance": {
"total": 566.770455424992,
"count": 18224,
"self": 0.3045157259896314,
"children": {
"env_step": {
"total": 566.4659396990023,
"count": 18224,
"self": 446.97664401999896,
"children": {
"SubprocessEnvManager._take_step": {
"total": 119.17767893599068,
"count": 18224,
"self": 2.427424819990051,
"children": {
"TorchPolicy.evaluate": {
"total": 116.75025411600063,
"count": 18224,
"self": 116.75025411600063
}
}
},
"workers": {
"total": 0.31161674301267794,
"count": 18224,
"self": 0.0,
"children": {
"worker_root": {
"total": 573.889255592,
"count": 18224,
"is_parallel": true,
"self": 289.0214273979914,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002859631000092122,
"count": 1,
"is_parallel": true,
"self": 0.00065043400002196,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002209197000070162,
"count": 10,
"is_parallel": true,
"self": 0.002209197000070162
}
}
},
"UnityEnvironment.step": {
"total": 0.06086758700007522,
"count": 1,
"is_parallel": true,
"self": 0.0007088100002192732,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041236099991692754,
"count": 1,
"is_parallel": true,
"self": 0.00041236099991692754
},
"communicator.exchange": {
"total": 0.05734966499994698,
"count": 1,
"is_parallel": true,
"self": 0.05734966499994698
},
"steps_from_proto": {
"total": 0.0023967509999920367,
"count": 1,
"is_parallel": true,
"self": 0.00064580800017211,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017509429998199266,
"count": 10,
"is_parallel": true,
"self": 0.0017509429998199266
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 284.86782819400867,
"count": 18223,
"is_parallel": true,
"self": 10.90878671998712,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.445710648016643,
"count": 18223,
"is_parallel": true,
"self": 5.445710648016643
},
"communicator.exchange": {
"total": 233.56450457798655,
"count": 18223,
"is_parallel": true,
"self": 233.56450457798655
},
"steps_from_proto": {
"total": 34.94882624801835,
"count": 18223,
"is_parallel": true,
"self": 6.084756735016867,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.864069513001482,
"count": 182230,
"is_parallel": true,
"self": 28.864069513001482
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013691899994228152,
"count": 1,
"self": 0.00013691899994228152,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 561.6932250749885,
"count": 733942,
"is_parallel": true,
"self": 12.932831888045826,
"children": {
"process_trajectory": {
"total": 298.6188046529419,
"count": 733942,
"is_parallel": true,
"self": 295.55843375694167,
"children": {
"RLTrainer._checkpoint": {
"total": 3.0603708960002223,
"count": 4,
"is_parallel": true,
"self": 3.0603708960002223
}
}
},
"_update_policy": {
"total": 250.1415885340008,
"count": 90,
"is_parallel": true,
"self": 160.7100799240119,
"children": {
"TorchPPOOptimizer.update": {
"total": 89.4315086099889,
"count": 4584,
"is_parallel": true,
"self": 89.4315086099889
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.3997027299999445,
"count": 1,
"self": 0.001869759999863163,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39783297000008133,
"count": 1,
"self": 0.39783297000008133
}
}
}
}
}
}
}
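
The JSON above is the run_logs/timers.json that ML-Agents writes at the end of training: the "gauges" block holds the last, min, and max value of each training statistic across the summary writes (here 20 of them over the 200k-step SnowballTarget1 run), and the nested "total"/"self"/"children" entries form a wall-clock timer tree for the trainer. Below is a minimal Python sketch, not part of the upload, that reads such a file and prints the final reward gauge plus the timer breakdown. The local path is an assumption inferred from --run-id=SnowballTarget1 in command_line_arguments; adjust it to wherever your copy of the file lives.

import json

# Assumed path: mlagents-learn normally writes results/<run-id>/run_logs/timers.json.
path = "results/SnowballTarget1/run_logs/timers.json"

with open(path) as f:
    report = json.load(f)

# Each gauge keeps the most recent value plus min/max over `count` summary writes.
reward = report["gauges"]["SnowballTarget.Environment.CumulativeReward.mean"]
print(
    f"final mean reward: {reward['value']:.2f} "
    f"(min {reward['min']:.2f}, max {reward['max']:.2f} over {reward['count']} summaries)"
)

def walk(node, name="root", depth=0):
    # Recursively print each timer node: total wall-clock time, time spent in the
    # node itself (excluding children), and how many times it was entered.
    total = node.get("total", 0.0)
    self_time = node.get("self", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: {total:.1f}s total, {self_time:.1f}s self, count {count}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

walk(report)

Running this against the dump above would show, for example, that most of the 577 s of wall-clock time sits under TrainerController.advance, split between env_step (Unity communication) and the PPO policy update.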