{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.449623703956604,
"min": 1.449623703956604,
"max": 2.8849942684173584,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 13761.27734375,
"min": 13761.27734375,
"max": 29576.9609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 10.13458251953125,
"min": 0.08002053201198578,
"max": 10.13458251953125,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1976.2435302734375,
"min": 15.523983001708984,
"max": 1995.594482421875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.5,
"min": 2.5454545454545454,
"max": 22.5,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 990.0,
"min": 112.0,
"max": 1202.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.5,
"min": 2.5454545454545454,
"max": 22.5,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 990.0,
"min": 112.0,
"max": 1202.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07745642748654714,
"min": 0.06349215880956628,
"max": 0.07830129082298581,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.07745642748654714,
"min": 0.06659385215838455,
"max": 0.15660258164597163,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2620480860151896,
"min": 0.09901946910198718,
"max": 0.3015144609641738,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.2620480860151896,
"min": 0.09901946910198718,
"max": 0.6030289219283476,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.16009728e-06,
"min": 8.16009728e-06,
"max": 0.000291864002712,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 8.16009728e-06,
"min": 8.16009728e-06,
"max": 0.000559476013508,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10271999999999998,
"min": 0.10271999999999998,
"max": 0.197288,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10271999999999998,
"min": 0.10271999999999998,
"max": 0.38649200000000006,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.000145728,
"min": 0.000145728,
"max": 0.004864671200000002,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.000145728,
"min": 0.000145728,
"max": 0.0093259508,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715698902",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715699544"
},
"total": 642.2993072929999,
"count": 1,
"self": 0.547578313999793,
"children": {
"run_training.setup": {
"total": 0.07407654200005709,
"count": 1,
"self": 0.07407654200005709
},
"TrainerController.start_learning": {
"total": 641.677652437,
"count": 1,
"self": 0.8727776469845594,
"children": {
"TrainerController._reset_env": {
"total": 3.450707217000172,
"count": 1,
"self": 3.450707217000172
},
"TrainerController.advance": {
"total": 637.2583127920152,
"count": 18200,
"self": 0.43052954599261284,
"children": {
"env_step": {
"total": 636.8277832460226,
"count": 18200,
"self": 504.16455248603575,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.22874845199317,
"count": 18200,
"self": 2.6503738919877833,
"children": {
"TorchPolicy.evaluate": {
"total": 129.5783745600054,
"count": 18200,
"self": 129.5783745600054
}
}
},
"workers": {
"total": 0.43448230799367593,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 639.5463916009958,
"count": 18200,
"is_parallel": true,
"self": 313.7082834289886,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.010619183000017074,
"count": 1,
"is_parallel": true,
"self": 0.005523105000065698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005096077999951376,
"count": 10,
"is_parallel": true,
"self": 0.005096077999951376
}
}
},
"UnityEnvironment.step": {
"total": 0.04711228999985906,
"count": 1,
"is_parallel": true,
"self": 0.000814790999811521,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005903520000174467,
"count": 1,
"is_parallel": true,
"self": 0.0005903520000174467
},
"communicator.exchange": {
"total": 0.04325487500000236,
"count": 1,
"is_parallel": true,
"self": 0.04325487500000236
},
"steps_from_proto": {
"total": 0.002452272000027733,
"count": 1,
"is_parallel": true,
"self": 0.0004651319995900849,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019871400004376483,
"count": 10,
"is_parallel": true,
"self": 0.0019871400004376483
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 325.8381081720072,
"count": 18199,
"is_parallel": true,
"self": 15.490814677035587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.923327464991871,
"count": 18199,
"is_parallel": true,
"self": 7.923327464991871
},
"communicator.exchange": {
"total": 256.3936811379979,
"count": 18199,
"is_parallel": true,
"self": 256.3936811379979
},
"steps_from_proto": {
"total": 46.030284891981864,
"count": 18199,
"is_parallel": true,
"self": 9.362658035920958,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.667626856060906,
"count": 181990,
"is_parallel": true,
"self": 36.667626856060906
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0009184450000248034,
"count": 1,
"self": 0.0009184450000248034,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 627.9951827019033,
"count": 969053,
"is_parallel": true,
"self": 23.531892721043505,
"children": {
"process_trajectory": {
"total": 395.9316010648606,
"count": 969053,
"is_parallel": true,
"self": 395.2548284968607,
"children": {
"RLTrainer._checkpoint": {
"total": 0.676772567999933,
"count": 4,
"is_parallel": true,
"self": 0.676772567999933
}
}
},
"_update_policy": {
"total": 208.5316889159992,
"count": 36,
"is_parallel": true,
"self": 63.532675385015864,
"children": {
"TorchPPOOptimizer.update": {
"total": 144.99901353098335,
"count": 4506,
"is_parallel": true,
"self": 144.99901353098335
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09493633600004614,
"count": 1,
"self": 0.0013482680001288827,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09358806799991726,
"count": 1,
"self": 0.09358806799991726
}
}
}
}
}
}
}
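The JSON above follows the ML-Agents run_logs/timers.json layout: "gauges" holds per-metric summaries (latest value plus min/max/count over the run), and the rest of the object is a nested timer tree in which every node reports its wall-clock "total", call "count", time spent in the node itself ("self"), and any "children". Below is a minimal sketch of how one might load and summarize such a file with the standard library; the file path is an assumption and should point at the run's actual timers.json (e.g. under results/SnowballTarget1/run_logs/).

import json

# Hypothetical path; adjust to where this run's timers.json actually lives.
PATH = "timers.json"

with open(PATH) as f:
    timers = json.load(f)

# Each gauge records the latest value plus min/max/count over the run.
print("Gauges:")
for name, g in timers["gauges"].items():
    print(f"  {name}: value={g['value']:.4f} "
          f"(min={g['min']:.4f}, max={g['max']:.4f}, n={g['count']})")

# The timer tree nests "children" blocks; each node reports the wall-clock
# total, the call count, and the time spent in the node itself ("self").
def walk(node, name="root", depth=0):
    total = node.get("total", 0.0)
    count = node.get("count", 0)
    print(f"{'  ' * depth}{name}: total={total:.2f}s, count={count}")
    for child_name, child in node.get("children", {}).items():
        walk(child, child_name, depth + 1)

print("\nTimer tree:")
walk(timers)

Walking the tree this way makes it easy to see, for example, that most of the 642 s run is spent inside env_step (communicator.exchange dominating), while _update_policy accounts for roughly 209 s.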