{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.49389326572418213,
"min": 0.48974916338920593,
"max": 0.8311858773231506,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 4780.88671875,
"min": 4665.3505859375,
"max": 8503.03125,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 499952.0,
"min": 209936.0,
"max": 499952.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.264318466186523,
"min": 12.78330135345459,
"max": 13.550152778625488,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2705.9208984375,
"min": 2454.393798828125,
"max": 2754.96923828125,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06444381270489162,
"min": 0.06138828125846225,
"max": 0.07410374682605798,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3222190635244581,
"min": 0.245553125033849,
"max": 0.35996163713257284,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16523896856050865,
"min": 0.16523896856050865,
"max": 0.20906220627766028,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8261948428025433,
"min": 0.6695908378152287,
"max": 1.0453110313883014,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.1056989647999945e-06,
"min": 3.1056989647999945e-06,
"max": 0.00017668564110480002,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5528494823999972e-05,
"min": 1.5528494823999972e-05,
"max": 0.000853728215424,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10103520000000002,
"min": 0.10103520000000002,
"max": 0.15889520000000001,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5051760000000001,
"min": 0.4120608,
"max": 0.7845760000000002,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.165647999999992e-05,
"min": 6.165647999999992e-05,
"max": 0.002948870480000001,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003082823999999996,
"min": 0.0003082823999999996,
"max": 0.0142503424,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.054545454545455,
"min": 25.01818181818182,
"max": 27.045454545454547,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1433.0,
"min": 1106.0,
"max": 1460.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.054545454545455,
"min": 25.01818181818182,
"max": 27.045454545454547,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1433.0,
"min": 1106.0,
"max": 1460.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690890343",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690891037"
},
"total": 694.623827245,
"count": 1,
"self": 0.43983715800004575,
"children": {
"run_training.setup": {
"total": 0.0312753050000083,
"count": 1,
"self": 0.0312753050000083
},
"TrainerController.start_learning": {
"total": 694.152714782,
"count": 1,
"self": 0.7819660919807347,
"children": {
"TrainerController._reset_env": {
"total": 3.913775874999942,
"count": 1,
"self": 3.913775874999942
},
"TrainerController.advance": {
"total": 689.3154025650192,
"count": 27279,
"self": 0.37225476100275046,
"children": {
"env_step": {
"total": 688.9431478040165,
"count": 27279,
"self": 502.8756942400214,
"children": {
"SubprocessEnvManager._take_step": {
"total": 185.61299342100756,
"count": 27279,
"self": 2.58825859700994,
"children": {
"TorchPolicy.evaluate": {
"total": 183.02473482399762,
"count": 27279,
"self": 183.02473482399762
}
}
},
"workers": {
"total": 0.45446014298750015,
"count": 27279,
"self": 0.0,
"children": {
"worker_root": {
"total": 691.9716259699957,
"count": 27279,
"is_parallel": true,
"self": 327.18913087799194,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018565589999752774,
"count": 1,
"is_parallel": true,
"self": 0.000559727999984716,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012968309999905614,
"count": 10,
"is_parallel": true,
"self": 0.0012968309999905614
}
}
},
"UnityEnvironment.step": {
"total": 0.03499106699996446,
"count": 1,
"is_parallel": true,
"self": 0.0006187319999071406,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004812870000705516,
"count": 1,
"is_parallel": true,
"self": 0.0004812870000705516
},
"communicator.exchange": {
"total": 0.0318589549999615,
"count": 1,
"is_parallel": true,
"self": 0.0318589549999615
},
"steps_from_proto": {
"total": 0.00203209300002527,
"count": 1,
"is_parallel": true,
"self": 0.00048141700005999155,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015506759999652786,
"count": 10,
"is_parallel": true,
"self": 0.0015506759999652786
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 364.7824950920037,
"count": 27278,
"is_parallel": true,
"self": 15.603054116971748,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.835772952041566,
"count": 27278,
"is_parallel": true,
"self": 7.835772952041566
},
"communicator.exchange": {
"total": 290.0039097430006,
"count": 27278,
"is_parallel": true,
"self": 290.0039097430006
},
"steps_from_proto": {
"total": 51.33975827998984,
"count": 27278,
"is_parallel": true,
"self": 9.260755715980281,
"children": {
"_process_rank_one_or_two_observation": {
"total": 42.079002564009556,
"count": 272780,
"is_parallel": true,
"self": 42.079002564009556
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010633400006554439,
"count": 1,
"self": 0.00010633400006554439,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 684.0919438569363,
"count": 623524,
"is_parallel": true,
"self": 13.70914407186956,
"children": {
"process_trajectory": {
"total": 367.81647221106573,
"count": 623524,
"is_parallel": true,
"self": 366.1795820880658,
"children": {
"RLTrainer._checkpoint": {
"total": 1.6368901229999437,
"count": 6,
"is_parallel": true,
"self": 1.6368901229999437
}
}
},
"_update_policy": {
"total": 302.566327574001,
"count": 136,
"is_parallel": true,
"self": 122.20453180500476,
"children": {
"TorchPPOOptimizer.update": {
"total": 180.36179576899622,
"count": 6930,
"is_parallel": true,
"self": 180.36179576899622
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14146391600002062,
"count": 1,
"self": 0.0010155050001685595,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14044841099985206,
"count": 1,
"self": 0.14044841099985206
}
}
}
}
}
}
}