{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.2740094661712646,
"min": 2.2740094661712646,
"max": 3.228038787841797,
"count": 696
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 45916.80078125,
"min": 18638.580078125,
"max": 153739.875,
"count": 696
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 57.46511627906977,
"min": 42.6283185840708,
"max": 999.0,
"count": 696
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19768.0,
"min": 4868.0,
"max": 31936.0,
"count": 696
},
"SoccerTwos.Step.mean": {
"value": 8969983.0,
"min": 2019798.0,
"max": 8969983.0,
"count": 696
},
"SoccerTwos.Step.sum": {
"value": 8969983.0,
"min": 2019798.0,
"max": 8969983.0,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.07560273259878159,
"min": -0.10971996933221817,
"max": 0.14901091158390045,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 13.079273223876953,
"min": -20.059728622436523,
"max": 21.654645919799805,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.07761983573436737,
"min": -0.11212461441755295,
"max": 0.14597201347351074,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 13.428232192993164,
"min": -20.00739288330078,
"max": 22.211605072021484,
"count": 696
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 696
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.13010982626435386,
"min": -0.4565333366394043,
"max": 0.494380822736923,
"count": 696
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 22.508999943733215,
"min": -51.88040006160736,
"max": 48.67599993944168,
"count": 696
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.13010982626435386,
"min": -0.4565333366394043,
"max": 0.494380822736923,
"count": 696
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 22.508999943733215,
"min": -51.88040006160736,
"max": 48.67599993944168,
"count": 696
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 696
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 696
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1585.5849241573699,
"min": 1199.2463933028143,
"max": 1588.8697034742543,
"count": 571
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 272720.6069550676,
"min": 2398.4927866056287,
"max": 352162.9990049357,
"count": 571
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01742247586371377,
"min": 0.010967248125234618,
"max": 0.02606669742478213,
"count": 330
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01742247586371377,
"min": 0.010967248125234618,
"max": 0.02606669742478213,
"count": 330
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0973432794213295,
"min": 6.853106962504778e-07,
"max": 0.12235625262061754,
"count": 330
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0973432794213295,
"min": 6.853106962504778e-07,
"max": 0.12235625262061754,
"count": 330
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.09803293993075689,
"min": 6.409208026525448e-07,
"max": 0.12313174655040106,
"count": 330
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.09803293993075689,
"min": 6.409208026525448e-07,
"max": 0.12313174655040106,
"count": 330
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 9.999999999999998e-05,
"min": 9.999999999999998e-05,
"max": 9.999999999999998e-05,
"count": 330
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 9.999999999999998e-05,
"min": 9.999999999999998e-05,
"max": 9.999999999999998e-05,
"count": 330
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 330
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 330
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 330
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 330
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1709789801",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\C:\\Users\\Aaditya\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1709804800"
},
"total": 14998.465334699998,
"count": 1,
"self": 0.015444200005731545,
"children": {
"run_training.setup": {
"total": 0.11793189999298193,
"count": 1,
"self": 0.11793189999298193
},
"TrainerController.start_learning": {
"total": 14998.3319586,
"count": 1,
"self": 8.947752205378492,
"children": {
"TrainerController._reset_env": {
"total": 5.394287500006612,
"count": 36,
"self": 5.394287500006612
},
"TrainerController.advance": {
"total": 14983.760543194614,
"count": 467410,
"self": 9.272817794961156,
"children": {
"env_step": {
"total": 6830.886467399556,
"count": 467410,
"self": 5375.050923096322,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1450.3706617035787,
"count": 467410,
"self": 52.50276399748691,
"children": {
"TorchPolicy.evaluate": {
"total": 1397.8678977060918,
"count": 884956,
"self": 1397.8678977060918
}
}
},
"workers": {
"total": 5.464882599655539,
"count": 467410,
"self": 0.0,
"children": {
"worker_root": {
"total": 14974.810174294224,
"count": 467410,
"is_parallel": true,
"self": 10712.989388593196,
"children": {
"steps_from_proto": {
"total": 0.04524560002028011,
"count": 72,
"is_parallel": true,
"self": 0.009543099964503199,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03570250005577691,
"count": 288,
"is_parallel": true,
"self": 0.03570250005577691
}
}
},
"UnityEnvironment.step": {
"total": 4261.775540101007,
"count": 467410,
"is_parallel": true,
"self": 221.855687090836,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 199.8188456112839,
"count": 467410,
"is_parallel": true,
"self": 199.8188456112839
},
"communicator.exchange": {
"total": 3147.2272275012656,
"count": 467410,
"is_parallel": true,
"self": 3147.2272275012656
},
"steps_from_proto": {
"total": 692.8737798976217,
"count": 934820,
"is_parallel": true,
"self": 140.57939679456467,
"children": {
"_process_rank_one_or_two_observation": {
"total": 552.294383103057,
"count": 3739280,
"is_parallel": true,
"self": 552.294383103057
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8143.601258000097,
"count": 467410,
"self": 61.38530629934394,
"children": {
"process_trajectory": {
"total": 1550.7408510009118,
"count": 467410,
"self": 1549.6047140009032,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1361370000086026,
"count": 13,
"self": 1.1361370000086026
}
}
},
"_update_policy": {
"total": 6531.475100699841,
"count": 331,
"self": 656.8360211991239,
"children": {
"TorchPOCAOptimizer.update": {
"total": 5874.639079500717,
"count": 9904,
"self": 5874.639079500717
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.0000006770715117e-06,
"count": 1,
"self": 2.0000006770715117e-06
},
"TrainerController._save_models": {
"total": 0.22937369999999646,
"count": 1,
"self": 0.014897799992468208,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21447590000752825,
"count": 1,
"self": 0.21447590000752825
}
}
}
}
}
}
}