{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.591202735900879,
"min": 1.576461672782898,
"max": 3.295790672302246,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 63037.08984375,
"min": 17617.28515625,
"max": 957413.5,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 17.586206896551722,
"min": 16.62230215827338,
"max": 358.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18360.0,
"min": 15132.0,
"max": 24024.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1447.0320658846629,
"min": 1154.6932713299086,
"max": 1466.8112458584237,
"count": 999
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 755350.738391794,
"min": 7107.293939842906,
"max": 814234.7723575226,
"count": 999
},
"SoccerTwos.Step.mean": {
"value": 9999995.0,
"min": 9957.0,
"max": 9999995.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999995.0,
"min": 9957.0,
"max": 9999995.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.015528060495853424,
"min": -0.10294513404369354,
"max": 0.18965570628643036,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 8.214344024658203,
"min": -42.497703552246094,
"max": 61.23341369628906,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.014794806018471718,
"min": -0.10167635232210159,
"max": 0.1922016590833664,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 7.826452255249023,
"min": -42.01740264892578,
"max": 59.716331481933594,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.04080340305663688,
"min": -0.4218624997884035,
"max": 0.48153333283132976,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 21.585000216960907,
"min": -89.35180050134659,
"max": 125.87880036234856,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.04080340305663688,
"min": -0.4218624997884035,
"max": 0.48153333283132976,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 21.585000216960907,
"min": -89.35180050134659,
"max": 125.87880036234856,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019490288386684065,
"min": 0.01022618855155694,
"max": 0.02483719602654067,
"count": 484
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019490288386684065,
"min": 0.01022618855155694,
"max": 0.02483719602654067,
"count": 484
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10220161899924278,
"min": 0.00556322493745635,
"max": 0.11632043123245239,
"count": 484
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10220161899924278,
"min": 0.00556322493745635,
"max": 0.11632043123245239,
"count": 484
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10278775369127592,
"min": 0.005624144431203603,
"max": 0.11713258946935336,
"count": 484
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10278775369127592,
"min": 0.005624144431203603,
"max": 0.11713258946935336,
"count": 484
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 1.205799397600006e-07,
"min": 1.205799397600006e-07,
"max": 0.00019958164020917997,
"count": 484
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 1.205799397600006e-07,
"min": 1.205799397600006e-07,
"max": 0.00019958164020917997,
"count": 484
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 484
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.25,
"min": 0.25,
"max": 0.25,
"count": 484
},
"SoccerTwos.Policy.Beta.mean": {
"value": 1.6017976000000026e-05,
"min": 1.6017976000000026e-05,
"max": 0.009979102918000004,
"count": 484
},
"SoccerTwos.Policy.Beta.sum": {
"value": 1.6017976000000026e-05,
"min": 1.6017976000000026e-05,
"max": 0.009979102918000004,
"count": 484
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1693333901",
"python_version": "3.9.17 (main, Jul 5 2023, 20:41:20) \n[GCC 11.2.0]",
"command_line_arguments": "/home/mgmeskill/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=Downstrike-10M --no-graphics --num-envs=32",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1693346598"
},
"total": 12696.901499327272,
"count": 1,
"self": 0.8128237212076783,
"children": {
"run_training.setup": {
"total": 0.1724072340875864,
"count": 1,
"self": 0.1724072340875864
},
"TrainerController.start_learning": {
"total": 12695.916268371977,
"count": 1,
"self": 4.1688310210593045,
"children": {
"TrainerController._reset_env": {
"total": 146.85403133556247,
"count": 495,
"self": 146.85403133556247
},
"TrainerController.advance": {
"total": 12544.567460204009,
"count": 55699,
"self": 1.6526970262639225,
"children": {
"env_step": {
"total": 8165.541931728367,
"count": 55699,
"self": 2764.078356688842,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5394.85639378801,
"count": 1601157,
"self": 131.83333107223734,
"children": {
"TorchPolicy.evaluate": {
"total": 5263.0230627157725,
"count": 2932254,
"self": 5263.0230627157725
}
}
},
"workers": {
"total": 6.607181251514703,
"count": 55699,
"self": 0.0,
"children": {
"worker_root": {
"total": 405506.96444705594,
"count": 1599583,
"is_parallel": true,
"self": 386161.60350995045,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.07886765897274017,
"count": 64,
"is_parallel": true,
"self": 0.0166148510761559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.06225280789658427,
"count": 256,
"is_parallel": true,
"self": 0.06225280789658427
}
}
},
"UnityEnvironment.step": {
"total": 0.7396135642193258,
"count": 32,
"is_parallel": true,
"self": 0.020251302514225245,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.01759060798212886,
"count": 32,
"is_parallel": true,
"self": 0.01759060798212886
},
"communicator.exchange": {
"total": 0.645212515257299,
"count": 32,
"is_parallel": true,
"self": 0.645212515257299
},
"steps_from_proto": {
"total": 0.05655913846567273,
"count": 64,
"is_parallel": true,
"self": 0.01000463031232357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.04655450815334916,
"count": 256,
"is_parallel": true,
"self": 0.04655450815334916
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 19305.8035399043,
"count": 1599551,
"is_parallel": true,
"self": 1247.6211247742176,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 786.1591690224595,
"count": 1599551,
"is_parallel": true,
"self": 786.1591690224595
},
"communicator.exchange": {
"total": 13873.461208882276,
"count": 1599551,
"is_parallel": true,
"self": 13873.461208882276
},
"steps_from_proto": {
"total": 3398.562037225347,
"count": 3199102,
"is_parallel": true,
"self": 576.9713781671599,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2821.590659058187,
"count": 12796408,
"is_parallel": true,
"self": 2821.590659058187
}
}
}
}
},
"steps_from_proto": {
"total": 39.557397201191634,
"count": 31616,
"is_parallel": true,
"self": 6.698158380575478,
"children": {
"_process_rank_one_or_two_observation": {
"total": 32.859238820616156,
"count": 126464,
"is_parallel": true,
"self": 32.859238820616156
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4377.372831449378,
"count": 55699,
"self": 60.90775148430839,
"children": {
"process_trajectory": {
"total": 2136.620560474228,
"count": 55699,
"self": 2135.02693261439,
"children": {
"RLTrainer._checkpoint": {
"total": 1.593627859838307,
"count": 5,
"self": 1.593627859838307
}
}
},
"_update_policy": {
"total": 2179.844519490842,
"count": 484,
"self": 1224.6795570533723,
"children": {
"TorchPOCAOptimizer.update": {
"total": 955.1649624374695,
"count": 14520,
"self": 955.1649624374695
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.513429641723633e-07,
"count": 1,
"self": 5.513429641723633e-07
},
"TrainerController._save_models": {
"total": 0.3259452600032091,
"count": 1,
"self": 0.0019989372231066227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3239463227801025,
"count": 1,
"self": 0.3239463227801025
}
}
}
}
}
}
}