{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.37229585647583,
"min": 1.356644868850708,
"max": 3.2956838607788086,
"count": 5000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 24679.369140625,
"min": 24106.6640625,
"max": 127093.109375,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 62.94871794871795,
"min": 38.536,
"max": 999.0,
"count": 5000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19640.0,
"min": 1488.0,
"max": 31896.0,
"count": 5000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1634.4972260320167,
"min": 1192.5861837779885,
"max": 1719.327431954243,
"count": 4997
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 254981.5672609946,
"min": 2397.945535540182,
"max": 388526.83807022055,
"count": 4997
},
"SoccerTwos.Step.mean": {
"value": 49999936.0,
"min": 9858.0,
"max": 49999936.0,
"count": 5000
},
"SoccerTwos.Step.sum": {
"value": 49999936.0,
"min": 9858.0,
"max": 49999936.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.03524751216173172,
"min": -0.1419283151626587,
"max": 0.13715900480747223,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -5.498611927032471,
"min": -27.3740177154541,
"max": 30.860774993896484,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.03419635444879532,
"min": -0.1438683420419693,
"max": 0.13981013000011444,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -5.334631443023682,
"min": -28.098796844482422,
"max": 31.457279205322266,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.05703589549431434,
"min": -0.5631789470973768,
"max": 0.5324867918806256,
"count": 5000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -8.897599697113037,
"min": -67.57160007953644,
"max": 57.71280038356781,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.05703589549431434,
"min": -0.5631789470973768,
"max": 0.5324867918806256,
"count": 5000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -8.897599697113037,
"min": -67.57160007953644,
"max": 57.71280038356781,
"count": 5000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 5000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.018004724662750958,
"min": 0.01054110166466368,
"max": 0.02588704846954594,
"count": 2424
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.018004724662750958,
"min": 0.01054110166466368,
"max": 0.02588704846954594,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.09986016849676768,
"min": 0.0006819044734584168,
"max": 0.12943872114022573,
"count": 2424
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.09986016849676768,
"min": 0.0006819044734584168,
"max": 0.12943872114022573,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10056763912240664,
"min": 0.0006829419187852181,
"max": 0.13169152662158012,
"count": 2424
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10056763912240664,
"min": 0.0006829419187852181,
"max": 0.13169152662158012,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 2424
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2424
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 2424
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1714225033",
"python_version": "3.10.14 (main, Mar 21 2024, 16:24:04) [GCC 11.2.0]",
"command_line_arguments": "/home/c/miniconda3/envs/up/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./trained-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1",
"numpy_version": "1.23.5",
"end_time_seconds": "1714257075"
},
"total": 32042.219644875266,
"count": 1,
"self": 0.1668749786913395,
"children": {
"run_training.setup": {
"total": 0.009759558364748955,
"count": 1,
"self": 0.009759558364748955
},
"TrainerController.start_learning": {
"total": 32042.04301033821,
"count": 1,
"self": 33.378361814655364,
"children": {
"TrainerController._reset_env": {
"total": 2.8961273580789566,
"count": 250,
"self": 2.8961273580789566
},
"TrainerController.advance": {
"total": 32005.692719846033,
"count": 3446107,
"self": 32.78416582196951,
"children": {
"env_step": {
"total": 23995.466805511154,
"count": 3446107,
"self": 18154.09656541329,
"children": {
"SubprocessEnvManager._take_step": {
"total": 5820.598215113394,
"count": 3446107,
"self": 223.79183790739626,
"children": {
"TorchPolicy.evaluate": {
"total": 5596.806377205998,
"count": 6279176,
"self": 5596.806377205998
}
}
},
"workers": {
"total": 20.7720249844715,
"count": 3446107,
"self": 0.0,
"children": {
"worker_root": {
"total": 31990.585800303146,
"count": 3446107,
"is_parallel": true,
"self": 17552.246522780508,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0011001769453287125,
"count": 2,
"is_parallel": true,
"self": 0.0002501634880900383,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0008500134572386742,
"count": 8,
"is_parallel": true,
"self": 0.0008500134572386742
}
}
},
"UnityEnvironment.step": {
"total": 0.01166412141174078,
"count": 1,
"is_parallel": true,
"self": 0.00024144165217876434,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021494925022125244,
"count": 1,
"is_parallel": true,
"self": 0.00021494925022125244
},
"communicator.exchange": {
"total": 0.010524923913180828,
"count": 1,
"is_parallel": true,
"self": 0.010524923913180828
},
"steps_from_proto": {
"total": 0.000682806596159935,
"count": 2,
"is_parallel": true,
"self": 0.00013625714927911758,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005465494468808174,
"count": 8,
"is_parallel": true,
"self": 0.0005465494468808174
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 14438.161071165465,
"count": 3446106,
"is_parallel": true,
"self": 812.0409971578047,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 531.2291127564386,
"count": 3446106,
"is_parallel": true,
"self": 531.2291127564386
},
"communicator.exchange": {
"total": 10844.018346969038,
"count": 3446106,
"is_parallel": true,
"self": 10844.018346969038
},
"steps_from_proto": {
"total": 2250.8726142821833,
"count": 6892212,
"is_parallel": true,
"self": 418.27949241828173,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1832.5931218639016,
"count": 27568848,
"is_parallel": true,
"self": 1832.5931218639016
}
}
}
}
},
"steps_from_proto": {
"total": 0.17820635717362165,
"count": 498,
"is_parallel": true,
"self": 0.03396888542920351,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.14423747174441814,
"count": 1992,
"is_parallel": true,
"self": 0.14423747174441814
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 7977.441748512909,
"count": 3446107,
"self": 301.2638487685472,
"children": {
"process_trajectory": {
"total": 3493.1214481806383,
"count": 3446107,
"self": 3484.6526933172718,
"children": {
"RLTrainer._checkpoint": {
"total": 8.468754863366485,
"count": 100,
"self": 8.468754863366485
}
}
},
"_update_policy": {
"total": 4183.056451563723,
"count": 2424,
"self": 2655.9490358438343,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1527.107415719889,
"count": 72732,
"self": 1527.107415719889
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.7997961044311523e-07,
"count": 1,
"self": 3.7997961044311523e-07
},
"TrainerController._save_models": {
"total": 0.07580093946307898,
"count": 1,
"self": 0.0009878072887659073,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07481313217431307,
"count": 1,
"self": 0.07481313217431307
}
}
}
}
}
}
}