{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7278201580047607,
"min": 1.6628668308258057,
"max": 3.295703172683716,
"count": 954
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33782.33984375,
"min": 12201.880859375,
"max": 134216.015625,
"count": 954
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 50.34020618556701,
"min": 41.678260869565214,
"max": 999.0,
"count": 954
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19532.0,
"min": 15712.0,
"max": 27668.0,
"count": 954
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1647.965262524312,
"min": 1196.1047218020362,
"max": 1688.2245775823185,
"count": 903
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 319705.2609297165,
"min": 2396.1576023570833,
"max": 384701.09159814287,
"count": 903
},
"SoccerTwos.Step.mean": {
"value": 9539960.0,
"min": 9068.0,
"max": 9539960.0,
"count": 954
},
"SoccerTwos.Step.sum": {
"value": 9539960.0,
"min": 9068.0,
"max": 9539960.0,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.018244201317429543,
"min": -0.1156419888138771,
"max": 0.16793082654476166,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.557619333267212,
"min": -20.468631744384766,
"max": 22.617860794067383,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.01555158942937851,
"min": -0.11710312217473984,
"max": 0.16392634809017181,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -3.032559871673584,
"min": -20.727252960205078,
"max": 22.099905014038086,
"count": 954
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 954
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.08950256598301423,
"min": -0.6153846153846154,
"max": 0.5541454524817792,
"count": 954
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 17.453000366687775,
"min": -59.74439990520477,
"max": 55.00579994916916,
"count": 954
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.08950256598301423,
"min": -0.6153846153846154,
"max": 0.5541454524817792,
"count": 954
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 17.453000366687775,
"min": -59.74439990520477,
"max": 55.00579994916916,
"count": 954
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 954
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 954
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016191855875270753,
"min": 0.010119652145294821,
"max": 0.027294784776555996,
"count": 456
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016191855875270753,
"min": 0.010119652145294821,
"max": 0.027294784776555996,
"count": 456
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11083811670541763,
"min": 7.290177548687401e-06,
"max": 0.12418492510914803,
"count": 456
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11083811670541763,
"min": 7.290177548687401e-06,
"max": 0.12418492510914803,
"count": 456
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11203602353731791,
"min": 7.066311627568211e-06,
"max": 0.12705841287970543,
"count": 456
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11203602353731791,
"min": 7.066311627568211e-06,
"max": 0.12705841287970543,
"count": 456
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 456
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 456
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 456
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 456
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 456
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 456
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1688634451",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --force --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1688652320"
},
"total": 17869.137366616,
"count": 1,
"self": 0.3206322519981768,
"children": {
"run_training.setup": {
"total": 0.03911399499975232,
"count": 1,
"self": 0.03911399499975232
},
"TrainerController.start_learning": {
"total": 17868.777620369,
"count": 1,
"self": 16.483394439044787,
"children": {
"TrainerController._reset_env": {
"total": 7.0819778290087925,
"count": 48,
"self": 7.0819778290087925
},
"TrainerController.advance": {
"total": 17845.209798765947,
"count": 645962,
"self": 16.812994569594593,
"children": {
"env_step": {
"total": 13727.771489084129,
"count": 645962,
"self": 10615.24885410071,
"children": {
"SubprocessEnvManager._take_step": {
"total": 3102.6773212078597,
"count": 645962,
"self": 99.56094111042876,
"children": {
"TorchPolicy.evaluate": {
"total": 3003.116380097431,
"count": 1211068,
"self": 3003.116380097431
}
}
},
"workers": {
"total": 9.845313775559134,
"count": 645961,
"self": 0.0,
"children": {
"worker_root": {
"total": 17840.96294775486,
"count": 645961,
"is_parallel": true,
"self": 9069.234338501756,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002843189000031998,
"count": 2,
"is_parallel": true,
"self": 0.0007146510001803108,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0021285379998516873,
"count": 8,
"is_parallel": true,
"self": 0.0021285379998516873
}
}
},
"UnityEnvironment.step": {
"total": 0.0313164669996695,
"count": 1,
"is_parallel": true,
"self": 0.000678715999583801,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004814829999304493,
"count": 1,
"is_parallel": true,
"self": 0.0004814829999304493
},
"communicator.exchange": {
"total": 0.028125127999828692,
"count": 1,
"is_parallel": true,
"self": 0.028125127999828692
},
"steps_from_proto": {
"total": 0.002031140000326559,
"count": 2,
"is_parallel": true,
"self": 0.0003818279992628959,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001649312001063663,
"count": 8,
"is_parallel": true,
"self": 0.001649312001063663
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 8771.629741006109,
"count": 645960,
"is_parallel": true,
"self": 480.40989268901103,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 303.55443382467183,
"count": 645960,
"is_parallel": true,
"self": 303.55443382467183
},
"communicator.exchange": {
"total": 6500.537959241616,
"count": 645960,
"is_parallel": true,
"self": 6500.537959241616
},
"steps_from_proto": {
"total": 1487.1274552508094,
"count": 1291920,
"is_parallel": true,
"self": 253.90923089256376,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1233.2182243582456,
"count": 5167680,
"is_parallel": true,
"self": 1233.2182243582456
}
}
}
}
},
"steps_from_proto": {
"total": 0.09886824699515273,
"count": 94,
"is_parallel": true,
"self": 0.017682320009043906,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08118592698610883,
"count": 376,
"is_parallel": true,
"self": 0.08118592698610883
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 4100.625315112222,
"count": 645961,
"self": 133.12492740823745,
"children": {
"process_trajectory": {
"total": 1526.1360445459832,
"count": 645961,
"self": 1521.3299940979714,
"children": {
"RLTrainer._checkpoint": {
"total": 4.806050448011774,
"count": 19,
"self": 4.806050448011774
}
}
},
"_update_policy": {
"total": 2441.3643431580012,
"count": 456,
"self": 1667.2157048950467,
"children": {
"TorchPOCAOptimizer.update": {
"total": 774.1486382629546,
"count": 13689,
"self": 774.1486382629546
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.295997208217159e-06,
"count": 1,
"self": 1.295997208217159e-06
},
"TrainerController._save_models": {
"total": 0.002448039002047153,
"count": 1,
"self": 4.8124002205440775e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0023999149998417124,
"count": 1,
"self": 0.0023999149998417124
}
}
}
}
}
}
}