{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7232797145843506,
"min": 1.6728121042251587,
"max": 3.2957565784454346,
"count": 730
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 33748.7109375,
"min": 22667.44140625,
"max": 116905.34375,
"count": 730
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 49.66326530612245,
"min": 41.3109243697479,
"max": 999.0,
"count": 730
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19468.0,
"min": 7992.0,
"max": 29072.0,
"count": 730
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1599.9123849400958,
"min": 1196.755694073353,
"max": 1613.0573391639393,
"count": 726
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 313582.82744825876,
"min": 2395.525073055738,
"max": 379673.1886972042,
"count": 726
},
"SoccerTwos.Step.mean": {
"value": 7299990.0,
"min": 9152.0,
"max": 7299990.0,
"count": 730
},
"SoccerTwos.Step.sum": {
"value": 7299990.0,
"min": 9152.0,
"max": 7299990.0,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.019435759633779526,
"min": -0.10889838635921478,
"max": 0.1597670614719391,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 3.8288447856903076,
"min": -16.661453247070312,
"max": 24.444360733032227,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.01677960343658924,
"min": -0.10967861860990524,
"max": 0.16584596037864685,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 3.30558180809021,
"min": -17.04721450805664,
"max": 25.374431610107422,
"count": 730
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 730
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.023916751600159002,
"min": -0.5714285714285714,
"max": 0.4763999957787363,
"count": 730
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 4.711600065231323,
"min": -49.76760023832321,
"max": 56.06599974632263,
"count": 730
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.023916751600159002,
"min": -0.5714285714285714,
"max": 0.4763999957787363,
"count": 730
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 4.711600065231323,
"min": -49.76760023832321,
"max": 56.06599974632263,
"count": 730
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 730
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 730
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016865803671923155,
"min": 0.00979326242231764,
"max": 0.023715622761907678,
"count": 352
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016865803671923155,
"min": 0.00979326242231764,
"max": 0.023715622761907678,
"count": 352
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.13096600820620855,
"min": 0.0002933660138902875,
"max": 0.13096600820620855,
"count": 352
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.13096600820620855,
"min": 0.0002933660138902875,
"max": 0.13096600820620855,
"count": 352
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.133536392947038,
"min": 0.0002905653440393507,
"max": 0.133536392947038,
"count": 352
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.133536392947038,
"min": 0.0002905653440393507,
"max": 0.133536392947038,
"count": 352
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 352
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 352
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 352
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 352
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 352
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 352
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1733036235",
"python_version": "3.10.12 (main, Jul 5 2023, 15:34:07) [Clang 14.0.6 ]",
"command_line_arguments": "/Users/phani/opt/anaconda3/envs/mlagents/bin/mlagents-learn /Users/phani/Desktop/DRL Project/ml-agents/config/poca/SoccerTwos.yaml --env=/Users/phani/Desktop/DRL Project/ml-agents/training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos1 --no-graphics",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2",
"numpy_version": "1.23.5",
"end_time_seconds": "1733076092"
},
"total": 39857.47461125,
"count": 1,
"self": 1.2161720830044942,
"children": {
"run_training.setup": {
"total": 0.04163325000263285,
"count": 1,
"self": 0.04163325000263285
},
"TrainerController.start_learning": {
"total": 39856.21680591699,
"count": 1,
"self": 9.281200823315885,
"children": {
"TrainerController._reset_env": {
"total": 7.424017751007341,
"count": 37,
"self": 7.424017751007341
},
"TrainerController.advance": {
"total": 39839.31304496767,
"count": 504742,
"self": 7.693917524651624,
"children": {
"env_step": {
"total": 31149.273974980213,
"count": 504742,
"self": 29732.165288989578,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1410.9758962121123,
"count": 504742,
"self": 40.33518818105222,
"children": {
"TorchPolicy.evaluate": {
"total": 1370.64070803106,
"count": 919002,
"self": 1370.64070803106
}
}
},
"workers": {
"total": 6.132789778523147,
"count": 504741,
"self": 0.0,
"children": {
"worker_root": {
"total": 39837.38792682841,
"count": 504741,
"is_parallel": true,
"self": 11060.15326839345,
"children": {
"steps_from_proto": {
"total": 0.08274971108767204,
"count": 74,
"is_parallel": true,
"self": 0.010355452046496794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.07239425904117525,
"count": 296,
"is_parallel": true,
"self": 0.07239425904117525
}
}
},
"UnityEnvironment.step": {
"total": 28777.15190872387,
"count": 504741,
"is_parallel": true,
"self": 87.74790836765897,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 510.94240672825254,
"count": 504741,
"is_parallel": true,
"self": 510.94240672825254
},
"communicator.exchange": {
"total": 27144.328013899998,
"count": 504741,
"is_parallel": true,
"self": 27144.328013899998
},
"steps_from_proto": {
"total": 1034.1335797279608,
"count": 1009482,
"is_parallel": true,
"self": 128.67839261557674,
"children": {
"_process_rank_one_or_two_observation": {
"total": 905.455187112384,
"count": 4037928,
"is_parallel": true,
"self": 905.455187112384
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 8682.345152462804,
"count": 504741,
"self": 72.98913394028204,
"children": {
"process_trajectory": {
"total": 1633.2521751615423,
"count": 504741,
"self": 1630.118024870535,
"children": {
"RLTrainer._checkpoint": {
"total": 3.134150291007245,
"count": 14,
"self": 3.134150291007245
}
}
},
"_update_policy": {
"total": 6976.103843360979,
"count": 353,
"self": 710.8879533162981,
"children": {
"TorchPOCAOptimizer.update": {
"total": 6265.215890044681,
"count": 10602,
"self": 6265.215890044681
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.33999365568161e-07,
"count": 1,
"self": 8.33999365568161e-07
},
"TrainerController._save_models": {
"total": 0.19854154100175947,
"count": 1,
"self": 0.005812207993585616,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19272933300817385,
"count": 1,
"self": 0.19272933300817385
}
}
}
}
}
}
}