{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.2307558059692383,
"min": 3.189295530319214,
"max": 3.2956700325012207,
"count": 50
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 25846.046875,
"min": 17381.15625,
"max": 113628.8203125,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 868.8333333333334,
"min": 438.6363636363636,
"max": 999.0,
"count": 50
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20852.0,
"min": 4656.0,
"max": 30336.0,
"count": 50
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1198.1698407227689,
"min": 1190.897651941846,
"max": 1200.8234089738921,
"count": 45
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 2396.3396814455377,
"min": 2388.3316671163593,
"max": 19054.362431069538,
"count": 45
},
"SoccerTwos.Step.mean": {
"value": 499622.0,
"min": 9184.0,
"max": 499622.0,
"count": 50
},
"SoccerTwos.Step.sum": {
"value": 499622.0,
"min": 9184.0,
"max": 499622.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.007917487062513828,
"min": -0.021535025909543037,
"max": 0.06929423660039902,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -0.09500984102487564,
"min": -0.27282702922821045,
"max": 0.9701193571090698,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.007759167347103357,
"min": -0.025264939293265343,
"max": 0.06929536908864975,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.09311001002788544,
"min": -0.3537091612815857,
"max": 0.9701351523399353,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.13019999861717224,
"min": -0.5,
"max": 0.30142856921468464,
"count": 50
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.562399983406067,
"min": -6.0,
"max": 4.219999969005585,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.13019999861717224,
"min": -0.5,
"max": 0.30142856921468464,
"count": 50
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.562399983406067,
"min": -6.0,
"max": 4.219999969005585,
"count": 50
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019040864354853207,
"min": 0.012935439681556696,
"max": 0.021922126782532685,
"count": 22
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019040864354853207,
"min": 0.012935439681556696,
"max": 0.021922126782532685,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0012082137662218883,
"min": 3.267044758104021e-05,
"max": 0.006921788863837719,
"count": 22
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0012082137662218883,
"min": 3.267044758104021e-05,
"max": 0.006921788863837719,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0012087124516256154,
"min": 3.298023524015055e-05,
"max": 0.00709499263515075,
"count": 22
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0012087124516256154,
"min": 3.298023524015055e-05,
"max": 0.00709499263515075,
"count": 22
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 22
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 22
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 22
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 22
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705951555",
"python_version": "3.10.12 | packaged by Anaconda, Inc. | (main, Jul 5 2023, 19:01:18) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "\\\\?\\F:\\Conda\\envs\\RL\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1705953786"
},
"total": 2230.2840806999593,
"count": 1,
"self": 0.8073485999484546,
"children": {
"run_training.setup": {
"total": 0.13687530002789572,
"count": 1,
"self": 0.13687530002789572
},
"TrainerController.start_learning": {
"total": 2229.339856799983,
"count": 1,
"self": 1.0265328017994761,
"children": {
"TrainerController._reset_env": {
"total": 6.748816500068642,
"count": 3,
"self": 6.748816500068642
},
"TrainerController.advance": {
"total": 2221.1911213981803,
"count": 32618,
"self": 1.1233880972722545,
"children": {
"env_step": {
"total": 985.4585832996527,
"count": 32618,
"self": 770.8795617991127,
"children": {
"SubprocessEnvManager._take_step": {
"total": 213.88915079960134,
"count": 32618,
"self": 7.231876100355294,
"children": {
"TorchPolicy.evaluate": {
"total": 206.65727469924605,
"count": 64782,
"self": 206.65727469924605
}
}
},
"workers": {
"total": 0.6898707009386271,
"count": 32618,
"self": 0.0,
"children": {
"worker_root": {
"total": 2219.80031540012,
"count": 32618,
"is_parallel": true,
"self": 1610.1796274032677,
"children": {
"steps_from_proto": {
"total": 0.01154320000205189,
"count": 6,
"is_parallel": true,
"self": 0.003289099840912968,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008254100161138922,
"count": 24,
"is_parallel": true,
"self": 0.008254100161138922
}
}
},
"UnityEnvironment.step": {
"total": 609.6091447968502,
"count": 32618,
"is_parallel": true,
"self": 34.53558190073818,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.31031720264582,
"count": 32618,
"is_parallel": true,
"self": 30.31031720264582
},
"communicator.exchange": {
"total": 442.8577170924982,
"count": 32618,
"is_parallel": true,
"self": 442.8577170924982
},
"steps_from_proto": {
"total": 101.90552860096795,
"count": 65236,
"is_parallel": true,
"self": 20.771148308413103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.13438029255485,
"count": 260944,
"is_parallel": true,
"self": 81.13438029255485
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1234.6091500012553,
"count": 32618,
"self": 7.173557702219114,
"children": {
"process_trajectory": {
"total": 177.4076076990459,
"count": 32618,
"self": 177.02247139904648,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3851362999994308,
"count": 1,
"self": 0.3851362999994308
}
}
},
"_update_policy": {
"total": 1050.0279845999903,
"count": 22,
"self": 108.91440960031468,
"children": {
"TorchPOCAOptimizer.update": {
"total": 941.1135749996756,
"count": 672,
"self": 941.1135749996756
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.2999593056738377e-06,
"count": 1,
"self": 2.2999593056738377e-06
},
"TrainerController._save_models": {
"total": 0.37338379997527227,
"count": 1,
"self": 0.02222109993454069,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3511627000407316,
"count": 1,
"self": 0.3511627000407316
}
}
}
}
}
}
}