{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1133804321289062,
"min": 3.102661371231079,
"max": 3.2957539558410645,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 27597.00390625,
"min": 25997.5390625,
"max": 143487.828125,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 397.54545454545456,
"min": 199.2608695652174,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 17492.0,
"min": 16480.0,
"max": 25084.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1260.470114148714,
"min": 1200.84953632169,
"max": 1260.470114148714,
"count": 92
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 20167.521826379423,
"min": 2403.8802455184264,
"max": 55031.69379006797,
"count": 92
},
"SoccerTwos.Step.mean": {
"value": 999941.0,
"min": 9560.0,
"max": 999941.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999941.0,
"min": 9560.0,
"max": 999941.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.03812256455421448,
"min": -0.03356579691171646,
"max": 0.08793723583221436,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.8768189549446106,
"min": -0.607160210609436,
"max": 4.133049964904785,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.037627846002578735,
"min": -0.031118910759687424,
"max": 0.09117133915424347,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.8654404282569885,
"min": -0.5502427816390991,
"max": 4.28505277633667,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.30168695553489355,
"min": -0.4842333334187667,
"max": 0.5116765981024884,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 6.938799977302551,
"min": -10.953200101852417,
"max": 24.048800110816956,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.30168695553489355,
"min": -0.4842333334187667,
"max": 0.5116765981024884,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 6.938799977302551,
"min": -10.953200101852417,
"max": 24.048800110816956,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01982781187398359,
"min": 0.011926824614056386,
"max": 0.023017469389985005,
"count": 46
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01982781187398359,
"min": 0.011926824614056386,
"max": 0.023017469389985005,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.021284908739229044,
"min": 4.099821368678628e-05,
"max": 0.022463938097159068,
"count": 46
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.021284908739229044,
"min": 4.099821368678628e-05,
"max": 0.022463938097159068,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.02187429883827766,
"min": 4.1442839938099495e-05,
"max": 0.02337551526725292,
"count": 46
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.02187429883827766,
"min": 4.1442839938099495e-05,
"max": 0.02337551526725292,
"count": 46
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 46
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 46
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 46
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723019403",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos --run-id=SoccerTwosNew --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723021625"
},
"total": 2221.40331031,
"count": 1,
"self": 0.442573055000139,
"children": {
"run_training.setup": {
"total": 0.054269316000045364,
"count": 1,
"self": 0.054269316000045364
},
"TrainerController.start_learning": {
"total": 2220.906467939,
"count": 1,
"self": 1.5256875920467792,
"children": {
"TrainerController._reset_env": {
"total": 2.4312839539998095,
"count": 5,
"self": 2.4312839539998095
},
"TrainerController.advance": {
"total": 2216.7255466939537,
"count": 64894,
"self": 1.681418279042191,
"children": {
"env_step": {
"total": 1820.7204620000302,
"count": 64894,
"self": 1387.735235109335,
"children": {
"SubprocessEnvManager._take_step": {
"total": 432.04261078575337,
"count": 64894,
"self": 12.062182696675336,
"children": {
"TorchPolicy.evaluate": {
"total": 419.98042808907803,
"count": 128466,
"self": 419.98042808907803
}
}
},
"workers": {
"total": 0.9426161049418624,
"count": 64894,
"self": 0.0,
"children": {
"worker_root": {
"total": 2216.235230492861,
"count": 64894,
"is_parallel": true,
"self": 1056.4223267198568,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003293138000117324,
"count": 2,
"is_parallel": true,
"self": 0.0008867410006132559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002406396999504068,
"count": 8,
"is_parallel": true,
"self": 0.002406396999504068
}
}
},
"UnityEnvironment.step": {
"total": 0.042546780000066065,
"count": 1,
"is_parallel": true,
"self": 0.0012115459999222367,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008224739999604935,
"count": 1,
"is_parallel": true,
"self": 0.0008224739999604935
},
"communicator.exchange": {
"total": 0.03540516400016713,
"count": 1,
"is_parallel": true,
"self": 0.03540516400016713
},
"steps_from_proto": {
"total": 0.005107596000016201,
"count": 2,
"is_parallel": true,
"self": 0.0005829379997521755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.004524658000264026,
"count": 8,
"is_parallel": true,
"self": 0.004524658000264026
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1159.8032615460043,
"count": 64893,
"is_parallel": true,
"self": 70.22577767103985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 49.13416495598494,
"count": 64893,
"is_parallel": true,
"self": 49.13416495598494
},
"communicator.exchange": {
"total": 811.3958589559675,
"count": 64893,
"is_parallel": true,
"self": 811.3958589559675
},
"steps_from_proto": {
"total": 229.047459963012,
"count": 129786,
"is_parallel": true,
"self": 37.85822161413034,
"children": {
"_process_rank_one_or_two_observation": {
"total": 191.18923834888164,
"count": 519144,
"is_parallel": true,
"self": 191.18923834888164
}
}
}
}
},
"steps_from_proto": {
"total": 0.009642226999858394,
"count": 8,
"is_parallel": true,
"self": 0.0018408130008538137,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0078014139990045805,
"count": 32,
"is_parallel": true,
"self": 0.0078014139990045805
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 394.32366641488125,
"count": 64894,
"self": 13.735710694818863,
"children": {
"process_trajectory": {
"total": 112.82515536706251,
"count": 64894,
"self": 112.41186320606266,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4132921609998448,
"count": 2,
"self": 0.4132921609998448
}
}
},
"_update_policy": {
"total": 267.7628003529999,
"count": 46,
"self": 158.34779366198472,
"children": {
"TorchPOCAOptimizer.update": {
"total": 109.41500669101515,
"count": 1380,
"self": 109.41500669101515
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.049999789567664e-07,
"count": 1,
"self": 9.049999789567664e-07
},
"TrainerController._save_models": {
"total": 0.2239487939996252,
"count": 1,
"self": 0.0017336199998680968,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2222151739997571,
"count": 1,
"self": 0.2222151739997571
}
}
}
}
}
}
}