poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.5289312601089478,
"min": 1.4429744482040405,
"max": 1.6524065732955933,
"count": 437
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31508.21484375,
"min": 10421.470703125,
"max": 36688.703125,
"count": 437
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 65.6103896103896,
"min": 48.48979591836735,
"max": 114.5111111111111,
"count": 437
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20208.0,
"min": 4812.0,
"max": 21704.0,
"count": 437
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1591.266926676015,
"min": 1525.7500144565668,
"max": 1634.7649431157508,
"count": 437
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 245055.1067081063,
"min": 60774.17314216623,
"max": 313938.9115618683,
"count": 437
},
"SoccerTwos.Step.mean": {
"value": 18089958.0,
"min": 13729936.0,
"max": 18089958.0,
"count": 437
},
"SoccerTwos.Step.sum": {
"value": 18089958.0,
"min": 13729936.0,
"max": 18089958.0,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.007700234651565552,
"min": -0.13673512637615204,
"max": 0.07944316416978836,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 1.178135871887207,
"min": -18.185771942138672,
"max": 12.552020072937012,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.007211917545646429,
"min": -0.13792335987091064,
"max": 0.081109918653965,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.1034233570098877,
"min": -18.343807220458984,
"max": 12.815367698669434,
"count": 437
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 437
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.03309281277500726,
"min": -0.3374048784011748,
"max": 0.32358507300490763,
"count": 437
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -5.063200354576111,
"min": -55.334400057792664,
"max": 43.36039978265762,
"count": 437
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.03309281277500726,
"min": -0.3374048784011748,
"max": 0.32358507300490763,
"count": 437
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -5.063200354576111,
"min": -55.334400057792664,
"max": 43.36039978265762,
"count": 437
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 437
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 437
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019048793388841054,
"min": 0.01202231189866628,
"max": 0.023315804599163433,
"count": 211
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019048793388841054,
"min": 0.01202231189866628,
"max": 0.023315804599163433,
"count": 211
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10422521059711774,
"min": 0.07008710379401843,
"max": 0.10756902545690536,
"count": 211
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10422521059711774,
"min": 0.07008710379401843,
"max": 0.10756902545690536,
"count": 211
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10599818527698517,
"min": 0.07135874889791012,
"max": 0.10912942091623942,
"count": 211
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10599818527698517,
"min": 0.07135874889791012,
"max": 0.10912942091623942,
"count": 211
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 211
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 211
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 211
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 211
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 211
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 211
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702620132",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/home/samil/anaconda3/envs/unity/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --resume --torch-device cuda",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702627737"
},
"total": 7605.147426823,
"count": 1,
"self": 0.42629649300033634,
"children": {
"run_training.setup": {
"total": 0.044025450999995996,
"count": 1,
"self": 0.044025450999995996
},
"TrainerController.start_learning": {
"total": 7604.677104879,
"count": 1,
"self": 6.384801841059016,
"children": {
"TrainerController._reset_env": {
"total": 3.543140044001973,
"count": 23,
"self": 3.543140044001973
},
"TrainerController.advance": {
"total": 7594.35423294594,
"count": 299615,
"self": 6.412759897923934,
"children": {
"env_step": {
"total": 5653.755548863008,
"count": 299615,
"self": 4278.739194771819,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1370.8643595179715,
"count": 299615,
"self": 35.6024519718776,
"children": {
"TorchPolicy.evaluate": {
"total": 1335.2619075460939,
"count": 548149,
"self": 1335.2619075460939
}
}
},
"workers": {
"total": 4.151994573217664,
"count": 299614,
"self": 0.0,
"children": {
"worker_root": {
"total": 7596.223032310018,
"count": 299614,
"is_parallel": true,
"self": 4014.40834585581,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025739310000005844,
"count": 2,
"is_parallel": true,
"self": 0.001017920999998978,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015560100000016064,
"count": 8,
"is_parallel": true,
"self": 0.0015560100000016064
}
}
},
"UnityEnvironment.step": {
"total": 0.02281602400000793,
"count": 1,
"is_parallel": true,
"self": 0.0005066710000249941,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003378149999946345,
"count": 1,
"is_parallel": true,
"self": 0.0003378149999946345
},
"communicator.exchange": {
"total": 0.02049500199998988,
"count": 1,
"is_parallel": true,
"self": 0.02049500199998988
},
"steps_from_proto": {
"total": 0.0014765359999984184,
"count": 2,
"is_parallel": true,
"self": 0.0002987140000243471,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011778219999740713,
"count": 8,
"is_parallel": true,
"self": 0.0011778219999740713
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.03414245899863033,
"count": 44,
"is_parallel": true,
"self": 0.006506948000961188,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.027635510997669144,
"count": 176,
"is_parallel": true,
"self": 0.027635510997669144
}
}
},
"UnityEnvironment.step": {
"total": 3581.78054399521,
"count": 299613,
"is_parallel": true,
"self": 196.83538217598698,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 106.80310568586641,
"count": 299613,
"is_parallel": true,
"self": 106.80310568586641
},
"communicator.exchange": {
"total": 2738.338005463118,
"count": 299613,
"is_parallel": true,
"self": 2738.338005463118
},
"steps_from_proto": {
"total": 539.8040506702387,
"count": 599226,
"is_parallel": true,
"self": 100.28991740625202,
"children": {
"_process_rank_one_or_two_observation": {
"total": 439.51413326398665,
"count": 2396904,
"is_parallel": true,
"self": 439.51413326398665
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1934.1859241850077,
"count": 299614,
"self": 41.93469052002342,
"children": {
"process_trajectory": {
"total": 736.4015649759863,
"count": 299614,
"self": 733.8347796079853,
"children": {
"RLTrainer._checkpoint": {
"total": 2.5667853680009785,
"count": 9,
"self": 2.5667853680009785
}
}
},
"_update_policy": {
"total": 1155.849668688998,
"count": 211,
"self": 526.311828841966,
"children": {
"TorchPOCAOptimizer.update": {
"total": 629.537839847032,
"count": 6330,
"self": 629.537839847032
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3119997674948536e-06,
"count": 1,
"self": 1.3119997674948536e-06
},
"TrainerController._save_models": {
"total": 0.3949287359992013,
"count": 1,
"self": 0.0016341169994120719,
"children": {
"RLTrainer._checkpoint": {
"total": 0.39329461899978924,
"count": 1,
"self": 0.39329461899978924
}
}
}
}
}
}
}
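
The file above is the standard ML-Agents timer dump: a `gauges` map of recorded training metrics (value/min/max/count) plus a nested timer tree where each node carries `total`, `self`, `count`, and `children` in seconds of wall-clock time. Below is a minimal sketch of how such a file could be inspected offline; it assumes the JSON is saved locally at `run_logs/timers.json`, and the `walk` helper and the minimum-seconds threshold are illustrative choices, not part of the ML-Agents toolkit.

```python
import json

def walk(name, node, depth=0, min_seconds=1.0):
    """Recursively print the timer tree, skipping entries under `min_seconds`."""
    total = node.get("total", 0.0)
    if total >= min_seconds:
        print(f"{'  ' * depth}{name}: {total:.1f}s "
              f"(self {node.get('self', 0.0):.1f}s, count {node.get('count', 0)})")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1, min_seconds)

# Assumed local path; adjust to wherever the run logs were written.
with open("run_logs/timers.json") as f:
    root = json.load(f)

# Final value and observed range of a few gauges recorded by mlagents-learn.
for key in ("SoccerTwos.Self-play.ELO.mean", "SoccerTwos.Step.mean"):
    gauge = root["gauges"][key]
    print(f"{key}: value={gauge['value']}, min={gauge['min']}, max={gauge['max']}")

# Where the ~7605 s of wall-clock time went (env stepping vs. policy updates).
walk("root", root)
```

Reading the tree this way makes the cost breakdown visible at a glance: most of the run is spent in `env_step` (dominated by `communicator.exchange` with the Unity executable), while `_update_policy` / `TorchPOCAOptimizer.update` accounts for the remainder.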