{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.899707317352295,
"min": 1.8666845560073853,
"max": 2.2551064491271973,
"count": 206
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 36231.21875,
"min": 14427.431640625,
"max": 48437.265625,
"count": 206
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 60.109756097560975,
"min": 43.75229357798165,
"max": 80.62295081967213,
"count": 206
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19716.0,
"min": 5484.0,
"max": 20484.0,
"count": 206
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1270.6737923228902,
"min": 1262.5812209069436,
"max": 1304.2159453733468,
"count": 206
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 208390.501940954,
"min": 60869.15219965749,
"max": 275722.76527779544,
"count": 206
},
"SoccerTwos.Step.mean": {
"value": 10649974.0,
"min": 8599961.0,
"max": 10649974.0,
"count": 206
},
"SoccerTwos.Step.sum": {
"value": 10649974.0,
"min": 8599961.0,
"max": 10649974.0,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.012548467144370079,
"min": -0.14724032580852509,
"max": 0.07918369770050049,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -2.057948589324951,
"min": -23.85293197631836,
"max": 13.698780059814453,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.010434726253151894,
"min": -0.14370520412921906,
"max": 0.08216734975576401,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -1.7112951278686523,
"min": -23.280242919921875,
"max": 14.214951515197754,
"count": 206
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 206
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.17152195151259259,
"min": -0.2927771418435233,
"max": 0.30314271324243974,
"count": 206
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -28.129600048065186,
"min": -51.97079956531525,
"max": 60.325399935245514,
"count": 206
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.17152195151259259,
"min": -0.2927771418435233,
"max": 0.30314271324243974,
"count": 206
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -28.129600048065186,
"min": -51.97079956531525,
"max": 60.325399935245514,
"count": 206
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 206
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 206
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017357751993646767,
"min": 0.011962342753152673,
"max": 0.023411121196113528,
"count": 99
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017357751993646767,
"min": 0.011962342753152673,
"max": 0.023411121196113528,
"count": 99
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.11244878272215525,
"min": 0.08854094247023264,
"max": 0.11486384173234304,
"count": 99
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.11244878272215525,
"min": 0.08854094247023264,
"max": 0.11486384173234304,
"count": 99
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11469231645266215,
"min": 0.08988887096444766,
"max": 0.11737564106782278,
"count": 99
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11469231645266215,
"min": 0.08988887096444766,
"max": 0.11737564106782278,
"count": 99
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 99
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 99
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 99
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 99
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 99
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 99
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1697582174",
"python_version": "3.10.13 (main, Sep 11 2023, 13:44:35) [GCC 11.2.0]",
"command_line_arguments": "/home/foreverip/miniconda3/envs/huggingface-rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1697588133"
},
"total": 5958.1983035700005,
"count": 1,
"self": 0.00577861400142865,
"children": {
"run_training.setup": {
"total": 0.06641858099919773,
"count": 1,
"self": 0.06641858099919773
},
"TrainerController.start_learning": {
"total": 5958.126106375,
"count": 1,
"self": 3.9599314957204115,
"children": {
"TrainerController._reset_env": {
"total": 7.414259091000531,
"count": 12,
"self": 7.414259091000531
},
"TrainerController.advance": {
"total": 5946.400784525277,
"count": 143194,
"self": 4.034141674756938,
"children": {
"env_step": {
"total": 3913.242215924148,
"count": 143194,
"self": 3009.8085575492078,
"children": {
"SubprocessEnvManager._take_step": {
"total": 900.8909132520566,
"count": 143194,
"self": 23.254369332249553,
"children": {
"TorchPolicy.evaluate": {
"total": 877.6365439198071,
"count": 258306,
"self": 877.6365439198071
}
}
},
"workers": {
"total": 2.542745122883389,
"count": 143194,
"self": 0.0,
"children": {
"worker_root": {
"total": 5942.4556625693085,
"count": 143194,
"is_parallel": true,
"self": 3374.4597361534816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002415871999801311,
"count": 2,
"is_parallel": true,
"self": 0.0005861650015503983,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018297069982509129,
"count": 8,
"is_parallel": true,
"self": 0.0018297069982509129
}
}
},
"UnityEnvironment.step": {
"total": 0.024754271000347217,
"count": 1,
"is_parallel": true,
"self": 0.0005256350013951305,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004785609999089502,
"count": 1,
"is_parallel": true,
"self": 0.0004785609999089502
},
"communicator.exchange": {
"total": 0.02202964299976884,
"count": 1,
"is_parallel": true,
"self": 0.02202964299976884
},
"steps_from_proto": {
"total": 0.0017204319992742967,
"count": 2,
"is_parallel": true,
"self": 0.0004128049995415495,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013076269997327472,
"count": 8,
"is_parallel": true,
"self": 0.0013076269997327472
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.02348342799996317,
"count": 22,
"is_parallel": true,
"self": 0.004550374004793412,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.018933053995169757,
"count": 88,
"is_parallel": true,
"self": 0.018933053995169757
}
}
},
"UnityEnvironment.step": {
"total": 2567.972442987827,
"count": 143193,
"is_parallel": true,
"self": 125.09512984815319,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 70.23697277520569,
"count": 143193,
"is_parallel": true,
"self": 70.23697277520569
},
"communicator.exchange": {
"total": 2033.0428889390814,
"count": 143193,
"is_parallel": true,
"self": 2033.0428889390814
},
"steps_from_proto": {
"total": 339.5974514253867,
"count": 286386,
"is_parallel": true,
"self": 62.99657596209545,
"children": {
"_process_rank_one_or_two_observation": {
"total": 276.60087546329123,
"count": 1145544,
"is_parallel": true,
"self": 276.60087546329123
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2029.1244269263725,
"count": 143194,
"self": 27.13517088637036,
"children": {
"process_trajectory": {
"total": 564.4148560900085,
"count": 143194,
"self": 562.889965740007,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5248903500014421,
"count": 4,
"self": 1.5248903500014421
}
}
},
"_update_policy": {
"total": 1437.5743999499937,
"count": 100,
"self": 333.9592402840408,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1103.615159665953,
"count": 2986,
"self": 1103.615159665953
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3370008673518896e-06,
"count": 1,
"self": 1.3370008673518896e-06
},
"TrainerController._save_models": {
"total": 0.3511299260007945,
"count": 1,
"self": 0.0020111730009375606,
"children": {
"RLTrainer._checkpoint": {
"total": 0.34911875299985695,
"count": 1,
"self": 0.34911875299985695
}
}
}
}
}
}
}