poca-SoccerTwos / run_logs / timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.619049310684204,
"min": 1.619049310684204,
"max": 3.2956883907318115,
"count": 1000
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 31396.603515625,
"min": 18282.5,
"max": 115885.4609375,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 46.660377358490564,
"min": 45.66355140186916,
"max": 999.0,
"count": 1000
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19784.0,
"min": 9156.0,
"max": 31876.0,
"count": 1000
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1583.2908266567183,
"min": 1174.4644358660264,
"max": 1600.7497911642322,
"count": 990
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 335657.6552512243,
"min": 2370.8536334395567,
"max": 339084.33217810746,
"count": 990
},
"SoccerTwos.Step.mean": {
"value": 9999982.0,
"min": 9954.0,
"max": 9999982.0,
"count": 1000
},
"SoccerTwos.Step.sum": {
"value": 9999982.0,
"min": 9954.0,
"max": 9999982.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.00607228372246027,
"min": -0.11751111596822739,
"max": 0.21167558431625366,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -1.2812519073486328,
"min": -18.80177879333496,
"max": 22.583396911621094,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.002361964900046587,
"min": -0.11942289024591446,
"max": 0.20918215811252594,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -0.4983745813369751,
"min": -19.08892822265625,
"max": 22.341575622558594,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.09993649376512138,
"min": -0.6316222217347887,
"max": 0.6275224503205747,
"count": 1000
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -21.086600184440613,
"min": -52.99939978122711,
"max": 61.49720013141632,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.09993649376512138,
"min": -0.6316222217347887,
"max": 0.6275224503205747,
"count": 1000
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -21.086600184440613,
"min": -52.99939978122711,
"max": 61.49720013141632,
"count": 1000
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1000
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02385382152472933,
"min": 0.010645401534687456,
"max": 0.02385382152472933,
"count": 480
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02385382152472933,
"min": 0.010645401534687456,
"max": 0.02385382152472933,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1010389839609464,
"min": 0.00015815873339306564,
"max": 0.11575197329123815,
"count": 480
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1010389839609464,
"min": 0.00015815873339306564,
"max": 0.11575197329123815,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10250727211435635,
"min": 0.00015888037111532564,
"max": 0.11866774335503578,
"count": 480
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10250727211435635,
"min": 0.00015888037111532564,
"max": 0.11866774335503578,
"count": 480
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 480
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.2,
"max": 0.20000000000000007,
"count": 480
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 480
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 480
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1718479331",
"python_version": "3.10.14 (main, Jun 14 2024, 18:23:26) [GCC 11.4.0]",
"command_line_arguments": "/home/ivan/Code/rl/hf/unit7/u7/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1718492588"
},
"total": 13257.191734499007,
"count": 1,
"self": 0.21883212600369006,
"children": {
"run_training.setup": {
"total": 0.010704863001592457,
"count": 1,
"self": 0.010704863001592457
},
"TrainerController.start_learning": {
"total": 13256.962197510002,
"count": 1,
"self": 11.19704046548577,
"children": {
"TrainerController._reset_env": {
"total": 2.3967403589049354,
"count": 50,
"self": 2.3967403589049354
},
"TrainerController.advance": {
"total": 13243.187545907567,
"count": 678304,
"self": 10.965911682753358,
"children": {
"env_step": {
"total": 9734.517115767929,
"count": 678304,
"self": 7381.073501714622,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2346.105229501147,
"count": 678304,
"self": 59.2861431860365,
"children": {
"TorchPolicy.evaluate": {
"total": 2286.8190863151103,
"count": 1263196,
"self": 2286.8190863151103
}
}
},
"workers": {
"total": 7.338384552160278,
"count": 678304,
"self": 0.0,
"children": {
"worker_root": {
"total": 13242.747293296328,
"count": 678304,
"is_parallel": true,
"self": 7165.139361804351,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017898979713208973,
"count": 2,
"is_parallel": true,
"self": 0.00042084691813215613,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013690510531887412,
"count": 8,
"is_parallel": true,
"self": 0.0013690510531887412
}
}
},
"UnityEnvironment.step": {
"total": 0.0187501460313797,
"count": 1,
"is_parallel": true,
"self": 0.0003802470164373517,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003191750147379935,
"count": 1,
"is_parallel": true,
"self": 0.0003191750147379935
},
"communicator.exchange": {
"total": 0.016770449001342058,
"count": 1,
"is_parallel": true,
"self": 0.016770449001342058
},
"steps_from_proto": {
"total": 0.0012802749988622963,
"count": 2,
"is_parallel": true,
"self": 0.00026854907628148794,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010117259225808084,
"count": 8,
"is_parallel": true,
"self": 0.0010117259225808084
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 6077.540349922841,
"count": 678303,
"is_parallel": true,
"self": 317.6406598382746,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 190.51674898219062,
"count": 678303,
"is_parallel": true,
"self": 190.51674898219062
},
"communicator.exchange": {
"total": 4672.0610915617435,
"count": 678303,
"is_parallel": true,
"self": 4672.0610915617435
},
"steps_from_proto": {
"total": 897.3218495406327,
"count": 1356606,
"is_parallel": true,
"self": 167.22421341767767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 730.097636122955,
"count": 5426424,
"is_parallel": true,
"self": 730.097636122955
}
}
}
}
},
"steps_from_proto": {
"total": 0.06758156913565472,
"count": 98,
"is_parallel": true,
"self": 0.01229875162243843,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.05528281751321629,
"count": 392,
"is_parallel": true,
"self": 0.05528281751321629
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 3497.704518456885,
"count": 678304,
"self": 85.6454694432905,
"children": {
"process_trajectory": {
"total": 1227.5400425905827,
"count": 678304,
"self": 1223.2639663565205,
"children": {
"RLTrainer._checkpoint": {
"total": 4.276076234062202,
"count": 20,
"self": 4.276076234062202
}
}
},
"_update_policy": {
"total": 2184.519006423012,
"count": 480,
"self": 1017.7918696403503,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1166.7271367826615,
"count": 14403,
"self": 1166.7271367826615
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.2503462433815e-07,
"count": 1,
"self": 7.2503462433815e-07
},
"TrainerController._save_models": {
"total": 0.18087005300913006,
"count": 1,
"self": 0.0009355859947390854,
"children": {
"RLTrainer._checkpoint": {
"total": 0.17993446701439098,
"count": 1,
"self": 0.17993446701439098
}
}
}
}
}
}
}