poca-SoccerTwos/run_logs/timers.json
{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.863402843475342,
"min": 2.8418586254119873,
"max": 3.2956948280334473,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 57909.45703125,
"min": 16824.85546875,
"max": 148237.46875,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 86.92857142857143,
"min": 72.04411764705883,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19472.0,
"min": 4884.0,
"max": 29948.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1318.5978989021112,
"min": 1194.0640425488612,
"max": 1323.746780218973,
"count": 398
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 147682.96467703645,
"min": 2388.1280850977223,
"max": 178693.59971130683,
"count": 398
},
"SoccerTwos.Step.mean": {
"value": 4999998.0,
"min": 9258.0,
"max": 4999998.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999998.0,
"min": 9258.0,
"max": 4999998.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.0024093177635222673,
"min": -0.060914769768714905,
"max": 0.16929885745048523,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.2722529172897339,
"min": -8.284408569335938,
"max": 14.221104621887207,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.009136270731687546,
"min": -0.06261375546455383,
"max": 0.1744844764471054,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.0323985815048218,
"min": -8.515470504760742,
"max": 14.656696319580078,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0307398222189034,
"min": -0.5333333333333333,
"max": 0.4670666642487049,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 3.473599910736084,
"min": -40.27520024776459,
"max": 36.327200055122375,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0307398222189034,
"min": -0.5333333333333333,
"max": 0.4670666642487049,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 3.473599910736084,
"min": -40.27520024776459,
"max": 36.327200055122375,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.017211358656641095,
"min": 0.011587637230210628,
"max": 0.02452330959883208,
"count": 232
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.017211358656641095,
"min": 0.011587637230210628,
"max": 0.02452330959883208,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.05854383024076621,
"min": 1.005996855004317e-06,
"max": 0.0648781197766463,
"count": 232
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.05854383024076621,
"min": 1.005996855004317e-06,
"max": 0.0648781197766463,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.05999068741997083,
"min": 1.3523445394033237e-06,
"max": 0.06687828612824281,
"count": 232
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.05999068741997083,
"min": 1.3523445394033237e-06,
"max": 0.06687828612824281,
"count": 232
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 232
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 232
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 232
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 232
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 232
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702284732",
"python_version": "3.10.12 | packaged by conda-forge | (main, Jun 23 2023, 22:40:32) [GCC 12.3.0]",
"command_line_arguments": "/home/giulia/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwosN --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702298193"
},
"total": 13460.7845673,
"count": 1,
"self": 1.7956099999992148,
"children": {
"run_training.setup": {
"total": 0.0414395999999968,
"count": 1,
"self": 0.0414395999999968
},
"TrainerController.start_learning": {
"total": 13458.9475177,
"count": 1,
"self": 7.450014800146164,
"children": {
"TrainerController._reset_env": {
"total": 13.12735950000279,
"count": 25,
"self": 13.12735950000279
},
"TrainerController.advance": {
"total": 13437.849233499852,
"count": 326343,
"self": 9.080444799572433,
"children": {
"env_step": {
"total": 10943.651513499686,
"count": 326343,
"self": 6875.888252799553,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4062.7215727999906,
"count": 326343,
"self": 64.16633119938297,
"children": {
"TorchPolicy.evaluate": {
"total": 3998.5552416006076,
"count": 645142,
"self": 3998.5552416006076
}
}
},
"workers": {
"total": 5.041687900142634,
"count": 326343,
"self": 0.0,
"children": {
"worker_root": {
"total": 13440.917202100192,
"count": 326343,
"is_parallel": true,
"self": 7507.967370800142,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.003072199999998304,
"count": 2,
"is_parallel": true,
"self": 0.001370099999981278,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001702100000017026,
"count": 8,
"is_parallel": true,
"self": 0.001702100000017026
}
}
},
"UnityEnvironment.step": {
"total": 0.02837019999999768,
"count": 1,
"is_parallel": true,
"self": 0.0004579000000006772,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00034999999999740794,
"count": 1,
"is_parallel": true,
"self": 0.00034999999999740794
},
"communicator.exchange": {
"total": 0.026116500000000542,
"count": 1,
"is_parallel": true,
"self": 0.026116500000000542
},
"steps_from_proto": {
"total": 0.0014457999999990534,
"count": 2,
"is_parallel": true,
"self": 0.00034640000002639226,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010993999999726611,
"count": 8,
"is_parallel": true,
"self": 0.0010993999999726611
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5932.910143500053,
"count": 326342,
"is_parallel": true,
"self": 178.9587261008901,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 135.33032190009277,
"count": 326342,
"is_parallel": true,
"self": 135.33032190009277
},
"communicator.exchange": {
"total": 5027.371411200163,
"count": 326342,
"is_parallel": true,
"self": 5027.371411200163
},
"steps_from_proto": {
"total": 591.2496842989079,
"count": 652684,
"is_parallel": true,
"self": 134.45183469819182,
"children": {
"_process_rank_one_or_two_observation": {
"total": 456.7978496007161,
"count": 2610736,
"is_parallel": true,
"self": 456.7978496007161
}
}
}
}
},
"steps_from_proto": {
"total": 0.03968779999763683,
"count": 48,
"is_parallel": true,
"self": 0.00879139999221934,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03089640000541749,
"count": 192,
"is_parallel": true,
"self": 0.03089640000541749
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2485.1172752005928,
"count": 326343,
"self": 57.36275270098531,
"children": {
"process_trajectory": {
"total": 939.5547369996184,
"count": 326343,
"self": 935.2898431996175,
"children": {
"RLTrainer._checkpoint": {
"total": 4.264893800000891,
"count": 10,
"self": 4.264893800000891
}
}
},
"_update_policy": {
"total": 1488.1997854999888,
"count": 232,
"self": 659.7722875999405,
"children": {
"TorchPOCAOptimizer.update": {
"total": 828.4274979000484,
"count": 6975,
"self": 828.4274979000484
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.999995770864189e-07,
"count": 1,
"self": 8.999995770864189e-07
},
"TrainerController._save_models": {
"total": 0.5209090000007564,
"count": 1,
"self": 0.08256520000031742,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43834380000043893,
"count": 1,
"self": 0.43834380000043893
}
}
}
}
}
}
}
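For reference, a minimal Python sketch (not part of the logged file) of one way to inspect this timers.json offline. It assumes the file is saved locally at run_logs/timers.json and relies only on the key names visible above ("gauges", "value", "min", "max", "count", "total", "self", "children"); nothing else is assumed about the ML-Agents tooling.

import json

with open("run_logs/timers.json") as f:
    root = json.load(f)

# Print each gauge with its final value and observed range.
for name, g in root["gauges"].items():
    print(f"{name}: value={g['value']:.4f} "
          f"(min={g['min']:.4f}, max={g['max']:.4f}, n={g['count']})")

# Walk the timer tree and report where wall-clock time was spent.
def walk(name, node, depth=0):
    total = node.get("total", 0.0)
    print(f"{'  ' * depth}{name}: {total:.1f}s over {node.get('count', 0)} call(s)")
    for child_name, child in node.get("children", {}).items():
        walk(child_name, child, depth + 1)

walk("root", {"total": root["total"], "count": root["count"],
              "children": root["children"]})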