{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.289663314819336,
"min": 2.229106903076172,
"max": 3.2957377433776855,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 41836.7265625,
"min": 15532.056640625,
"max": 162660.25,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 67.89041095890411,
"min": 60.037974683544306,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19824.0,
"min": 12644.0,
"max": 29528.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1448.2605400645664,
"min": 1174.5279436799449,
"max": 1448.2605400645664,
"count": 368
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 211446.0388494267,
"min": 2350.9733001096633,
"max": 227017.79822142006,
"count": 368
},
"SoccerTwos.Step.mean": {
"value": 4999980.0,
"min": 9888.0,
"max": 4999980.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999980.0,
"min": 9888.0,
"max": 4999980.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.20114600658416748,
"min": -0.0980902761220932,
"max": 0.2208496779203415,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 29.16617202758789,
"min": -2.9210610389709473,
"max": 29.16617202758789,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.19902649521827698,
"min": -0.09284777194261551,
"max": 0.20998428761959076,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 28.858840942382812,
"min": -3.026115894317627,
"max": 28.858840942382812,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.2863475885884515,
"min": -0.5714285714285714,
"max": 0.5720509084788236,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 41.52040034532547,
"min": -27.98080015182495,
"max": 62.92559993267059,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.2863475885884515,
"min": -0.5714285714285714,
"max": 0.5720509084788236,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 41.52040034532547,
"min": -27.98080015182495,
"max": 62.92559993267059,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016194876989660163,
"min": 0.010523932663879047,
"max": 0.023885350689912835,
"count": 233
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016194876989660163,
"min": 0.010523932663879047,
"max": 0.023885350689912835,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.0811188002427419,
"min": 1.1295445574432961e-07,
"max": 0.08559163709481557,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.0811188002427419,
"min": 1.1295445574432961e-07,
"max": 0.08559163709481557,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.08247003927826882,
"min": 1.2279456408729555e-07,
"max": 0.0869223048289617,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.08247003927826882,
"min": 1.2279456408729555e-07,
"max": 0.0869223048289617,
"count": 233
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.30000000000000004,
"min": 0.30000000000000004,
"max": 0.30000000000000004,
"count": 233
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.30000000000000004,
"min": 0.30000000000000004,
"max": 0.30000000000000004,
"count": 233
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695361112",
"python_version": "3.9.18 (main, Sep 11 2023, 13:41:44) \n[GCC 11.2.0]",
"command_line_arguments": "/home/hongbin/anaconda3/envs/rl/bin/mlagents-learn config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.x86_64 --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu113",
"numpy_version": "1.21.2",
"end_time_seconds": "1695364051"
},
"total": 2938.7596286730004,
"count": 1,
"self": 0.1665410020004856,
"children": {
"run_training.setup": {
"total": 0.005034699000134424,
"count": 1,
"self": 0.005034699000134424
},
"TrainerController.start_learning": {
"total": 2938.588052972,
"count": 1,
"self": 2.9322815978453036,
"children": {
"TrainerController._reset_env": {
"total": 3.342722705000142,
"count": 25,
"self": 3.342722705000142
},
"TrainerController.advance": {
"total": 2932.227263988155,
"count": 326253,
"self": 3.0041637250456006,
"children": {
"env_step": {
"total": 2282.7101968870484,
"count": 326253,
"self": 1733.5442979182008,
"children": {
"SubprocessEnvManager._take_step": {
"total": 547.3272669913404,
"count": 326253,
"self": 20.42333503315149,
"children": {
"TorchPolicy.evaluate": {
"total": 526.9039319581889,
"count": 642592,
"self": 526.9039319581889
}
}
},
"workers": {
"total": 1.838631977507248,
"count": 326253,
"self": 0.0,
"children": {
"worker_root": {
"total": 2934.9022757198245,
"count": 326253,
"is_parallel": true,
"self": 1537.3427580649422,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001007656999718165,
"count": 2,
"is_parallel": true,
"self": 0.00022144599961393396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007862110001042311,
"count": 8,
"is_parallel": true,
"self": 0.0007862110001042311
}
}
},
"UnityEnvironment.step": {
"total": 0.011203951999959827,
"count": 1,
"is_parallel": true,
"self": 0.00025870200033750734,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00020776399969690829,
"count": 1,
"is_parallel": true,
"self": 0.00020776399969690829
},
"communicator.exchange": {
"total": 0.009974828999929741,
"count": 1,
"is_parallel": true,
"self": 0.009974828999929741
},
"steps_from_proto": {
"total": 0.00076265699999567,
"count": 2,
"is_parallel": true,
"self": 0.0001465820005250862,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006160749994705839,
"count": 8,
"is_parallel": true,
"self": 0.0006160749994705839
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1397.5407786518822,
"count": 326252,
"is_parallel": true,
"self": 80.19892113257629,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 47.33894162897013,
"count": 326252,
"is_parallel": true,
"self": 47.33894162897013
},
"communicator.exchange": {
"total": 1042.850906856132,
"count": 326252,
"is_parallel": true,
"self": 1042.850906856132
},
"steps_from_proto": {
"total": 227.15200903420373,
"count": 652504,
"is_parallel": true,
"self": 40.58871454117207,
"children": {
"_process_rank_one_or_two_observation": {
"total": 186.56329449303166,
"count": 2610016,
"is_parallel": true,
"self": 186.56329449303166
}
}
}
}
},
"steps_from_proto": {
"total": 0.018739003000064258,
"count": 48,
"is_parallel": true,
"self": 0.0035014149998460198,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.015237588000218238,
"count": 192,
"is_parallel": true,
"self": 0.015237588000218238
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.5129033760609,
"count": 326253,
"self": 28.07758124200882,
"children": {
"process_trajectory": {
"total": 184.22420884905068,
"count": 326253,
"self": 183.3088944670494,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9153143820012701,
"count": 10,
"self": 0.9153143820012701
}
}
},
"_update_policy": {
"total": 434.2111132850014,
"count": 233,
"self": 307.0473835939929,
"children": {
"TorchPOCAOptimizer.update": {
"total": 127.16372969100848,
"count": 6996,
"self": 127.16372969100848
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.769999577547424e-07,
"count": 1,
"self": 4.769999577547424e-07
},
"TrainerController._save_models": {
"total": 0.08578420399953757,
"count": 1,
"self": 0.0009311839994552429,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08485302000008232,
"count": 1,
"self": 0.08485302000008232
}
}
}
}
}
}
}