{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.51484751701355,
"min": 2.4968974590301514,
"max": 3.2957088947296143,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 48687.44921875,
"min": 17183.470703125,
"max": 127448.0859375,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 148.15151515151516,
"min": 110.6086956521739,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19556.0,
"min": 4616.0,
"max": 30060.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1205.9946991120182,
"min": 1182.7427229333125,
"max": 1236.0059830681423,
"count": 458
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 77183.66074316917,
"min": 2365.5766621176053,
"max": 109429.25045098405,
"count": 458
},
"SoccerTwos.Step.mean": {
"value": 4999966.0,
"min": 9976.0,
"max": 4999966.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999966.0,
"min": 9976.0,
"max": 4999966.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.011815357953310013,
"min": -0.8224837183952332,
"max": 0.09149566292762756,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.7679982781410217,
"min": -41.33378219604492,
"max": 5.38049840927124,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.011457674205303192,
"min": -0.8225486874580383,
"max": 0.09321700781583786,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.7447488307952881,
"min": -41.12950134277344,
"max": 5.042311668395996,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.10054153845860409,
"min": -0.6888603781754116,
"max": 0.500728577375412,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -6.535199999809265,
"min": -36.509600043296814,
"max": 22.3924001455307,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.10054153845860409,
"min": -0.6888603781754116,
"max": 0.500728577375412,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -6.535199999809265,
"min": -36.509600043296814,
"max": 22.3924001455307,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.02056941466871649,
"min": 0.012793293024878949,
"max": 0.028850990000986107,
"count": 235
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.02056941466871649,
"min": 0.012793293024878949,
"max": 0.028850990000986107,
"count": 235
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.023601138219237327,
"min": 1.9371433760776804e-06,
"max": 3.8241605819535573,
"count": 235
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.023601138219237327,
"min": 1.9371433760776804e-06,
"max": 3.8241605819535573,
"count": 235
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.025794722388188043,
"min": 5.924941281894765e-06,
"max": 3.704598916705609,
"count": 235
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.025794722388188043,
"min": 5.924941281894765e-06,
"max": 3.704598916705609,
"count": 235
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.003000000000000001,
"min": 0.003,
"max": 0.003000000000000001,
"count": 235
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.003000000000000001,
"min": 0.003,
"max": 0.003000000000000001,
"count": 235
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 235
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.19999999999999996,
"max": 0.20000000000000007,
"count": 235
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 235
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005,
"max": 0.005000000000000001,
"count": 235
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715704230",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/root/miniconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos --run-id=soccer_twos --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715741171"
},
"total": 36965.794145468004,
"count": 1,
"self": 5.352597061006236,
"children": {
"run_training.setup": {
"total": 0.07465267699990363,
"count": 1,
"self": 0.07465267699990363
},
"TrainerController.start_learning": {
"total": 36960.366895729996,
"count": 1,
"self": 11.881535156600876,
"children": {
"TrainerController._reset_env": {
"total": 15.367443916025877,
"count": 99,
"self": 15.367443916025877
},
"TrainerController.advance": {
"total": 36932.607473506374,
"count": 351571,
"self": 12.379925891757011,
"children": {
"env_step": {
"total": 9206.65211241196,
"count": 351571,
"self": 6769.482378721448,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2429.4186117198715,
"count": 351571,
"self": 95.42245306367113,
"children": {
"TorchPolicy.evaluate": {
"total": 2333.9961586562004,
"count": 692016,
"self": 2333.9961586562004
}
}
},
"workers": {
"total": 7.751121970641179,
"count": 351571,
"self": 0.0,
"children": {
"worker_root": {
"total": 36933.626827574946,
"count": 351571,
"is_parallel": true,
"self": 31740.253805830158,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.008072216000982735,
"count": 2,
"is_parallel": true,
"self": 0.0024276560006910586,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.005644560000291676,
"count": 8,
"is_parallel": true,
"self": 0.005644560000291676
}
}
},
"UnityEnvironment.step": {
"total": 0.036281407000387844,
"count": 1,
"is_parallel": true,
"self": 0.0007422099997711484,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008546700000806595,
"count": 1,
"is_parallel": true,
"self": 0.0008546700000806595
},
"communicator.exchange": {
"total": 0.031440329999895766,
"count": 1,
"is_parallel": true,
"self": 0.031440329999895766
},
"steps_from_proto": {
"total": 0.00324419700064027,
"count": 2,
"is_parallel": true,
"self": 0.0006480240017481265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025961729988921434,
"count": 8,
"is_parallel": true,
"self": 0.0025961729988921434
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 5193.080491720794,
"count": 351570,
"is_parallel": true,
"self": 263.54286897053316,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 237.43503371335646,
"count": 351570,
"is_parallel": true,
"self": 237.43503371335646
},
"communicator.exchange": {
"total": 3660.8622616610774,
"count": 351570,
"is_parallel": true,
"self": 3660.8622616610774
},
"steps_from_proto": {
"total": 1031.2403273758273,
"count": 703140,
"is_parallel": true,
"self": 208.3572503934265,
"children": {
"_process_rank_one_or_two_observation": {
"total": 822.8830769824008,
"count": 2812560,
"is_parallel": true,
"self": 822.8830769824008
}
}
}
}
},
"steps_from_proto": {
"total": 0.2925300239949138,
"count": 196,
"is_parallel": true,
"self": 0.06102310497772123,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.23150691901719256,
"count": 784,
"is_parallel": true,
"self": 0.23150691901719256
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 27713.575435202652,
"count": 351571,
"self": 100.40271507469151,
"children": {
"process_trajectory": {
"total": 2156.992910522981,
"count": 351571,
"self": 2151.141712407979,
"children": {
"RLTrainer._checkpoint": {
"total": 5.851198115002262,
"count": 10,
"self": 5.851198115002262
}
}
},
"_update_policy": {
"total": 25456.17980960498,
"count": 235,
"self": 1327.0889110348544,
"children": {
"TorchPOCAOptimizer.update": {
"total": 24129.090898570124,
"count": 7062,
"self": 24129.090898570124
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.9069993868470192e-06,
"count": 1,
"self": 1.9069993868470192e-06
},
"TrainerController._save_models": {
"total": 0.510441243997775,
"count": 1,
"self": 0.024752318997343536,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4856889250004315,
"count": 1,
"self": 0.4856889250004315
}
}
}
}
}
}
}