{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0841541290283203,
"min": 1.0841541290283203,
"max": 2.8670506477355957,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10375.35546875,
"min": 10375.35546875,
"max": 29393.00390625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.751287460327148,
"min": 0.3630122244358063,
"max": 12.751287460327148,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2486.5009765625,
"min": 70.42436981201172,
"max": 2588.248046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07484835144086724,
"min": 0.064124506786517,
"max": 0.07786626284359954,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29939340576346896,
"min": 0.256498027146068,
"max": 0.383082955906752,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2206829757082696,
"min": 0.10468568799871149,
"max": 0.28082889289248225,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8827319028330785,
"min": 0.41874275199484595,
"max": 1.4023970279148688,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.931818181818183,
"min": 3.2954545454545454,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1097.0,
"min": 145.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.931818181818183,
"min": 3.2954545454545454,
"max": 25.363636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1097.0,
"min": 145.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1713998629",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/home/rahil/.local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1713999197"
},
"total": 568.2455152830007,
"count": 1,
"self": 0.3243405140019604,
"children": {
"run_training.setup": {
"total": 0.02469060900330078,
"count": 1,
"self": 0.02469060900330078
},
"TrainerController.start_learning": {
"total": 567.8964841599955,
"count": 1,
"self": 0.3836573611188214,
"children": {
"TrainerController._reset_env": {
"total": 5.0591530129968305,
"count": 1,
"self": 5.0591530129968305
},
"TrainerController.advance": {
"total": 562.3441254508871,
"count": 18201,
"self": 0.1843525458680233,
"children": {
"env_step": {
"total": 562.159772905019,
"count": 18201,
"self": 346.5770950880542,
"children": {
"SubprocessEnvManager._take_step": {
"total": 215.3747372614016,
"count": 18201,
"self": 1.2134273185874918,
"children": {
"TorchPolicy.evaluate": {
"total": 214.1613099428141,
"count": 18201,
"self": 214.1613099428141
}
}
},
"workers": {
"total": 0.20794055556325475,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 567.0120763794184,
"count": 18201,
"is_parallel": true,
"self": 293.092488683782,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001913965999847278,
"count": 1,
"is_parallel": true,
"self": 0.000853788988024462,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001060177011822816,
"count": 10,
"is_parallel": true,
"self": 0.001060177011822816
}
}
},
"UnityEnvironment.step": {
"total": 0.04338461499719415,
"count": 1,
"is_parallel": true,
"self": 0.0002913490025093779,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00033960899600060657,
"count": 1,
"is_parallel": true,
"self": 0.00033960899600060657
},
"communicator.exchange": {
"total": 0.04169974900287343,
"count": 1,
"is_parallel": true,
"self": 0.04169974900287343
},
"steps_from_proto": {
"total": 0.0010539079958107322,
"count": 1,
"is_parallel": true,
"self": 0.00022619900846621022,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000827708987344522,
"count": 10,
"is_parallel": true,
"self": 0.000827708987344522
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 273.91958769563644,
"count": 18200,
"is_parallel": true,
"self": 5.334513292094925,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.1179251737557934,
"count": 18200,
"is_parallel": true,
"self": 3.1179251737557934
},
"communicator.exchange": {
"total": 249.09134830151015,
"count": 18200,
"is_parallel": true,
"self": 249.09134830151015
},
"steps_from_proto": {
"total": 16.375800928275567,
"count": 18200,
"is_parallel": true,
"self": 3.4718712045432767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 12.90392972373229,
"count": 182000,
"is_parallel": true,
"self": 12.90392972373229
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.910999556770548e-05,
"count": 1,
"self": 7.910999556770548e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 558.6422463552153,
"count": 553139,
"is_parallel": true,
"self": 10.553192859297269,
"children": {
"process_trajectory": {
"total": 313.82521122497565,
"count": 553139,
"is_parallel": true,
"self": 313.0736011349727,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7516100900029414,
"count": 4,
"is_parallel": true,
"self": 0.7516100900029414
}
}
},
"_update_policy": {
"total": 234.2638422709424,
"count": 90,
"is_parallel": true,
"self": 26.37037113000406,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.89347114093835,
"count": 4584,
"is_parallel": true,
"self": 207.89347114093835
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.10946922499715583,
"count": 1,
"self": 0.00617173699720297,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10329748799995286,
"count": 1,
"self": 0.10329748799995286
}
}
}
}
}
}
}
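
The JSON above appears to be the timers.json that ML-Agents writes at the end of a training run (typically under results/<run-id>/run_logs/): "gauges" holds per-metric summaries (value, min, max, count) for the SnowballTarget1 run, "metadata" records the mlagents-learn command line and library versions, and the remaining total/count/self/children keys form a nested timer tree showing where the wall-clock time went. As a minimal sketch (the file path and the printed layout are assumptions, not part of the original run), the snippet below loads the file, prints each gauge's summary, and walks the timer tree:

import json

# Assumption: the JSON shown above is saved as run_logs/timers.json
# (ML-Agents usually places it under results/<run-id>/run_logs/);
# adjust the path to wherever your copy lives.
TIMERS_PATH = "run_logs/timers.json"

with open(TIMERS_PATH) as f:
    timers = json.load(f)

# "gauges" maps each metric name to a {value, min, max, count} summary.
print(f"Run recorded with ML-Agents {timers['metadata']['mlagents_version']}")
for name, g in sorted(timers["gauges"].items()):
    print(f"{name:<52} value={g['value']:>12.6g}  min={g['min']:>12.6g}  max={g['max']:>12.6g}")

# The remaining keys (total/count/self/children) form a nested timer tree;
# walking it shows where the ~568 s of wall-clock time was spent.
def walk(node, label="root", depth=0):
    print(f"{'  ' * depth}{label}: {node.get('total', 0.0):.1f}s over {node.get('count', 0)} call(s)")
    for child_label, child in node.get("children", {}).items():
        walk(child, child_label, depth + 1)

walk(timers)

Everything the script reads (gauges, metadata, total, count, children) comes directly from the keys visible in the JSON above; nothing beyond the file path is assumed about the run itself.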