DrishtiSharma · First Push · 0cf0a91
{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7683492302894592,
"min": 0.7600694894790649,
"max": 2.8741514682769775,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7885.568359375,
"min": 7340.7509765625,
"max": 29402.568359375,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.998841285705566,
"min": 0.19241949915885925,
"max": 12.998841285705566,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2651.763671875,
"min": 37.329383850097656,
"max": 2654.54150390625,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06713153425964764,
"min": 0.06284714500076255,
"max": 0.07652197535313177,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3356576712982382,
"min": 0.2513885800030502,
"max": 0.3736244197375616,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2094671048370062,
"min": 0.10411210189683034,
"max": 0.30990612353472147,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.047335524185031,
"min": 0.4164484075873214,
"max": 1.4703391290762844,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333331e-06,
"min": 5.288098237333331e-06,
"max": 0.00029458800180399996,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6440491186666655e-05,
"min": 2.6440491186666655e-05,
"max": 0.0014234400255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.581818181818182,
"min": 2.5454545454545454,
"max": 25.886363636363637,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1407.0,
"min": 112.0,
"max": 1407.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.581818181818182,
"min": 2.5454545454545454,
"max": 25.886363636363637,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1407.0,
"min": 112.0,
"max": 1407.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678244734",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678245418"
},
"total": 684.2784943450001,
"count": 1,
"self": 0.4438370200002737,
"children": {
"run_training.setup": {
"total": 0.11171971699991445,
"count": 1,
"self": 0.11171971699991445
},
"TrainerController.start_learning": {
"total": 683.7229376079999,
"count": 1,
"self": 0.8522606479788237,
"children": {
"TrainerController._reset_env": {
"total": 9.05685581199998,
"count": 1,
"self": 9.05685581199998
},
"TrainerController.advance": {
"total": 673.6957560500209,
"count": 27342,
"self": 0.42919146802682917,
"children": {
"env_step": {
"total": 673.2665645819941,
"count": 27342,
"self": 465.44963245902227,
"children": {
"SubprocessEnvManager._take_step": {
"total": 207.42095395898878,
"count": 27342,
"self": 2.2939976459615536,
"children": {
"TorchPolicy.evaluate": {
"total": 205.12695631302722,
"count": 27342,
"self": 46.24493992701673,
"children": {
"TorchPolicy.sample_actions": {
"total": 158.8820163860105,
"count": 27342,
"self": 158.8820163860105
}
}
}
}
},
"workers": {
"total": 0.3959781639830453,
"count": 27342,
"self": 0.0,
"children": {
"worker_root": {
"total": 681.435375669992,
"count": 27342,
"is_parallel": true,
"self": 327.8167077269936,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005358617999945636,
"count": 1,
"is_parallel": true,
"self": 0.003958468000064386,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014001499998812506,
"count": 10,
"is_parallel": true,
"self": 0.0014001499998812506
}
}
},
"UnityEnvironment.step": {
"total": 0.08134832899986577,
"count": 1,
"is_parallel": true,
"self": 0.000558968999712306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00035829400007969525,
"count": 1,
"is_parallel": true,
"self": 0.00035829400007969525
},
"communicator.exchange": {
"total": 0.07860995700002604,
"count": 1,
"is_parallel": true,
"self": 0.07860995700002604
},
"steps_from_proto": {
"total": 0.001821109000047727,
"count": 1,
"is_parallel": true,
"self": 0.0003753129999495286,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014457960000981984,
"count": 10,
"is_parallel": true,
"self": 0.0014457960000981984
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 353.6186679429984,
"count": 27341,
"is_parallel": true,
"self": 14.410563015999742,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.738144318989953,
"count": 27341,
"is_parallel": true,
"self": 7.738144318989953
},
"communicator.exchange": {
"total": 285.91918032102194,
"count": 27341,
"is_parallel": true,
"self": 285.91918032102194
},
"steps_from_proto": {
"total": 45.550780286986765,
"count": 27341,
"is_parallel": true,
"self": 9.85456692895491,
"children": {
"_process_rank_one_or_two_observation": {
"total": 35.696213358031855,
"count": 273410,
"is_parallel": true,
"self": 35.696213358031855
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00039866600013738207,
"count": 1,
"self": 0.00039866600013738207,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 668.8792041659781,
"count": 593637,
"is_parallel": true,
"self": 15.004068415982147,
"children": {
"process_trajectory": {
"total": 380.84772338199673,
"count": 593637,
"is_parallel": true,
"self": 378.36171301299646,
"children": {
"RLTrainer._checkpoint": {
"total": 2.486010369000269,
"count": 6,
"is_parallel": true,
"self": 2.486010369000269
}
}
},
"_update_policy": {
"total": 273.0274123679992,
"count": 136,
"is_parallel": true,
"self": 95.3609779240087,
"children": {
"TorchPPOOptimizer.update": {
"total": 177.6664344439905,
"count": 6933,
"is_parallel": true,
"self": 177.6664344439905
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1176664320000782,
"count": 1,
"self": 0.0008412150000367546,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11682521700004145,
"count": 1,
"self": 0.11682521700004145
}
}
}
}
}
}
}
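
As a quick sanity check, the gauge summary above can be inspected programmatically. The sketch below is a minimal example, not part of the original run artifacts: it assumes the JSON has been saved locally as run_logs/timers.json (the usual location inside an ML-Agents results folder, but the exact path is an assumption here) and uses only the Python standard library.

import json

# Assumed path: ML-Agents normally writes this summary under
# <results_dir>/run_logs/timers.json; adjust to wherever the file lives.
TIMERS_PATH = "run_logs/timers.json"

with open(TIMERS_PATH) as f:
    timers = json.load(f)

# "gauges" maps each metric to its latest value plus min/max/count
# accumulated over the run (30 summary writes for this run).
gauges = timers["gauges"]

for name, stats in sorted(gauges.items()):
    print(f"{name}: value={stats['value']:.4f} "
          f"(min={stats['min']:.4f}, max={stats['max']:.4f}, n={stats['count']})")

# Headline metric for SnowballTarget: mean cumulative reward per episode.
reward = gauges["SnowballTarget.Environment.CumulativeReward.mean"]
print(f"Final mean reward: {reward['value']:.2f} (min over run: {reward['min']:.2f})")

On the data above this prints a final mean reward of about 25.58, up from a minimum of about 2.55 early in training.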