{
"name": "root",
"gauges": {
"PushBlock.Policy.Entropy.mean": {
"value": 0.2016972005367279,
"min": 0.18549907207489014,
"max": 1.8744633197784424,
"count": 50
},
"PushBlock.Policy.Entropy.sum": {
"value": 12082.46875,
"min": 11112.13671875,
"max": 113727.4375,
"count": 50
},
"PushBlock.Step.mean": {
"value": 2999995.0,
"min": 59981.0,
"max": 2999995.0,
"count": 50
},
"PushBlock.Step.sum": {
"value": 2999995.0,
"min": 59981.0,
"max": 2999995.0,
"count": 50
},
"PushBlock.Policy.ExtrinsicValueEstimate.mean": {
"value": 4.532731533050537,
"min": 0.06312137842178345,
"max": 4.55885124206543,
"count": 50
},
"PushBlock.Policy.ExtrinsicValueEstimate.sum": {
"value": 15116.66015625,
"min": 60.53340148925781,
"max": 15470.3671875,
"count": 50
},
"PushBlock.Losses.PolicyLoss.mean": {
"value": 0.06696407760636634,
"min": 0.06569872397478488,
"max": 0.07140306353799185,
"count": 50
},
"PushBlock.Losses.PolicyLoss.sum": {
"value": 1.941958250584624,
"min": 1.4558263342006108,
"max": 2.0864144192310055,
"count": 50
},
"PushBlock.Losses.ValueLoss.mean": {
"value": 0.06186323563217709,
"min": 0.03626665951959768,
"max": 0.3553684272410749,
"count": 50
},
"PushBlock.Losses.ValueLoss.sum": {
"value": 1.7940338333331356,
"min": 1.0517331260683327,
"max": 10.305684389991173,
"count": 50
},
"PushBlock.Policy.LearningRate.mean": {
"value": 3.087381729526438e-06,
"min": 3.087381729526438e-06,
"max": 0.0002966075106546396,
"count": 50
},
"PushBlock.Policy.LearningRate.sum": {
"value": 8.95340701562667e-05,
"min": 8.95340701562667e-05,
"max": 0.0084374907875031,
"count": 50
},
"PushBlock.Policy.Epsilon.mean": {
"value": 0.10102909425287358,
"min": 0.10102909425287358,
"max": 0.19886916984126987,
"count": 50
},
"PushBlock.Policy.Epsilon.sum": {
"value": 2.929843733333334,
"min": 2.929843733333334,
"max": 5.7124969,
"count": 50
},
"PushBlock.Policy.Beta.mean": {
"value": 0.00011280651586206898,
"min": 0.00011280651586206898,
"max": 0.009887030067142857,
"count": 50
},
"PushBlock.Policy.Beta.sum": {
"value": 0.0032713889600000006,
"min": 0.0032713889600000006,
"max": 0.28125844031,
"count": 50
},
"PushBlock.Environment.EpisodeLength.mean": {
"value": 17.034724337496193,
"min": 16.847930413917215,
"max": 804.1730769230769,
"count": 50
},
"PushBlock.Environment.EpisodeLength.sum": {
"value": 55925.0,
"min": 41817.0,
"max": 74923.0,
"count": 50
},
"PushBlock.Environment.CumulativeReward.mean": {
"value": 4.9793144792735795,
"min": 1.1180461471757064,
"max": 4.980931816069633,
"count": 50
},
"PushBlock.Environment.CumulativeReward.sum": {
"value": 16352.068749934435,
"min": 58.13839965313673,
"max": 16760.835561074317,
"count": 50
},
"PushBlock.Policy.ExtrinsicReward.mean": {
"value": 4.9793144792735795,
"min": 1.1180461471757064,
"max": 4.980931816069633,
"count": 50
},
"PushBlock.Policy.ExtrinsicReward.sum": {
"value": 16352.068749934435,
"min": 58.13839965313673,
"max": 16760.835561074317,
"count": 50
},
"PushBlock.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"PushBlock.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1661438077",
"python_version": "3.7.13 (default, Apr 24 2022, 01:04:09) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PushBlock.yaml --env=./trained-envs-executables/linux/PushBlock/PushBlock --run-id=PushBlock Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1661441051"
},
"total": 2974.1935982289997,
"count": 1,
"self": 0.2708926149994113,
"children": {
"run_training.setup": {
"total": 0.03884206900011122,
"count": 1,
"self": 0.03884206900011122
},
"TrainerController.start_learning": {
"total": 2973.883863545,
"count": 1,
"self": 4.222226816023522,
"children": {
"TrainerController._reset_env": {
"total": 5.337355265000042,
"count": 1,
"self": 5.337355265000042
},
"TrainerController.advance": {
"total": 2964.264817552976,
"count": 183916,
"self": 4.029414248805097,
"children": {
"env_step": {
"total": 1667.2106005960522,
"count": 183916,
"self": 1517.0580446200443,
"children": {
"SubprocessEnvManager._take_step": {
"total": 148.06953378606113,
"count": 183916,
"self": 8.418083007220957,
"children": {
"TorchPolicy.evaluate": {
"total": 139.65145077884017,
"count": 93759,
"self": 46.02098987685531,
"children": {
"TorchPolicy.sample_actions": {
"total": 93.63046090198486,
"count": 93759,
"self": 93.63046090198486
}
}
}
}
},
"workers": {
"total": 2.0830221899468597,
"count": 183916,
"self": 0.0,
"children": {
"worker_root": {
"total": 2967.5151370739154,
"count": 183916,
"is_parallel": true,
"self": 1695.8266128530695,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0059622110002237605,
"count": 1,
"is_parallel": true,
"self": 0.004477401999793074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014848090004306869,
"count": 4,
"is_parallel": true,
"self": 0.0014848090004306869
}
}
},
"UnityEnvironment.step": {
"total": 0.026541934999841033,
"count": 1,
"is_parallel": true,
"self": 0.0005433209998955135,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00043376699977670796,
"count": 1,
"is_parallel": true,
"self": 0.00043376699977670796
},
"communicator.exchange": {
"total": 0.02414151299990408,
"count": 1,
"is_parallel": true,
"self": 0.02414151299990408
},
"steps_from_proto": {
"total": 0.001423334000264731,
"count": 1,
"is_parallel": true,
"self": 0.00033219399983863696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001091140000426094,
"count": 4,
"is_parallel": true,
"self": 0.001091140000426094
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1271.688524220846,
"count": 183915,
"is_parallel": true,
"self": 47.46229449071143,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 35.0671926720247,
"count": 183915,
"is_parallel": true,
"self": 35.0671926720247
},
"communicator.exchange": {
"total": 1044.1114959289575,
"count": 183915,
"is_parallel": true,
"self": 1044.1114959289575
},
"steps_from_proto": {
"total": 145.04754112915225,
"count": 183915,
"is_parallel": true,
"self": 35.15162142514737,
"children": {
"_process_rank_one_or_two_observation": {
"total": 109.89591970400488,
"count": 735660,
"is_parallel": true,
"self": 109.89591970400488
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1293.0248027081188,
"count": 183916,
"self": 6.962872703143603,
"children": {
"process_trajectory": {
"total": 437.8201000129616,
"count": 183916,
"self": 437.4436326859609,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3764673270006824,
"count": 6,
"self": 0.3764673270006824
}
}
},
"_update_policy": {
"total": 848.2418299920137,
"count": 1443,
"self": 461.6994568790328,
"children": {
"TorchPPOOptimizer.update": {
"total": 386.54237311298084,
"count": 69624,
"self": 386.54237311298084
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1199999789823778e-06,
"count": 1,
"self": 1.1199999789823778e-06
},
"TrainerController._save_models": {
"total": 0.05946279100044194,
"count": 1,
"self": 0.0007346769998548552,
"children": {
"RLTrainer._checkpoint": {
"total": 0.058728114000587084,
"count": 1,
"self": 0.058728114000587084
}
}
}
}
}
}
}