{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6534873843193054,
"min": 0.6534873843193054,
"max": 1.5431253910064697,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19531.431640625,
"min": 19531.431640625,
"max": 50713.2734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989922.0,
"min": 29952.0,
"max": 989922.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5039507150650024,
"min": -0.12338055670261383,
"max": 0.5929787755012512,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 138.08248901367188,
"min": -29.487953186035156,
"max": 163.66213989257812,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03150377422571182,
"min": 0.00490775378420949,
"max": 0.4278498888015747,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.632034301757812,
"min": 1.2969324588775635,
"max": 100.11687469482422,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06848002173091645,
"min": 0.06330861079775439,
"max": 0.07399730283015296,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9587203042328304,
"min": 0.4844167022529291,
"max": 1.0347373552549648,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016658629933530296,
"min": 0.0010643052913413462,
"max": 0.016658629933530296,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23322081906942416,
"min": 0.007450137039389423,
"max": 0.23322081906942416,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.547733198407141e-06,
"min": 7.547733198407141e-06,
"max": 0.0002950848016384,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010566826477769997,
"min": 0.00010566826477769997,
"max": 0.0027920981693007004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251587857142859,
"min": 0.10251587857142859,
"max": 0.19836159999999997,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4352223000000002,
"min": 1.3684608000000003,
"max": 2.2755587,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002613362692857142,
"min": 0.0002613362692857142,
"max": 0.00983632384,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003658707769999999,
"min": 0.003658707769999999,
"max": 0.09309686007000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013311014510691166,
"min": 0.013190694153308868,
"max": 0.4133564829826355,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18635420501232147,
"min": 0.18466971814632416,
"max": 2.8934953212738037,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.0978260869565,
"min": 323.0978260869565,
"max": 999.0,
"count": 32
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29725.0,
"min": 1685.0,
"max": 62214.0,
"count": 32
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.654105468601971,
"min": -1.0000000521540642,
"max": 1.6611124759539961,
"count": 32
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 150.52359764277935,
"min": -58.27420325577259,
"max": 150.52359764277935,
"count": 32
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.654105468601971,
"min": -1.0000000521540642,
"max": 1.6611124759539961,
"count": 32
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 150.52359764277935,
"min": -58.27420325577259,
"max": 150.52359764277935,
"count": 32
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04542399158142891,
"min": 0.04542399158142891,
"max": 4.694267954826355,
"count": 32
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.133583233910031,
"min": 1.8478424977511168,
"max": 117.35669887065887,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1682276088",
"python_version": "3.9.16 | packaged by conda-forge | (main, Feb 1 2023, 21:39:03) \n[GCC 11.3.0]",
"command_line_arguments": "/home/byron/miniconda3/envs/torch/bin/mlagents-learn ./ml-agents/config/ppo/PyramidsRND.yaml --env=./ml-agents/training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics --torch-device=cuda:0 --num-envs 4 --num-areas 8",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.21.2",
"end_time_seconds": "1682277165"
},
"total": 1076.351261099,
"count": 1,
"self": 0.4212915969983442,
"children": {
"run_training.setup": {
"total": 0.023083496998879127,
"count": 1,
"self": 0.023083496998879127
},
"TrainerController.start_learning": {
"total": 1075.906886005003,
"count": 1,
"self": 1.1175611507642316,
"children": {
"TrainerController._reset_env": {
"total": 12.854328707995592,
"count": 1,
"self": 12.854328707995592
},
"TrainerController.advance": {
"total": 1061.8692720722465,
"count": 50711,
"self": 1.0072531040204922,
"children": {
"env_step": {
"total": 308.1554301210999,
"count": 50711,
"self": 127.75338604554418,
"children": {
"SubprocessEnvManager._take_step": {
"total": 179.70773294112587,
"count": 63944,
"self": 3.6913824499060865,
"children": {
"TorchPolicy.evaluate": {
"total": 176.01635049121978,
"count": 62735,
"self": 176.01635049121978
}
}
},
"workers": {
"total": 0.6943111344298813,
"count": 50711,
"self": 0.0,
"children": {
"worker_root": {
"total": 4300.106483937736,
"count": 63941,
"is_parallel": true,
"self": 3397.442163723812,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004771393985720351,
"count": 4,
"is_parallel": true,
"self": 0.00135901098838076,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0034123829973395914,
"count": 32,
"is_parallel": true,
"self": 0.0034123829973395914
}
}
},
"UnityEnvironment.step": {
"total": 0.12181193399010226,
"count": 4,
"is_parallel": true,
"self": 0.000998283998342231,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0016725879977457225,
"count": 4,
"is_parallel": true,
"self": 0.0016725879977457225
},
"communicator.exchange": {
"total": 0.11608262499794364,
"count": 4,
"is_parallel": true,
"self": 0.11608262499794364
},
"steps_from_proto": {
"total": 0.003058436996070668,
"count": 4,
"is_parallel": true,
"self": 0.0007625639991601929,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002295872996910475,
"count": 32,
"is_parallel": true,
"self": 0.002295872996910475
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 902.6643202139239,
"count": 63937,
"is_parallel": true,
"self": 16.631254727501073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.274879490956664,
"count": 63937,
"is_parallel": true,
"self": 12.274879490956664
},
"communicator.exchange": {
"total": 828.0996875454002,
"count": 63937,
"is_parallel": true,
"self": 828.0996875454002
},
"steps_from_proto": {
"total": 45.65849845006596,
"count": 63937,
"is_parallel": true,
"self": 11.43318056021235,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.22531788985361,
"count": 511496,
"is_parallel": true,
"self": 34.22531788985361
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 752.7065888471261,
"count": 50711,
"self": 2.05000208268757,
"children": {
"process_trajectory": {
"total": 120.992824682442,
"count": 50711,
"self": 120.85008095644298,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14274372599902563,
"count": 2,
"self": 0.14274372599902563
}
}
},
"_update_policy": {
"total": 629.6637620819965,
"count": 420,
"self": 310.2301196816261,
"children": {
"TorchPPOOptimizer.update": {
"total": 319.43364240037045,
"count": 22926,
"self": 319.43364240037045
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.999907782301307e-07,
"count": 1,
"self": 6.999907782301307e-07
},
"TrainerController._save_models": {
"total": 0.06572337400575634,
"count": 1,
"self": 0.0007505450048483908,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06497282900090795,
"count": 1,
"self": 0.06497282900090795
}
}
}
}
}
}
}