{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.322129487991333,
"min": 0.322129487991333,
"max": 1.507158637046814,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9699.962890625,
"min": 9699.962890625,
"max": 45721.1640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989910.0,
"min": 29952.0,
"max": 989910.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989910.0,
"min": 29952.0,
"max": 989910.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6654283404350281,
"min": -0.12162134051322937,
"max": 0.6654283404350281,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 192.3087921142578,
"min": -28.824256896972656,
"max": 192.3087921142578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008051485754549503,
"min": 0.005244612693786621,
"max": 0.1955219805240631,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.3268795013427734,
"min": 1.4999592304229736,
"max": 47.12079620361328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0718197513896961,
"min": 0.06462203201607779,
"max": 0.07502773609857762,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0054765194557453,
"min": 0.4972768485684607,
"max": 1.0852015428288184,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016475532296441723,
"min": 0.0006775765604752274,
"max": 0.017533378427158044,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2306574521501841,
"min": 0.008808495286177956,
"max": 0.2454672979802126,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.705340288728571e-06,
"min": 7.705340288728571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001078747640422,
"min": 0.0001078747640422,
"max": 0.003508107530630899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256841428571431,
"min": 0.10256841428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359578000000004,
"min": 1.3886848,
"max": 2.5693691000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002665845871428571,
"min": 0.0002665845871428571,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00373218422,
"min": 0.00373218422,
"max": 0.11695997308999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010828888975083828,
"min": 0.010828888975083828,
"max": 0.3384212553501129,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15160444378852844,
"min": 0.15160444378852844,
"max": 2.3689486980438232,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 279.02777777777777,
"min": 279.02777777777777,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30135.0,
"min": 15984.0,
"max": 32784.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6642605365689742,
"min": -1.0000000521540642,
"max": 1.7106237502971497,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 181.40439848601818,
"min": -30.306201703846455,
"max": 181.40439848601818,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6642605365689742,
"min": -1.0000000521540642,
"max": 1.7106237502971497,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 181.40439848601818,
"min": -30.306201703846455,
"max": 181.40439848601818,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.031287780978370865,
"min": 0.031287780978370865,
"max": 7.178404874168336,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.4103681266424246,
"min": 3.1845385901688132,
"max": 114.85447798669338,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1690104651",
"python_version": "3.10.6 (main, May 29 2023, 11:10:38) [GCC 11.3.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1690106978"
},
"total": 2327.142123697,
"count": 1,
"self": 0.47579881900037435,
"children": {
"run_training.setup": {
"total": 0.03231194399995729,
"count": 1,
"self": 0.03231194399995729
},
"TrainerController.start_learning": {
"total": 2326.6340129339997,
"count": 1,
"self": 1.3428490581013648,
"children": {
"TrainerController._reset_env": {
"total": 5.556313808999903,
"count": 1,
"self": 5.556313808999903
},
"TrainerController.advance": {
"total": 2319.640301463898,
"count": 64042,
"self": 1.4302620418216065,
"children": {
"env_step": {
"total": 1659.5688652709694,
"count": 64042,
"self": 1553.0644509681001,
"children": {
"SubprocessEnvManager._take_step": {
"total": 105.6922641699698,
"count": 64042,
"self": 4.721253984011128,
"children": {
"TorchPolicy.evaluate": {
"total": 100.97101018595868,
"count": 62561,
"self": 100.97101018595868
}
}
},
"workers": {
"total": 0.8121501328994327,
"count": 64042,
"self": 0.0,
"children": {
"worker_root": {
"total": 2321.503562342989,
"count": 64042,
"is_parallel": true,
"self": 881.4217742140106,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002452171999948405,
"count": 1,
"is_parallel": true,
"self": 0.0006822289997217013,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017699430002267036,
"count": 8,
"is_parallel": true,
"self": 0.0017699430002267036
}
}
},
"UnityEnvironment.step": {
"total": 0.04832729100007782,
"count": 1,
"is_parallel": true,
"self": 0.0005591050000930409,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005108129998916411,
"count": 1,
"is_parallel": true,
"self": 0.0005108129998916411
},
"communicator.exchange": {
"total": 0.04550632199993743,
"count": 1,
"is_parallel": true,
"self": 0.04550632199993743
},
"steps_from_proto": {
"total": 0.0017510510001557122,
"count": 1,
"is_parallel": true,
"self": 0.00034772399999383197,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014033270001618803,
"count": 8,
"is_parallel": true,
"self": 0.0014033270001618803
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1440.0817881289784,
"count": 64041,
"is_parallel": true,
"self": 34.32635555296429,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.743758521027075,
"count": 64041,
"is_parallel": true,
"self": 22.743758521027075
},
"communicator.exchange": {
"total": 1281.5819890810083,
"count": 64041,
"is_parallel": true,
"self": 1281.5819890810083
},
"steps_from_proto": {
"total": 101.42968497397874,
"count": 64041,
"is_parallel": true,
"self": 20.17562130699457,
"children": {
"_process_rank_one_or_two_observation": {
"total": 81.25406366698417,
"count": 512328,
"is_parallel": true,
"self": 81.25406366698417
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 658.6411741511072,
"count": 64042,
"self": 2.374792865042764,
"children": {
"process_trajectory": {
"total": 109.61476486606489,
"count": 64042,
"self": 109.40732821406527,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2074366519996147,
"count": 2,
"self": 0.2074366519996147
}
}
},
"_update_policy": {
"total": 546.6516164199995,
"count": 451,
"self": 357.9265664339639,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.72504998603563,
"count": 22806,
"self": 188.72504998603563
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.639999578008428e-07,
"count": 1,
"self": 8.639999578008428e-07
},
"TrainerController._save_models": {
"total": 0.0945477390000633,
"count": 1,
"self": 0.0013600190000033763,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09318772000005993,
"count": 1,
"self": 0.09318772000005993
}
}
}
}
}
}
}