{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.402813196182251,
"min": 1.402813196182251,
"max": 1.4272922277450562,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70106.9921875,
"min": 67885.1171875,
"max": 78319.3125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 84.85540069686411,
"min": 81.11822660098522,
"max": 419.19166666666666,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 48707.0,
"min": 48707.0,
"max": 50303.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999956.0,
"min": 49790.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999956.0,
"min": 49790.0,
"max": 1999956.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4303476810455322,
"min": 0.15938083827495575,
"max": 2.509798765182495,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1397.449951171875,
"min": 18.966320037841797,
"max": 1513.8768310546875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.753478131605231,
"min": 1.897657567713441,
"max": 3.97343139334618,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2158.249925673008,
"min": 225.82125055789948,
"max": 2331.0211389660835,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.753478131605231,
"min": 1.897657567713441,
"max": 3.97343139334618,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2158.249925673008,
"min": 225.82125055789948,
"max": 2331.0211389660835,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017542432476249006,
"min": 0.013167783810179873,
"max": 0.01999284463818185,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.052627297428747014,
"min": 0.026335567620359747,
"max": 0.05660752055312818,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.06069977813296848,
"min": 0.020472330910464125,
"max": 0.06069977813296848,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.18209933439890544,
"min": 0.04094466182092825,
"max": 0.18209933439890544,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.7271987576333377e-06,
"min": 3.7271987576333377e-06,
"max": 0.00029536245154585,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1181596272900013e-05,
"min": 1.1181596272900013e-05,
"max": 0.00084407806864065,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10124236666666668,
"min": 0.10124236666666668,
"max": 0.19845415000000008,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3037271,
"min": 0.20763020000000004,
"max": 0.58135935,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.199409666666677e-05,
"min": 7.199409666666677e-05,
"max": 0.004922862084999999,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021598229000000028,
"min": 0.00021598229000000028,
"max": 0.014069831565000001,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677826546",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677829091"
},
"total": 2544.381329444,
"count": 1,
"self": 0.4397814650001237,
"children": {
"run_training.setup": {
"total": 0.11346896200001311,
"count": 1,
"self": 0.11346896200001311
},
"TrainerController.start_learning": {
"total": 2543.828079017,
"count": 1,
"self": 4.49633737296017,
"children": {
"TrainerController._reset_env": {
"total": 10.192443646999948,
"count": 1,
"self": 10.192443646999948
},
"TrainerController.advance": {
"total": 2529.03008423004,
"count": 232475,
"self": 4.9418382311246205,
"children": {
"env_step": {
"total": 1972.3102413570114,
"count": 232475,
"self": 1649.7613659850465,
"children": {
"SubprocessEnvManager._take_step": {
"total": 319.60363437392414,
"count": 232475,
"self": 17.012949197860735,
"children": {
"TorchPolicy.evaluate": {
"total": 302.5906851760634,
"count": 222989,
"self": 75.87337881610222,
"children": {
"TorchPolicy.sample_actions": {
"total": 226.71730635996119,
"count": 222989,
"self": 226.71730635996119
}
}
}
}
},
"workers": {
"total": 2.945240998040731,
"count": 232475,
"self": 0.0,
"children": {
"worker_root": {
"total": 2535.1377635780213,
"count": 232475,
"is_parallel": true,
"self": 1197.8150087019726,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010571869999580485,
"count": 1,
"is_parallel": true,
"self": 0.0004010869999433453,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006561000000147033,
"count": 2,
"is_parallel": true,
"self": 0.0006561000000147033
}
}
},
"UnityEnvironment.step": {
"total": 0.033754301000044507,
"count": 1,
"is_parallel": true,
"self": 0.00033843500000330096,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00023110800003678378,
"count": 1,
"is_parallel": true,
"self": 0.00023110800003678378
},
"communicator.exchange": {
"total": 0.030606949000002714,
"count": 1,
"is_parallel": true,
"self": 0.030606949000002714
},
"steps_from_proto": {
"total": 0.0025778090000017073,
"count": 1,
"is_parallel": true,
"self": 0.00039709699996137715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00218071200004033,
"count": 2,
"is_parallel": true,
"self": 0.00218071200004033
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1337.3227548760487,
"count": 232474,
"is_parallel": true,
"self": 39.38093851478948,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 85.13134265409457,
"count": 232474,
"is_parallel": true,
"self": 85.13134265409457
},
"communicator.exchange": {
"total": 1117.652396641054,
"count": 232474,
"is_parallel": true,
"self": 1117.652396641054
},
"steps_from_proto": {
"total": 95.15807706611054,
"count": 232474,
"is_parallel": true,
"self": 40.6927535691222,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.46532349698833,
"count": 464948,
"is_parallel": true,
"self": 54.46532349698833
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 551.7780046419041,
"count": 232475,
"self": 6.727390398930652,
"children": {
"process_trajectory": {
"total": 178.01802080397084,
"count": 232475,
"self": 176.71076990596993,
"children": {
"RLTrainer._checkpoint": {
"total": 1.307250898000916,
"count": 10,
"self": 1.307250898000916
}
}
},
"_update_policy": {
"total": 367.0325934390026,
"count": 97,
"self": 308.87235727601194,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.16023616299066,
"count": 2910,
"self": 58.16023616299066
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.629999683762435e-07,
"count": 1,
"self": 8.629999683762435e-07
},
"TrainerController._save_models": {
"total": 0.10921290400028738,
"count": 1,
"self": 0.002158216000225366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10705468800006201,
"count": 1,
"self": 0.10705468800006201
}
}
}
}
}
}
}