{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2827010452747345,
"min": 0.27878057956695557,
"max": 1.3358147144317627,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8467.4619140625,
"min": 8343.6533203125,
"max": 40523.2734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29872.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29872.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5949585437774658,
"min": -0.11364227533340454,
"max": 0.654905378818512,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 166.58839416503906,
"min": -26.93321990966797,
"max": 185.33822631835938,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01225257944315672,
"min": 0.004778169095516205,
"max": 0.5398792624473572,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.430722236633301,
"min": 1.275771141052246,
"max": 127.95138549804688,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06832349438419831,
"min": 0.06525146078310369,
"max": 0.07581017602517803,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9565289213787764,
"min": 0.5649015058286646,
"max": 1.071090160831398,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0168747355104036,
"min": 0.00016267441986968968,
"max": 0.0168747355104036,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2362462971456504,
"min": 0.0022774418781756557,
"max": 0.2362462971456504,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.542718914364286e-06,
"min": 7.542718914364286e-06,
"max": 0.00029522070159310003,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010559806480109999,
"min": 0.00010559806480109999,
"max": 0.0035070146309952,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10251420714285715,
"min": 0.10251420714285715,
"max": 0.19840690000000002,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4351989,
"min": 1.4351989,
"max": 2.5690048000000005,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002611692935714286,
"min": 0.0002611692935714286,
"max": 0.00984084931,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036563701100000004,
"min": 0.0036563701100000004,
"max": 0.11692357952000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.024067768827080727,
"min": 0.0221833698451519,
"max": 0.6140232682228088,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.3369487524032593,
"min": 0.310567170381546,
"max": 4.912186145782471,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 327.9139784946237,
"min": 290.2857142857143,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30496.0,
"min": 15904.0,
"max": 33655.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5659456294353888,
"min": -0.9998500519432127,
"max": 1.6763687380589545,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 144.06699790805578,
"min": -31.995201662182808,
"max": 171.3577987626195,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5659456294353888,
"min": -0.9998500519432127,
"max": 1.6763687380589545,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 144.06699790805578,
"min": -31.995201662182808,
"max": 171.3577987626195,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.07804274664933632,
"min": 0.07277603369570812,
"max": 12.76470751594752,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 7.179932691738941,
"min": 5.975544401531806,
"max": 204.23532025516033,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700133366",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700135679"
},
"total": 2313.347362671,
"count": 1,
"self": 0.7099156399999629,
"children": {
"run_training.setup": {
"total": 0.06779406800001198,
"count": 1,
"self": 0.06779406800001198
},
"TrainerController.start_learning": {
"total": 2312.5696529630004,
"count": 1,
"self": 1.437597030977031,
"children": {
"TrainerController._reset_env": {
"total": 4.364818040999808,
"count": 1,
"self": 4.364818040999808
},
"TrainerController.advance": {
"total": 2306.6928190500244,
"count": 63917,
"self": 1.5091755290027322,
"children": {
"env_step": {
"total": 1653.4276456489952,
"count": 63917,
"self": 1519.8727314219716,
"children": {
"SubprocessEnvManager._take_step": {
"total": 132.68931758498525,
"count": 63917,
"self": 4.689221155999576,
"children": {
"TorchPolicy.evaluate": {
"total": 128.00009642898567,
"count": 62526,
"self": 128.00009642898567
}
}
},
"workers": {
"total": 0.8655966420383265,
"count": 63917,
"self": 0.0,
"children": {
"worker_root": {
"total": 2307.6589386580154,
"count": 63917,
"is_parallel": true,
"self": 907.56658908803,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025508030000764848,
"count": 1,
"is_parallel": true,
"self": 0.0007278719999703753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018229310001061094,
"count": 8,
"is_parallel": true,
"self": 0.0018229310001061094
}
}
},
"UnityEnvironment.step": {
"total": 0.053754996999941795,
"count": 1,
"is_parallel": true,
"self": 0.000641763999738032,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00047979800001485273,
"count": 1,
"is_parallel": true,
"self": 0.00047979800001485273
},
"communicator.exchange": {
"total": 0.050888967999981105,
"count": 1,
"is_parallel": true,
"self": 0.050888967999981105
},
"steps_from_proto": {
"total": 0.0017444670002078055,
"count": 1,
"is_parallel": true,
"self": 0.0003887240004587511,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013557429997490544,
"count": 8,
"is_parallel": true,
"self": 0.0013557429997490544
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1400.0923495699853,
"count": 63916,
"is_parallel": true,
"self": 35.898795952989076,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.44069597399516,
"count": 63916,
"is_parallel": true,
"self": 24.44069597399516
},
"communicator.exchange": {
"total": 1240.498163851012,
"count": 63916,
"is_parallel": true,
"self": 1240.498163851012
},
"steps_from_proto": {
"total": 99.25469379198921,
"count": 63916,
"is_parallel": true,
"self": 19.992510525930584,
"children": {
"_process_rank_one_or_two_observation": {
"total": 79.26218326605863,
"count": 511328,
"is_parallel": true,
"self": 79.26218326605863
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 651.7559978720265,
"count": 63917,
"self": 2.667247995144635,
"children": {
"process_trajectory": {
"total": 124.21948795788194,
"count": 63917,
"self": 124.06211604788223,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15737190999971062,
"count": 2,
"self": 0.15737190999971062
}
}
},
"_update_policy": {
"total": 524.8692619189999,
"count": 454,
"self": 317.4320839739971,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.43717794500276,
"count": 22824,
"self": 207.43717794500276
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.047999830916524e-06,
"count": 1,
"self": 1.047999830916524e-06
},
"TrainerController._save_models": {
"total": 0.07441779299961127,
"count": 1,
"self": 0.0013956919997326622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0730221009998786,
"count": 1,
"self": 0.0730221009998786
}
}
}
}
}
}
}