{
"dataset_kwargs": {
"class_name": "training.dataset.ImageFolderDataset",
"path": "datasets/tiny-64x64.zip",
"use_labels": true,
"xflip": false,
"cache": true,
"resolution": 64,
"max_size": 100000
},
"data_loader_kwargs": {
"pin_memory": true,
"num_workers": 1,
"prefetch_factor": 2
},
"network_kwargs": {
"model_type": "SongUNet",
"embedding_type": "positional",
"encoder_type": "standard",
"decoder_type": "standard",
"channel_mult_noise": 1,
"resample_filter": [
1,
1
],
"model_channels": 128,
"channel_mult": [
2,
2,
2
],
"class_name": "training.networks.EDMPrecond",
"augment_dim": 9,
"dropout": 0.13,
"use_fp16": false
},
"loss_kwargs": {
"class_name": "training.loss.EDMLoss"
},
"optimizer_kwargs": {
"class_name": "torch.optim.Adam",
"lr": 0.001,
"betas": [
0.9,
0.999
],
"eps": 1e-08
},
"augment_kwargs": {
"class_name": "training.augment.AugmentPipe",
"p": 0.12,
"xflip": 100000000.0,
"yflip": 1,
"scale": 1,
"rotate_frac": 1,
"aniso": 1,
"translate_frac": 1
},
"total_kimg": 200000,
"ema_halflife_kimg": 500,
"batch_size": 256,
"batch_gpu": null,
"loss_scaling": 1.0,
"cudnn_benchmark": true,
"kimg_per_tick": 50,
"snapshot_ticks": 50,
"state_dump_ticks": 500,
"seed": 479076514,
"run_dir": "training-runs/tiny-64x64-cond-ddpmpp-edm-gpus4-batch256-fp32"
}