Job:
run_mode: "Training"
#{Training, Predict, Repeat, CV, Hyperparameter, Ensemble, Analysis}
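    #Example invocation (assumption: MatDeepLearn's main.py entry point and flag names):
    #  python main.py --config_path=config.yml --run_mode=Training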
    Training:
        job_name: "my_train_job"
        reprocess: "False"
        model: CGCNN_demo
        load_model: "False"
        save_model: "True"
        model_path: "my_model.pth"
        write_output: "True"
        parallel: "True"
        #seed=0 means random initialization
        seed: 0
    Predict:
        job_name: "my_predict_job"
        reprocess: "False"
        model_path: "my_model.pth"
        write_output: "True"
        seed: 0
    Repeat:
        job_name: "my_repeat_job"
        reprocess: "False"
        model: CGCNN_demo
        model_path: "my_model.pth"
        write_output: "False"
        parallel: "True"
        seed: 0
        ###specific options
        #number of repeat trials
        repeat_trials: 5
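        #e.g. repeat_trials: 5 trains the same model 5 times (with different random
        #seeds when seed=0) to estimate run-to-run variance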
    CV:
        job_name: "my_CV_job"
        reprocess: "False"
        model: CGCNN_demo
        write_output: "True"
        parallel: "True"
        seed: 0
        ###specific options
        #number of folds for n-fold CV
        cv_folds: 5
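        #e.g. cv_folds: 5 trains 5 models, each using 4/5 (80%) of the data for
        #training and the held-out 1/5 (20%) for evaluation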
    Hyperparameter:
        job_name: "my_hyperparameter_job"
        reprocess: "False"
        model: CGCNN_demo
        seed: 0
        ###specific options
        hyper_trials: 10
        #number of concurrent trials (can be greater than the number of GPUs)
        hyper_concurrency: 8
        #frequency of checkpointing and update (default: 1)
        hyper_iter: 1
        #resume a previous hyperparameter optimization run
        hyper_resume: "True"
        #verbosity of Ray Tune output; available: (1, 2, 3)
        hyper_verbosity: 1
        #delete processed datasets when finished
        hyper_delete_processed: "True"
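        #e.g. with hyper_trials: 10 and hyper_concurrency: 8, up to 8 trials run at
        #once and the remaining 2 are queued until a slot frees up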
    Ensemble:
        job_name: "my_ensemble_job"
        reprocess: "False"
        save_model: "False"
        model_path: "my_model.pth"
        write_output: "Partial"
        parallel: "True"
        seed: 0
        ###specific options
        #list of models to use (example: "CGCNN_demo,MPNN_demo,SchNet_demo,MEGNet_demo" or "CGCNN_demo,CGCNN_demo,CGCNN_demo,CGCNN_demo")
        ensemble_list: "CGCNN_demo,CGCNN_demo,CGCNN_demo,CGCNN_demo,CGCNN_demo"
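        #assumption: predictions from the listed models are combined (averaged), so
        #repeating one architecture's name gives a seed ensemble of that architecture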
    Analysis:
        job_name: "my_job"
        reprocess: "False"
        model: CGCNN_demo
        model_path: "my_model.pth"
        write_output: "True"
        seed: 0
Processing:
    #Whether to use the "inmemory" or "large" format for the pytorch-geometric dataset; "inmemory" is recommended unless the dataset is too large
    dataset_type: "inmemory"
    #Path to data files
    data_path: "/data"
    #Path to target file within data_path
    target_path: "targets.csv"
    #Method of obtaining the atom dictionary; available: (provided, default, blank, generated)
    dictionary_source: "default"
    #Path to atom dictionary file within data_path
    dictionary_path: "atom_dict.json"
    #Format of data files (limited to those supported by ASE)
    data_format: "json"
    #Print out processing info
    verbose: "True"
    #graph-specific settings
    graph_max_radius: 8.0
    graph_max_neighbors: 12
    voronoi: "False"
    edge_features: "True"
    graph_edge_length: 50
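    #e.g. graph_max_radius: 8.0 with graph_max_neighbors: 12 keeps at most the 12
    #nearest neighbors within an 8 Å cutoff per atom; graph_edge_length is presumably
    #the number of basis functions used to expand each edge distance (CGCNN-style
    #Gaussian expansion; assumption)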
    #SM-specific settings
    SM_descriptor: "False"
    #SOAP-specific settings
    SOAP_descriptor: "False"
    SOAP_rcut: 8.0
    SOAP_nmax: 6
    SOAP_lmax: 4
    SOAP_sigma: 0.3
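    #SOAP parameters as in the DScribe descriptor (assumption): rcut is the cutoff
    #radius in Å, nmax/lmax the number of radial/angular basis functions, and sigma
    #the width of the atomic Gaussians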
Training:
    #Index of target column in targets.csv
    target_index: 0
    #Loss function (from PyTorch); examples: l1_loss, mse_loss, binary_cross_entropy
    loss: "l1_loss"
    #Ratios for train/val/test split out of a total of 1
    train_ratio: 0.8
    val_ratio: 0.05
    test_ratio: 0.15
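    #the three ratios should sum to 1.0; e.g. 0.8/0.05/0.15 splits 10,000 structures
    #into 8,000 train, 500 validation, and 1,500 test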
    #Training printout frequency (print every n epochs)
    verbosity: 5
Models:
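    #Shared model keys (assumption, based on the demo definitions below): dim1/dim2/dim3
    #are hidden-layer widths; pre_fc_count, gc_count, and post_fc_count set the number
    #of dense layers before, graph-convolution layers within, and dense layers after
    #the message-passing stage; pool/pool_order control the graph-level readout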
    CGCNN_demo:
        model: CGCNN
        dim1: 100
        dim2: 150
        pre_fc_count: 1
        gc_count: 4
        post_fc_count: 3
        pool: "global_mean_pool"
        pool_order: "early"
        batch_norm: "True"
        batch_track_stats: "True"
        act: "relu"
        dropout_rate: 0.0
        epochs: 250
        lr: 0.002
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
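        #with these scheduler_args, PyTorch's ReduceLROnPlateau multiplies the learning
        #rate by 0.8 whenever the monitored loss fails to improve beyond the threshold
        #(0.0002) for 10 consecutive epochs, flooring at min_lr=1e-5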
    MPNN_demo:
        model: MPNN
        dim1: 100
        dim2: 100
        dim3: 100
        pre_fc_count: 1
        gc_count: 4
        post_fc_count: 3
        pool: "global_mean_pool"
        pool_order: "early"
        batch_norm: "True"
        batch_track_stats: "True"
        act: "relu"
        dropout_rate: 0.0
        epochs: 250
        lr: 0.001
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
    SchNet_demo:
        model: SchNet
        dim1: 100
        dim2: 100
        dim3: 150
        cutoff: 8
        pre_fc_count: 1
        gc_count: 4
        post_fc_count: 3
        pool: "global_mean_pool"
        pool_order: "early"
        batch_norm: "True"
        batch_track_stats: "True"
        act: "relu"
        dropout_rate: 0.0
        epochs: 250
        lr: 0.0005
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
    MEGNet_demo:
        model: MEGNet
        dim1: 100
        dim2: 100
        dim3: 100
        pre_fc_count: 1
        gc_count: 4
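        #gc_fc_count: number of dense layers inside each MEGNet block (MEGNet-specific;
        #assumption based on the MEGNet architecture)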
        gc_fc_count: 1
        post_fc_count: 3
        pool: "global_mean_pool"
        pool_order: "early"
        batch_norm: "True"
        batch_track_stats: "True"
        act: "relu"
        dropout_rate: 0.0
        epochs: 250
        lr: 0.0005
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
    GCN_demo:
        model: GCN
        dim1: 100
        dim2: 150
        pre_fc_count: 1
        gc_count: 4
        post_fc_count: 3
        pool: "global_mean_pool"
        pool_order: "early"
        batch_norm: "True"
        batch_track_stats: "True"
        act: "relu"
        dropout_rate: 0.0
        epochs: 250
        lr: 0.002
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
    SM_demo:
        model: SM
        dim1: 100
        fc_count: 2
        epochs: 200
        lr: 0.002
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
    SOAP_demo:
        model: SOAP
        dim1: 100
        fc_count: 2
        epochs: 200
        lr: 0.002
        batch_size: 100
        optimizer: "AdamW"
        optimizer_args: {}
        scheduler: "ReduceLROnPlateau"
        scheduler_args: {"mode":"min", "factor":0.8, "patience":10, "min_lr":0.00001, "threshold":0.0002}
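#To train with a different architecture, point Job.<mode>.model (or ensemble_list)
#at any key defined under Models, e.g. model: SchNet_demo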