{
  "architectures": [
    "FIMMJP"
  ],
  "dtype": "float32",
  "initial_distribution_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      128,
      128
    ],
    "initialization_scheme": "lecun_normal",
    "name": "fim.models.blocks.base.MLP"
  },
  "intensity_matrix_decoder": {
    "dropout": 0.1,
    "hidden_act": {
      "name": "torch.nn.SELU"
    },
    "hidden_layers": [
      128,
      128
    ],
    "initialization_scheme": "lecun_normal",
    "name": "fim.models.blocks.base.MLP"
  },
  "model_type": "fimmjp",
  "n_states": 6,
  "path_attention": {
    "embed_dim": 256,
    "n_heads": 4,
    "n_queries": 16,
    "name": "fim.models.blocks.MultiHeadLearnableQueryAttention",
    "output_projection": true
  },
  "pos_encodings": {
    "name": "fim.models.blocks.positional_encodings.SineTimeEncoding",
    "out_features": 256
  },
  "transformers_version": "4.57.1",
  "ts_encoder": {
    "encoder_layer": {
      "batch_first": true,
      "d_model": 256,
      "dim_feedforward": 1024,
      "dropout": 0.1,
      "name": "torch.nn.TransformerEncoderLayer",
      "nhead": 4
    },
    "name": "torch.nn.TransformerEncoder",
    "num_layers": 4
  },
  "use_adjacency_matrix": false,
  "use_num_of_paths": true
}
|
|