{
  "monitor_metric": "eval_MRR@NM_0,eval_MRR@NM_1",
  "trainer_kwargs": {
    "class_name": "CROSS_MODAL_IR"
  },
  "trainee_kwargs": {
    "class_name": "CLIP_Encoder",
    "freeze_prefixes": [],
    "use_attention": false,
    "symmetric_CL": false,
    "weighted_loss": false,
    "image_type": false,
    "mlm_type": false,
    "tie_weights": false,
    "loss": {
      "class_name": "NLLLoss",
      "align_uniform": true
    }
  },
  "data_module_kwargs": {
    "class_name": "cross_modal_kb_DataModule_article",
    "data_processor": {
      "class_name": "kb_viquae_data_processor",
      "dataset_path": "../../all_data/kb_dataset_new/",
      "kb_path": "../../all_data/passages/",
      "entity_kb_path": "../../all_data/kb/",
      "validation_dataset_path": "../../all_data/"
    },
    "input_key": "input",
    "passage_key": "passage",
    "relevant_indices_key": "BM25_provenance_indices",
    "irrelevant_indices_key": "BM25_irrelevant_indices",
    "use_image": true,
    "add_positives": false,
    "cross_modal_viquae_valid": true,
    "use_mep": false,
    "use_mlm": false,
    "use_CL": true,
    "augmented": false,
    "random_mask": false,
    "dataloader_kwargs": {
      "num_workers": 6,
      "prefetch_factor": 2
    },
    "tokenizer_kwargs": {
      "class_name": "CLIPTokenizer",
      "pretrained_model_name_or_path": "clip-vit-base-patch32_tokenizer"
    },
    "tokenization_kwargs": {
      "max_length": 77,
      "padding": "longest"
    },
    "image_processor_kwargs": {
      "class_name": "ImageFormatter",
      "feature_extractor_kwargs": {
        "class_name": "CLIPFeatureExtractor",
        "pretrained_model_name_or_path": "clip-vit-base-patch32_FE"
      }
    }
  },
  "text_encoder_kwargs": {
    "class_name": "CLIP_Text_Encoder",
    "checkpoint_name": "text_encoder",
    "base_encoder_kwargs": {
      "class_name": "CLIPModel",
      "pretrained_model_name_or_path": "/gpfswork/rech/der/ufc61ee/my_transformers_cache/clip-vit-base-patch32"
    },
    "inference_path": "saved_models/CLIP_article/text_encoder"
  },
  "image_encoder_kwargs": {
    "class_name": "CLIP_Image_Encoder",
    "checkpoint_name": "image_encoder",
    "base_encoder_kwargs": {
      "class_name": "CLIPModel",
      "pretrained_model_name_or_path": "/gpfswork/rech/der/ufc61ee/my_transformers_cache/clip-vit-base-patch32"
    },
    "inference_path": "saved_models/CLIP_article/image_encoder"
  }
}
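
For reference, a minimal sketch of how the CLIP components named in this config could be instantiated with Hugging Face Transformers. The pipeline itself dispatches on the class_name fields through its own factory, which is not reproduced here; the config filename and the substitution of the public openai/clip-vit-base-patch32 Hub checkpoint for the local cache directories (clip-vit-base-patch32_tokenizer, clip-vit-base-patch32_FE, and the /gpfswork/... path) are assumptions made so the sketch is runnable.

import json

from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTokenizer

# Hypothetical filename for the configuration above.
with open("clip_article_config.json") as f:
    config = json.load(f)

dm = config["data_module_kwargs"]

# Assumption: the public Hub checkpoint stands in for the local
# cache paths referenced by the config.
CHECKPOINT = "openai/clip-vit-base-patch32"

tokenizer = CLIPTokenizer.from_pretrained(CHECKPOINT)
feature_extractor = CLIPFeatureExtractor.from_pretrained(CHECKPOINT)

# text_encoder_kwargs and image_encoder_kwargs wrap the same CLIPModel
# backbone; its text and vision towers act as the two retrieval encoders.
clip = CLIPModel.from_pretrained(CHECKPOINT)

# Tokenize a query with the config's tokenization_kwargs (CLIP's text
# context is capped at 77 tokens, matching max_length).
tk = dm["tokenization_kwargs"]
batch = tokenizer(
    ["Who is pictured in this photograph?"],
    max_length=tk["max_length"],
    padding=tk["padding"],
    truncation=True,
    return_tensors="pt",
)
text_embeddings = clip.get_text_features(**batch)  # shape: (1, 512)

The "max_length": 77 and the CLIP tokenizer/feature-extractor pairing follow directly from the config; CLIP's text encoder cannot accept longer sequences, which is why truncation is enabled in the sketch.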