"""OpenCompass run config: LongBench narrativeqa evaluated with a local HF causal LM.

The four module-level names (``datasets``, ``eval``, ``models``, ``work_dir``)
are the interface OpenCompass reads from a config module; their names and
values must stay as-is.  NOTE(review): ``eval`` deliberately shadows the
builtin — OpenCompass looks the config up by exactly this attribute name.
"""

# Checkpoint directory; the same path serves as both model and tokenizer source.
_CKPT = '/mnt/jfzn/msj/train_exp/mask_gdn_1B_hrr-rank4'

# Zero-shot generation prompt; {context} and {input} are filled from the
# reader's input columns at inference time.
_NARRATIVEQA_PROMPT = (
    'You are given a story, which can be either a novel or a movie script, and a question. Answer the question as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story as concisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:'
)

# One dataset group containing the single LongBench narrativeqa task.
datasets = [
    [
        {
            'abbr': 'LongBench_narrativeqa',
            'eval_cfg': {
                # F1 overlap scoring on the model ("BOT") turn.
                'evaluator': {'type': 'opencompass.datasets.LongBenchF1Evaluator'},
                'pred_role': 'BOT',
            },
            'infer_cfg': {
                'inferencer': {
                    'max_out_len': 128,
                    'type': 'opencompass.openicl.icl_inferencer.GenInferencer',
                },
                'prompt_template': {
                    'template': {
                        'round': [
                            {'prompt': _NARRATIVEQA_PROMPT, 'role': 'HUMAN'},
                        ],
                    },
                    'type': 'opencompass.openicl.icl_prompt_template.PromptTemplate',
                },
                # Zero-shot: no in-context examples are retrieved.
                'retriever': {'type': 'opencompass.openicl.icl_retriever.ZeroRetriever'},
            },
            'name': 'narrativeqa',
            'path': 'opencompass/Longbench',
            'reader_cfg': {
                'input_columns': ['context', 'input'],
                'output_column': 'answers',
                'test_split': 'test',
                'train_split': 'test',
            },
            'type': 'opencompass.datasets.LongBenchnarrativeqaDataset',
        },
    ],
]

# Ask the runner's task to dump per-sample prediction details alongside scores.
eval = {'runner': {'task': {'dump_details': True}}}

# Single HuggingFace causal-LM entry under evaluation.
models = [
    {
        'abbr': 'mask_gdn-1.3B',
        'batch_padding': False,
        'batch_size': 16,
        'max_out_len': 100,
        'max_seq_len': 16384,
        'path': _CKPT,
        'run_cfg': {'num_gpus': 1},
        'tokenizer_path': _CKPT,
        'type': 'opencompass.models.HuggingFaceCausalLM',
    },
]

# Timestamped output directory for this run's predictions and results.
work_dir = 'outputs/default/20251127_164548'