import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# --- Model / data configuration -------------------------------------------
# Checkpoint: InternLM-XComposer2-VL 7B (multi-image vision-language model).
ckpt_path = "internlm/internlm-xcomposer2-vl-7b"

# trust_remote_code is required: this checkpoint ships custom modeling code.
# NOTE(review): trust_remote_code executes code downloaded from the hub —
# acceptable only for trusted repositories.
tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    ckpt_path,
    torch_dtype=torch.float16,      # fp16 so the 7B model fits on one GPU
    trust_remote_code=True,
    device_map={"": "cuda:1"},      # pin the entire model to GPU 1
)

# Context-length cap read by the checkpoint's remote chat() implementation.
model.max_length = 5120

# Dataset locations; "{}" is filled in with the split name chosen below.
image_folder = "/home/ps/Downloads/datasets/micbench/micbench_imgs/"
questions_file = "/home/ps/Downloads/datasets/micbench/micbench_{}.json"
answers_file = "/home/ps/Downloads/InternLM-XComposer/q-bench2-xcomposer2-micbench-{}.jsonl"

split = "test"
|
|
import json
from ast import literal_eval
from PIL import Image
import numpy as np
from tqdm import tqdm


# Load the question set for the chosen split. A ".json" file holds one JSON
# array; any other extension is treated as one Python-literal record per line.
questions_path = questions_file.format(split)
with open(questions_path, encoding="utf-8") as f:
    if questions_path.endswith("json"):
        llvqa_data = json.load(f)
    else:
        # Skip blank lines: literal_eval("") / literal_eval("\n") would raise.
        # NOTE(review): literal_eval assumes the data file is trusted.
        llvqa_data = [literal_eval(line) for line in f if line.strip()]

# Sanity check: show the first record's schema.
print(llvqa_data[0])
| |
# Switch to inference mode (disables dropout etc.).
model = model.eval()

# Per-category accuracy accumulators (3 x 4 grid).
# NOTE(review): neither array is ever updated in this script — they look like
# dead state carried over from another benchmark; confirm before removing.
correct_ = np.zeros((3, 4))
all_ = np.zeros((3, 4))

# Running count of correctly answered questions.
correct = 0

pbar = tqdm(total=len(llvqa_data))
|
|
# Main evaluation loop: one multiple-choice question per record.
for i, llddata in enumerate(llvqa_data):
    img_path_list = llddata["img_path"]
    # Skip samples with more than 3 images (prompt/context budget limit).
    if len(img_path_list) > 3:
        pbar.update(1)
        continue

    # Preprocess every image of the sample and batch them along dim 0.
    images = []
    for img_path in img_path_list:
        image = Image.open(image_folder + img_path).convert("RGB")
        images.append(model.vis_processor(image))
    image = torch.stack(images)

    # Build the options text and remember the gold choice letter ("A".."D").
    options_prompt = ''
    for choice, ans in zip(["A.", "B.", "C.", "D."], llddata["candidates"]):
        options_prompt += f"{choice} {ans}\n"
        if ans == llddata["correct_ans"]:
            llddata["correct_choice"] = choice[0]

    # One <ImageHere> placeholder per image, then the instruction + question.
    query = '<ImageHere> ' * len(img_path_list) + 'Please answer this question by choosing the correct choice.'
    query += "Context: N/A\nQuestion: " + llddata["question"] + '\nOptions: ' + options_prompt
    print(img_path_list, query)

    # Greedy decoding; max_new_tokens=6 is enough for "The answer is X".
    with torch.no_grad():
        with torch.cuda.amp.autocast():
            response, history = model.chat(tokenizer, query=query, image=image, history=[], do_sample=False, max_new_tokens=6)

    outputs = response.replace("The answer is ", "")
    # fix: use .get() — if correct_ans matched no candidate, "correct_choice"
    # was never set and plain indexing raised KeyError (the progress-bar line
    # below already defends with .get; this check did not).
    gold = llddata.get("correct_choice")
    if gold is not None and gold in outputs:
        correct += 1

    # Persist the annotated record, including the model's answer, as JSONL.
    llddata["response"] = outputs
    with open(answers_file.format(split), "a") as wf:
        json.dump(llddata, wf)
        wf.write("\n")  # fix: one record per line, as the .jsonl format requires

    pbar.update(1)
    pbar.set_description("[Running Accuracy]: {:.4f},[Response]: {}, [Correct Ans]: {}, , [Prog]: {}".format(correct / (i + 1), outputs, llddata.get("correct_choice", -1), i + 1))