# YOLO26n / code / python / run_test.py
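"""Run YOLO26n object detection with a quantized QNN model via aidlite.

Pipeline: letterbox preprocessing -> QNN backbone inference on the DSP/NPU ->
ONNX post-processing model -> confidence filtering, box rescaling, and drawing.
"""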
import time
import numpy as np
import cv2
import os
import aidlite
import argparse
import onnxruntime
root_path = os.path.dirname(os.path.abspath(__file__))
"""返回 COCO 数据集的类别名称(80 类)。"""
classes=[
"person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
"traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
"dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack",
"umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball",
"kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket",
"bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair",
"sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse",
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator",
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"
]
def letterbox(
im,
new_shape,
color=(114, 114, 114),
auto=False,
scaleFill=False,
scaleup=True,
stride=32,
):
"""
Resize and pad image while meeting stride-multiple constraints
Returns:
im (array): (height, width, 3)
ratio (array): [w_ratio, h_ratio]
(dw, dh) (array): [w_padding h_padding]
"""
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int): # [h_rect, w_rect]
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # wh ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) # w h
dw, dh = (
new_shape[1] - new_unpad[0],
new_shape[0] - new_unpad[1],
) # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0]) # [w h]
ratio = (
new_shape[1] / shape[1],
new_shape[0] / shape[0],
) # [w_ratio, h_ratio]
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(
im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
)
return im, ratio, (dw, dh)
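# A minimal usage sketch (shapes chosen for illustration): letterboxing an
# 810x1080 (HxW) image to 640x640 scales it by min(640/810, 640/1080) ~= 0.593,
# resizes it to 640x480, and pads 80 gray rows on the top and bottom:
#   padded, ratio, (dw, dh) = letterbox(im, (640, 640))  # padded.shape == (640, 640, 3)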
class Colors:
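    """Fixed 20-color palette; __call__ maps a class index to an RGB (or BGR) tuple."""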
def __init__(self):
hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
self.n = len(self.palette)
def __call__(self, i, bgr=False):
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h):
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
def rescale_coords(boxes, image_shape, input_shape):
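    """Map boxes from letterboxed-input coordinates back to the original image.

    Inverts the letterbox transform: subtract the per-side padding, divide by
    the resize scale, then clip to the image bounds.
    """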
image_height, image_width = image_shape
input_height, input_width = input_shape
scale = min(input_width / image_width, input_height / image_height)
pad_w = (input_width - image_width * scale) / 2
pad_h = (input_height - image_height * scale) / 2
boxes[:, [0, 2]] = (boxes[:, [0, 2]] - pad_w) / scale
boxes[:, [1, 3]] = (boxes[:, [1, 3]] - pad_h) / scale
boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, image_width)
boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, image_height)
return boxes.astype(int)
def preprocess(image, input_shape):
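    """Letterbox-resize, convert BGR to RGB, add a batch dim, and scale to [0, 1].

    The QNN model takes NHWC input ([1, 640, 640, 3]), so no channel transpose
    is applied (the commented-out line below shows the NCHW alternative).
    """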
# Resize
input_img = letterbox(image, input_shape)[0]
# Transpose
# input_img = input_img[..., ::-1].transpose(2, 0, 1)
input_img = input_img[..., ::-1]
# Expand
input_img = input_img[np.newaxis, :, :, :].astype(np.float32)
# Contiguous
input_img = np.ascontiguousarray(input_img)
# Norm
blob = input_img / 255.0
return blob
def postprocess(output_data, conf_thres, image_shape, input_shape):
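    """Filter rows by confidence and rescale boxes to original-image coordinates."""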
    outs = output_data  # each row is [x1, y1, x2, y2, score, label]
outs = outs[outs[:, 4] >= conf_thres]
boxes = outs[:, :4]
scores = outs[:, -2]
labels = outs[:, -1].astype(int)
boxes = rescale_coords(boxes, image_shape, input_shape)
return boxes, scores, labels
class qnn_yolo26:
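    """Wraps an aidlite QNN interpreter configured for the DSP accelerator."""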
def __init__(self,model_path,input_shape,output_shape):
self.input_shape = input_shape
self.output_shape = output_shape
self.config = aidlite.Config.create_instance()
        if self.config is None:
            raise RuntimeError("Create config failed!")
self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
self.config.is_quantify_model = 1
        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            raise RuntimeError("Create model failed!")
        self.model.set_model_properties(self.input_shape, aidlite.DataType.TYPE_FLOAT32, self.output_shape, aidlite.DataType.TYPE_FLOAT32)
self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(self.model, self.config)
        if self.interpreter is None:
            raise RuntimeError("build_interpretper_from_model_and_config failed!")
        result = self.interpreter.init()
        if result != 0:
            raise RuntimeError("interpreter init failed!")
        result = self.interpreter.load_model()
        if result != 0:
            raise RuntimeError("interpreter load model failed!")
        print("Detection model loaded successfully!")
    def __del__(self):
        # Note: `destory` is the aidlite SDK's own spelling.
        if getattr(self, "interpreter", None) is not None:
            self.interpreter.destory()
def __call__(self, img_input,invoke_nums):
result = self.interpreter.set_input_tensor(0, img_input.data)
if result != 0:
print("interpreter set_input_tensor() failed")
invoke_time=[]
for i in range(invoke_nums):
t1=time.time()
result = self.interpreter.invoke()
            if result != 0:
                print("interpreter invoke() failed")
cost_time = (time.time()-t1)*1000
invoke_time.append(cost_time)
max_invoke_time = max(invoke_time)
min_invoke_time = min(invoke_time)
mean_invoke_time = sum(invoke_time)/invoke_nums
var_invoketime=np.var(invoke_time)
print("====================================")
print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
print("====================================")
qnn_1 = self.interpreter.get_output_tensor(0)
qnn_2 = self.interpreter.get_output_tensor(1)
qnn_out = sorted([qnn_1,qnn_2], key=len)
qnn_local = qnn_out[0].reshape(*self.output_shape[0])
qnn_conf = qnn_out[1].reshape(*self.output_shape[1])
output1 = np.concatenate([qnn_local, qnn_conf], axis = 1).transpose(0,2,1)
return output1
class onnx_yolov26:
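    """Single-threaded ONNX Runtime session wrapping the post-processing model."""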
def __init__(self,model_path):
self.sess_options = onnxruntime.SessionOptions()
self.sess_options.intra_op_num_threads = 1
self.sess = onnxruntime.InferenceSession(model_path,sess_options=self.sess_options)
self.outname = [i.name for i in self.sess.get_outputs()]
self.inname = [i.name for i in self.sess.get_inputs()]
    def __call__(self, img_input):
        inp = {self.inname[0]: img_input}
        output = self.sess.run(self.outname, inp)[0]
        return output
def main(args):
input_shape = (640, 640)
conf_thres = 0.25
img_path = args.imgs
invoke_nums = args.invoke_nums
qnn_path = args.target_model
    # QNN + ONNX inference
qnn_input_shape = [[1,640,640,3]]
qnn_output_shape = [[1,4,8400],[1,80,8400]]
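    # Two raw QNN heads: [1, 4, 8400] box coordinates and [1, 80, 8400] class
    # scores; qnn_yolo26.__call__ concatenates and transposes them to [1, 8400, 84].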
qnn_model = qnn_yolo26(qnn_path,qnn_input_shape,qnn_output_shape)
onnx_model_path = f"{root_path}/../models/post_process.onnx"
onnx_model = onnx_yolov26(onnx_model_path)
print("Begin to run qnn...")
im0 = cv2.imread(img_path)
image_shape = im0.shape[:2]
img_qnn = preprocess(im0, input_shape)
out1 = qnn_model(img_qnn,invoke_nums)
out2 = onnx_model(out1)[0]
boxes, scores, labels = postprocess(out2, conf_thres, image_shape, input_shape)
print(f"Detect {len(boxes)} targets")
colors = Colors()
for label, score, box in zip(labels, scores, boxes):
label_text = f'{classes[label]}: {score:.2f}'
color = colors(label, True)
cv2.rectangle(im0, (box[0], box[1]), (box[2], box[3]), color, 2, lineType=cv2.LINE_AA)
cv2.putText(im0, label_text, (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
output_image_path = f"{root_path}/detected_results.jpg"
cv2.imwrite(output_image_path, im0)
print(f"Saved detected result to {output_image_path}")
def parser_args():
    parser = argparse.ArgumentParser(description="Run inference with the YOLO26 model")
    parser.add_argument('--target_model', type=str, default=f"{root_path}/../models/cutoff_yolo26n_qcs6490_w8a8.qnn236.ctx.bin", help="Path of the QNN model")
    parser.add_argument('--imgs', type=str, default=f"{root_path}/bus.jpg", help="Path of the input image")
    parser.add_argument('--invoke_nums', type=int, default=10, help="Number of inference invocations")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parser_args()
main(args)
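# Example invocation (using this repo's default paths; adjust to your device):
#   python3 run_test.py --target_model ../models/cutoff_yolo26n_qcs6490_w8a8.qnn236.ctx.bin \
#       --imgs bus.jpg --invoke_nums 10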