# YOLOv8n / code / python / yolov8n.py
# Uploaded via huggingface_hub by qc903113684 (commit 1f34905, verified)
import numpy as np
import cv2
import aidlite
from utils import eqprocess, xywh2xyxy, NMS
import time
OBJ_THRESH = 0.45  # minimum best-class score for a candidate box to be kept
NMS_THRESH = 0.45  # IoU threshold used by NMS to suppress overlapping boxes
class Yolov8N(object):
    """YOLOv8n detector running on an aidlite QNN interpreter (DSP backend).

    The model is expected to emit two output tensors: box coordinates of
    shape (1, 4, blocks) and per-class scores of shape (1, class_num,
    blocks), where ``blocks`` is the total number of anchor points over the
    three detection strides 8/16/32, i.e. h*w*(1/64 + 1/256 + 1/1024).
    """

    def __init__(self, model_path, width, height, class_num):
        """Build and initialize the QNN interpreter.

        Args:
            model_path: path to the QNN model file.
            width: network input width in pixels.
            height: network input height in pixels.
            class_num: number of object classes the model predicts.
        """
        self.class_num = class_num
        self.width = width
        self.height = height
        # NHWC float32 input tensor.
        input_shape = [[1, height, width, 3]]
        # Anchor points over strides 8/16/32: (h/8*w/8)+(h/16*w/16)+(h/32*w/32).
        self.blocks = int(height * width * (1 / 64 + 1 / 256 + 1 / 1024))
        self.maskw = int(width / 4)
        self.maskh = int(height / 4)
        self.output_shape = [[1, 4, self.blocks], [1, class_num, self.blocks]]
        # Define the attribute up front so __del__ stays safe even when one
        # of the early-return failure paths below is taken.
        self.interpreter = None
        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            print("Create model failed !")
            return
        self.model.set_model_properties(input_shape, aidlite.DataType.TYPE_FLOAT32,
                                        self.output_shape, aidlite.DataType.TYPE_FLOAT32)
        self.config = aidlite.Config.create_instance()
        if self.config is None:
            # Fixed copy-paste bug: this path previously printed the
            # interpreter-builder error message.
            print("Create config failed !")
            return
        self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
        self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
        self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
        self.config.is_quantify_model = 1
        self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(
            self.model, self.config)
        if self.interpreter is None:
            print("build_interpretper_from_model_and_config failed !")
            return
        self.interpreter.init()
        self.interpreter.load_model()

    def __del__(self):
        # Release native interpreter resources. Guard against a failed
        # __init__ where the interpreter was never built (the original
        # raised AttributeError here in that case).
        interpreter = getattr(self, "interpreter", None)
        if interpreter is not None:
            interpreter.destory()  # sic: "destory" is the aidlite API spelling

    def __call__(self, frame, invoke_nums):
        """Run detection on one BGR frame.

        Args:
            frame: BGR image array (H, W, 3), as produced by cv2.
            invoke_nums: number of interpreter invocations to run (used for
                timing statistics); the final invocation's output is decoded.
                Must be >= 1.

        Returns:
            ndarray of detections, one row per box as
            [x1, y1, x2, y2, score, class_id], rescaled to the original
            frame, or None when no candidate clears OBJ_THRESH.
        """
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # eqprocess letterboxes/resizes to the network resolution and
        # returns the scale needed to map boxes back to the input frame.
        img, scale = eqprocess(img, self.height, self.width)
        img = (img / 255).astype(np.float32)
        self.interpreter.set_input_tensor(0, img.data)
        invoke_time = []
        for _ in range(invoke_nums):
            t1 = time.time()
            self.interpreter.invoke()
            invoke_time.append((time.time() - t1) * 1000)
        max_invoke_time = max(invoke_time)
        min_invoke_time = min(invoke_time)
        mean_invoke_time = sum(invoke_time) / invoke_nums
        var_invoketime = np.var(invoke_time)
        print("====================================")
        print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
        print("====================================")
        qnn_1 = self.interpreter.get_output_tensor(0)
        qnn_2 = self.interpreter.get_output_tensor(1)
        # The runtime's output order is not guaranteed; sorting by length
        # puts the 4*blocks box tensor first — NOTE(review): this assumes
        # class_num > 4, otherwise the two flat tensors could swap.
        qnn_out = sorted([qnn_1, qnn_2], key=len)
        qnn_local = qnn_out[0].reshape(*self.output_shape[0])
        qnn_conf = qnn_out[1].reshape(*self.output_shape[1])
        # (1, 4+class_num, blocks) -> (1, blocks, 4+class_num)
        x = np.concatenate([qnn_local, qnn_conf], axis=1).transpose(0, 2, 1)
        # Keep anchor points whose best class score clears the threshold
        # (score computed once instead of twice as in the original).
        scores = np.amax(x[..., 4:], axis=-1)
        x = x[scores > OBJ_THRESH]
        if len(x) < 1:
            # Fixed: was `return None, None`, inconsistent with the single
            # value returned on the success path below.
            return None
        # Rows become [cx, cy, w, h, best_score, class_id].
        x = np.c_[x[..., :4], np.amax(x[..., 4:], axis=-1), np.argmax(x[..., 4:], axis=-1)]
        x[:, :4] = xywh2xyxy(x[:, :4])
        index = NMS(x[:, :4], x[:, 4], NMS_THRESH)
        out_boxes = x[index]
        # Map boxes from network-input scale back to the original frame.
        out_boxes[..., :4] = out_boxes[..., :4] * scale
        return out_boxes