qc903113684 committed on
Commit
063da7b
·
verified ·
1 Parent(s): 93877df

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ code/python/bus.jpg filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: agpl-3.0
3
+ pipeline_tag: object-detection
4
+ tags:
5
+ - AIoT
6
+ - QNN
7
+ ---
8
+
9
+ ![](https://aiot.aidlux.com/_next/image?url=%2Fapi%2Fv1%2Ffiles%2Fmodel%2Fcover%2F%25E5%259B%25BE-10.png&w=640&q=75)
10
+
11
+ ## YOLOv8m: Target Detection
12
+
13
+ YOLOv8 is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
14
+
15
+ ### Source model
16
+
17
+ - Input shape: 640x640
18
+ - Number of parameters: 25.91M
19
+ - Model size: 98.9MB
20
+ - Output shape: 1x84x8400
21
+
22
+ Source model repository: [yolov8](https://github.com/ultralytics/ultralytics)
23
+
24
+ ## Performance Reference
25
+
26
+ Please search model by model name in [Model Farm](https://aiot.aidlux.com/en/models)
27
+
28
+ ## Inference & Model Conversion
29
+
30
+ Please search model by model name in [Model Farm](https://aiot.aidlux.com/en/models)
31
+
32
+ ## License
33
+
34
+ - Source Model: [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE)
35
+
36
+ - Deployable Model: [AGPL-3.0](https://github.com/ultralytics/ultralytics/blob/main/LICENSE)
code/README.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Model Information
2
+ ### Source model
3
+ - Input shape: 640x640
4
+ - Number of parameters: 25.91M
5
+ - Model size: 98.9MB
6
+ - Output shape: 1x84x8400
7
+
8
+ Source model repository: [yolov8](https://github.com/ultralytics/ultralytics)
9
+
10
+ ## Inference with AidLite SDK
11
+
12
+ ### SDK installation
13
+ Model Farm uses AidLite SDK as the model inference SDK. For details, please refer to the [AidLite Developer Documentation](https://docs.aidlux.com/software/ai-sdk/aidlite_guide)
14
+
15
+ - install AidLite SDK
16
+
17
+ ```bash
18
+ # Install the appropriate version of the aidlite sdk
19
+ sudo aid-pkg update
20
+ sudo aid-pkg install aidlite-sdk
21
+ # Download the qnn version that matches the above backend. Eg Install QNN2.23 Aidlite: sudo aid-pkg install aidlite-qnn223
22
+ sudo aid-pkg install aidlite-{QNN VERSION}
23
+ ```
24
+
25
+ - Verify AidLite SDK
26
+
27
+ ```bash
28
+ # Check the installed AidLite SDK version to confirm the installation succeeded
+ python3 -c "import aidlite; print(aidlite.get_library_version())"
34
+ ```
35
+
36
+ ### Run Demo
37
+
38
+ #### python
39
+ ```bash
40
+ cd model_farm_yolov8m_qcs8550_qnn2.36_fp16_aidlite
41
+ python3 python/run_test.py --target_model ./[model_file_path] --imgs ./python/bus.jpg --invoke_nums 10
42
+ ```
43
+
44
+ ####
45
+ ```bash
46
+
47
+ ```
code/python/bus.jpg ADDED

Git LFS Details

  • SHA256: 33b198a1d2839bb9ac4c65d61f9e852196793cae9a0781360859425f6022b69c
  • Pointer size: 131 Bytes
  • Size of remote file: 487 kB
code/python/run_test.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import argparse
3
+ from yolov8m import Yolov8M
4
+ from utils import draw_detect_res
5
+
6
def parser_args(argv=None):
    """Parse command-line options for the YOLOv8 QNN demo.

    Args:
        argv: Optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] — existing callers are unaffected.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Run model benchmarks")
    parser.add_argument('--target_model', type=str,
                        default='./models/cutoff_yolov8m_qcs8550_fp16.qnn236.ctx.bin',
                        help="inference model path")
    parser.add_argument('--imgs', type=str, default='./python/bus.jpg',
                        help="Predict images path")
    # BUG FIX: the original help strings for the next three options all read
    # "run backend" (copy-paste error); corrected to describe the options.
    parser.add_argument('--height', type=int, default=640,
                        help="model input height")
    # NOTE(review): '--weight' actually means the input *width*; the flag name
    # is kept as-is for CLI backward compatibility.
    parser.add_argument('--weight', type=int, default=640,
                        help="model input width")
    parser.add_argument('--cls_num', type=int, default=80,
                        help="number of detection classes")
    parser.add_argument('--invoke_nums', type=int, default=10,
                        help="Inference nums")
    parser.add_argument('--model_type', type=str, default='QNN',
                        help="run backend")
    args = parser.parse_args(argv)
    return args
17
+
18
if __name__ == "__main__":
    args = parser_args()
    height = args.height
    weight = args.weight  # NOTE(review): this is the input *width*; name follows the CLI flag

    # Yolov8M signature is (model_path, width, height, class_num).
    model = Yolov8M(args.target_model, args.weight, args.height, args.cls_num)
    frame = cv2.imread(args.imgs)
    # BUG FIX: cv2.imread returns None (no exception) on a missing/unreadable
    # path; fail loudly instead of crashing later inside preprocessing.
    if frame is None:
        raise FileNotFoundError(f"Could not read image: {args.imgs}")

    out_boxes = model(frame, args.invoke_nums)
    # BUG FIX: Yolov8M.__call__ returns a `(None, None)` tuple when no target
    # passes the confidence threshold; normalise both shapes before use so
    # len()/drawing don't crash on an empty result.
    if isinstance(out_boxes, tuple):
        out_boxes = out_boxes[0]
    if out_boxes is None:
        print("=================== \n Detect 0 targets.")
    else:
        print(f"=================== \n Detect {len(out_boxes)} targets.")
        result = draw_detect_res(frame, out_boxes)
        cv2.imwrite("./python/result.jpg", result)
code/python/utils.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+
4
# COCO-80 class names, indexed by the model's predicted class id.
# NOTE(review): several entries carry stray trailing spaces (e.g. "motorbike ",
# "bus ") and legacy names ("sofa", "tvmonitor"). They are runtime strings used
# for printed/drawn labels, so they are kept byte-identical here; confirm with
# downstream consumers before normalising.
CLASSES = ("person", "bicycle", "car", "motorbike ", "aeroplane ", "bus ", "train", "truck ", "boat", "traffic light",
           "fire hydrant", "stop sign ", "parking meter", "bench", "bird", "cat", "dog ", "horse ", "sheep", "cow", "elephant",
           "bear", "zebra ", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
           "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife ",
           "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza ", "donut", "cake", "chair", "sofa",
           "pottedplant", "bed", "diningtable", "toilet ", "tvmonitor", "laptop ", "mouse ", "remote ", "keyboard ", "cell phone", "microwave ",
           "oven ", "toaster", "sink", "refrigerator ", "book", "clock", "vase", "scissors ", "teddy bear ", "hair drier", "toothbrush ")
11
+
12
def eqprocess(image, size1, size2):
    """Fit `image` into a (size1 x size2) canvas, preserving aspect ratio.

    The image is shrunk by a single uniform factor and placed in the
    top-left corner; the remaining bottom/right area stays zero-padded.

    Args:
        image: HxWx3 input image.
        size1: target canvas height.
        size2: target canvas width.

    Returns:
        (canvas, scale): float32 canvas of shape (size1, size2, 3) and the
        scalar factor by which the original image was divided.
    """
    h, w, _ = image.shape
    canvas = np.zeros((size1, size2, 3), dtype=np.float32)
    # One uniform factor keeps the aspect ratio; take the tighter dimension.
    scale = max(h / size1, w / size2)
    new_w = int(w / scale)
    new_h = int(h / scale)
    resized = cv2.resize(image, (new_w, new_h))
    canvas[:new_h, :new_w, :] = resized
    return canvas, scale
24
+
25
def xywh2xyxy(x):
    """Convert boxes from (center x, center y, width, height) to corner form
    (x1, y1, x2, y2). Works on a copy; the input array is left untouched."""
    out = np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # left
    out[:, 1] = x[:, 1] - half_h  # top
    out[:, 2] = x[:, 0] + half_w  # right
    out[:, 3] = x[:, 1] + half_h  # bottom
    return out
35
+
36
def xyxy2xywh(box):
    """Convert boxes from (x1, y1, x2, y2) corner form to
    (left_top x, left_top y, width, height).

    BUG FIX: the original mutated the caller's array in place (and returned
    the same object), which silently corrupted boxes still needed by the
    caller. This version works on a copy; the return value is unchanged.

    Args:
        box: (N, 4) array-like of corner boxes.

    Returns:
        New (N, 4) array of (x, y, w, h) boxes.
    """
    box = np.array(box, copy=True)
    box[:, 2:] = box[:, 2:] - box[:, :2]
    return box
42
+
43
def NMS(dets, scores, thresh):
    """Single-class non-maximum suppression.

    Args:
        dets: (N, 4+) array whose first four columns are
            (left_top x, left_top y, right_bottom x, right_bottom y).
        scores: (N,) confidence per box.
        thresh: IoU threshold above which a lower-scoring box is suppressed.

    Returns:
        List of kept indices into `dets`, best score first.
    """
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    # "+1" follows the classic pixel-inclusive area convention.
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        best = order[0]  # highest remaining score is always kept
        keep.append(best)
        rest = order[1:]
        # Intersection rectangle between the kept box and all remaining ones.
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        iw = np.maximum(0, ix2 - ix1 + 1)
        ih = np.maximum(0, iy2 - iy1 + 1)
        inter = iw * ih
        ious = inter / (areas[best] + areas[rest] - inter)
        # Keep only boxes whose overlap with `best` is at or below threshold.
        order = rest[np.where(ious <= thresh)[0]]
    return keep
70
+
71
+
72
def draw_detect_res(img, det_pred):
    """Draw detection boxes and class labels onto `img`.

    Args:
        img: HxWx3 image to annotate.
        det_pred: iterable of rows (x1, y1, x2, y2, score, class_id), or
            None, in which case `img` is returned unchanged.

    Returns:
        Annotated image, alpha-blended 0.7/0.3 with the untouched original
        for a lighter overlay.
    """
    if det_pred is None:
        return img

    img = img.astype(np.uint8)
    untouched = img.copy()
    color_step = int(255 / len(CLASSES))  # spread box colors across class ids
    for idx, det in enumerate(det_pred):
        x1, y1, x2, y2 = (int(v) for v in det[:4])
        cls_id = int(det[5])
        print(idx + 1, [x1, y1, x2, y2], det[4], f'{CLASSES[cls_id]}')
        cv2.putText(img, f'{CLASSES[cls_id]}', (x1, y1 - 6),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        cv2.rectangle(img, (x1, y1), (x2, y2),
                      (0, int(cls_id * color_step), int(255 - cls_id * color_step)),
                      thickness=2)
    img = cv2.addWeighted(untouched, 0.3, img, 0.7, 0)
    return img
90
+
91
def scale_mask(masks, im0_shape):
    """Resize masks to the original image's (height, width).

    Args:
        masks: HxW or HxWxN mask stack.
        im0_shape: original image shape; only [0] (h) and [1] (w) are used.

    Returns:
        Resized masks with a trailing channel axis guaranteed (HxWx1 for a
        single mask, since cv2.resize drops a singleton channel).
    """
    target = (im0_shape[1], im0_shape[0])  # cv2 expects (width, height)
    resized = cv2.resize(masks, target, interpolation=cv2.INTER_LINEAR)
    if resized.ndim == 2:
        resized = resized[:, :, None]
    return resized
97
+
98
def crop_mask(masks, boxes):
    """Zero out every mask pixel falling outside its matching box.

    Args:
        masks: (n, h, w) mask stack.
        boxes: (n, 4) boxes as (x1, y1, x2, y2) in mask-pixel coordinates.

    Returns:
        (n, h, w) masks with out-of-box pixels set to zero.
    """
    n, h, w = masks.shape
    # Split into four (n, 1, 1) columns so they broadcast against the grids.
    x1, y1, x2, y2 = np.split(boxes[:, :, None], 4, 1)
    cols = np.arange(w, dtype=x1.dtype)[None, None, :]  # (1, 1, w)
    rows = np.arange(h, dtype=x1.dtype)[None, :, None]  # (1, h, 1)
    inside = (cols >= x1) & (cols < x2) & (rows >= y1) & (rows < y2)
    return masks * inside
104
+
105
def process_mask(protos, masks_in, bboxes, im0_shape):
    """Combine prototype masks with per-detection coefficients into boolean
    instance masks at the original image resolution.

    Args:
        protos: (c, mh, mw) prototype masks.
        masks_in: (N, c) per-detection mask coefficients.
        bboxes: (N, 4) boxes used to crop each mask.
        im0_shape: original image shape (h, w, ...).

    Returns:
        (N, H, W) boolean masks thresholded at 0.5.
    """
    c, mh, mw = protos.shape
    # (N, c) @ (c, mh*mw) -> (N, mh, mw), then HWN layout for cv2.resize.
    combined = np.matmul(masks_in, protos.reshape((c, -1)))
    combined = combined.reshape((-1, mh, mw)).transpose(1, 2, 0)
    combined = np.ascontiguousarray(combined)
    combined = scale_mask(combined, im0_shape)  # P3 shape -> original image size
    combined = np.einsum('HWN -> NHW', combined)
    combined = crop_mask(combined, bboxes)  # clip each mask to its box
    return np.greater(combined, 0.5)
113
+
114
def masks2segments(masks):
    """Convert each binary mask into its largest external contour.

    Args:
        masks: (N, H, W) stack of boolean/0-1 masks.

    Returns:
        List of N float32 arrays of shape (M, 2) — the (x, y) points of the
        longest contour — or shape (0, 2) when a mask has no contour.
    """
    segments = []
    for mask in masks.astype('uint8'):
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_NONE)[0]  # CHAIN_APPROX_SIMPLE
        if contours:
            # Keep the contour with the most points (first one on ties,
            # matching argmax semantics).
            longest = max(contours, key=len)
            seg = np.array(longest).reshape(-1, 2)
        else:
            seg = np.zeros((0, 2))  # no segments found
        segments.append(seg.astype('float32'))
    return segments
code/python/yolov8m.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import aidlite
4
+ from utils import eqprocess, xywh2xyxy, NMS
5
+ import time
6
+
7
+ OBJ_THRESH = 0.45
8
+ NMS_THRESH = 0.45
9
+
10
class Yolov8M(object):
    """YOLOv8 object detector running on the AidLite/QNN runtime.

    Loads a compiled QNN context binary, runs timed inference on a BGR
    frame, and decodes the two raw head outputs (box regression + class
    scores) into NMS-filtered detections in original-image coordinates.
    """

    def __init__(self, model_path, width, height, class_num):
        """
        Args:
            model_path: path to the compiled .ctx.bin QNN model.
            width: network input width in pixels (e.g. 640).
            height: network input height in pixels (e.g. 640).
            class_num: number of classes in the score head (80 for COCO).
        """
        self.class_num = class_num
        self.width = width
        self.height = height
        input_shape = [[1, height, width, 3]]  # NHWC float32 input
        # Anchor-free YOLOv8 head: total grid cells over strides 8/16/32
        # (1/64 + 1/256 + 1/1024 of the input pixel count).
        self.blocks = int(height * width * (1 / 64 + 1 / 256 + 1 / 1024))
        self.maskw = int(width / 4)
        self.maskh = int(height / 4)
        self.output_shape = [[1, 4, self.blocks], [1, class_num, self.blocks]]

        self.model = aidlite.Model.create_instance(model_path)
        if self.model is None:
            print("Create model failed !")
            return
        self.model.set_model_properties(input_shape, aidlite.DataType.TYPE_FLOAT32,
                                        self.output_shape, aidlite.DataType.TYPE_FLOAT32)

        self.config = aidlite.Config.create_instance()
        if self.config is None:
            print("build_interpretper_from_model_and_config failed !")
            return

        self.config.implement_type = aidlite.ImplementType.TYPE_LOCAL
        self.config.framework_type = aidlite.FrameworkType.TYPE_QNN
        self.config.accelerate_type = aidlite.AccelerateType.TYPE_DSP
        self.config.is_quantify_model = 1

        self.interpreter = aidlite.InterpreterBuilder.build_interpretper_from_model_and_config(self.model, self.config)
        if self.interpreter is None:
            print("build_interpretper_from_model_and_config failed !")
            return

        self.interpreter.init()
        self.interpreter.load_model()

    def __del__(self):
        # BUG FIX: __init__ can bail out (early return) before
        # self.interpreter exists; guard so garbage-collecting a half-built
        # instance doesn't raise AttributeError.
        interpreter = getattr(self, "interpreter", None)
        if interpreter is not None:
            interpreter.destory()  # sic: 'destory' is the AidLite SDK's method name

    def __call__(self, frame, invoke_nums):
        """Run detection on one BGR frame.

        Args:
            frame: HxWx3 BGR uint8 image (OpenCV convention).
            invoke_nums: number of timed invoke() runs; timing stats printed.

        Returns:
            (M, 6) array of (x1, y1, x2, y2, score, class_id) in original
            image coordinates, or None when nothing passes OBJ_THRESH.
        """
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img, scale = eqprocess(img, self.height, self.width)
        img = img / 255  # normalise to [0, 1]
        img = img.astype(np.float32)
        self.interpreter.set_input_tensor(0, img.data)

        invoke_time = []
        for i in range(invoke_nums):
            t1 = time.time()
            self.interpreter.invoke()
            cost_time = (time.time() - t1) * 1000  # milliseconds
            invoke_time.append(cost_time)

        max_invoke_time = max(invoke_time)
        min_invoke_time = min(invoke_time)
        mean_invoke_time = sum(invoke_time) / invoke_nums
        var_invoketime = np.var(invoke_time)
        print("====================================")
        print(f"QNN invoke {invoke_nums} times:\n --mean_invoke_time is {mean_invoke_time} \n --max_invoke_time is {max_invoke_time} \n --min_invoke_time is {min_invoke_time} \n --var_invoketime is {var_invoketime}")
        print("====================================")

        qnn_1 = self.interpreter.get_output_tensor(0)
        qnn_2 = self.interpreter.get_output_tensor(1)
        # The two flat outputs are 4*blocks (boxes) and class_num*blocks
        # (scores); sorting by length identifies which is which
        # (assumes class_num > 4 — TODO confirm for small class counts).
        qnn_out = sorted([qnn_1, qnn_2], key=len)

        qnn_local = qnn_out[0].reshape(*self.output_shape[0])
        qnn_conf = qnn_out[1].reshape(*self.output_shape[1])

        # (1, 4+cls, blocks) -> (blocks, 4+cls); keep rows whose best class
        # score clears the confidence threshold.
        x = np.concatenate([qnn_local, qnn_conf], axis=1).transpose(0, 2, 1)
        x = x[np.amax(x[..., 4:], axis=-1) > OBJ_THRESH]
        if len(x) < 1:
            # BUG FIX: the original returned a `(None, None)` tuple here while
            # every other path (and the caller in run_test.py) expects a single
            # value; return plain None so `is None` checks work.
            return None

        # Append per-row best score and class id: (x, y, w, h, score, cls).
        x = np.c_[x[..., :4], np.amax(x[..., 4:], axis=-1), np.argmax(x[..., 4:], axis=-1)]

        x[:, :4] = xywh2xyxy(x[:, :4])
        index = NMS(x[:, :4], x[:, 4], NMS_THRESH)
        out_boxes = x[index]
        # Undo the eqprocess letterbox shrink to map back to original pixels.
        out_boxes[..., :4] = out_boxes[..., :4] * scale

        return out_boxes
91
+
models/QCS8550/FP16/cutoff_yolov8m_qcs8550_fp16.qnn236.ctx.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5563318d29916d0b364fab3ec75b1ff85b52722642dd99286764e3610f2790b2
3
+ size 54079176
models/QCS8550/W8A8/cutoff_yolov8m_qcs8550_w8a8.qnn236.ctx.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb5b8850c4ada73ebc98435979b9ff711bd8866cecc54b8da04bec6856a009c8
3
+ size 26754784
models/QCS8550/unknown_precision/cutoff_yolov8m_qcs8550_w8a16.qnn236.ctx.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef05372c98461950a41566cca52c6d40b162e641affd89db7056b0cc7f6c012e
3
+ size 27311840