| import torch |
| import torch.nn as nn |
| from lib.models.common import Conv, SPP, Bottleneck, BottleneckCSP, Focus, Concat, Detect, SharpenConv |
| from torch.nn import Upsample |
| from lib.utils import check_anchor_order |
| from lib.utils import initialize_weights |
| import argparse |
| import onnx |
| import onnxruntime as ort |
| import onnxsim |
|
|
| import math |
| import cv2 |
|
|
| |
# Network architecture specification consumed by MCnet.
#
# Row 0 lists the layer indices that produce the three network outputs:
#   [detection head, drivable-area segmentation head, lane-line segmentation head]
# (MCnet reads block_cfg[0][0] as det_out_idx and block_cfg[0][1:] as seg_out_idx.)
#
# Every following row is [from, block, args]:
#   from  -- input layer index: -1 = previous layer; a list = inputs to Concat
#   block -- nn.Module class to instantiate
#   args  -- positional constructor arguments for that class
YOLOP = [
[24, 33, 42],   # output layer indices: Detect / drivable-area seg / lane-line seg

# --- shared encoder (backbone + neck): layers 0-16 ---
[-1, Focus, [3, 32, 3]],                     # 0
[-1, Conv, [32, 64, 3, 2]],                  # 1
[-1, BottleneckCSP, [64, 64, 1]],            # 2
[-1, Conv, [64, 128, 3, 2]],                 # 3
[-1, BottleneckCSP, [128, 128, 3]],          # 4
[-1, Conv, [128, 256, 3, 2]],                # 5
[-1, BottleneckCSP, [256, 256, 3]],          # 6
[-1, Conv, [256, 512, 3, 2]],                # 7
[-1, SPP, [512, 512, [5, 9, 13]]],           # 8
[-1, BottleneckCSP, [512, 512, 1, False]],   # 9
[-1, Conv, [512, 256, 1, 1]],                # 10
[-1, Upsample, [None, 2, 'nearest']],        # 11
[[-1, 6], Concat, [1]],                      # 12 (cat with layer 6)
[-1, BottleneckCSP, [512, 256, 1, False]],   # 13
[-1, Conv, [256, 128, 1, 1]],                # 14
[-1, Upsample, [None, 2, 'nearest']],        # 15
[[-1, 4], Concat, [1]],                      # 16 (cat with layer 4)

# --- detection branch: layers 17-24 ---
[-1, BottleneckCSP, [256, 128, 1, False]],   # 17
[-1, Conv, [128, 128, 3, 2]],                # 18
[[-1, 14], Concat, [1]],                     # 19 (cat with layer 14)
[-1, BottleneckCSP, [256, 256, 1, False]],   # 20
[-1, Conv, [256, 256, 3, 2]],                # 21
[[-1, 10], Concat, [1]],                     # 22 (cat with layer 10)
[-1, BottleneckCSP, [512, 512, 1, False]],   # 23
[[17, 20, 23], Detect,                       # 24: heads on layers 17/20/23
 [1, [[3, 9, 5, 11, 4, 20], [7, 18, 6, 39, 12, 31], [19, 50, 38, 81, 68, 157]], [128, 256, 512]]],
 # args: nc=1, anchors (3 per scale, pixel units), input channels per scale

# --- drivable-area segmentation branch: layers 25-33 (from layer 16) ---
[16, Conv, [256, 128, 3, 1]],                # 25
[-1, Upsample, [None, 2, 'nearest']],        # 26
[-1, BottleneckCSP, [128, 64, 1, False]],    # 27
[-1, Conv, [64, 32, 3, 1]],                  # 28
[-1, Upsample, [None, 2, 'nearest']],        # 29
[-1, Conv, [32, 16, 3, 1]],                  # 30
[-1, BottleneckCSP, [16, 8, 1, False]],      # 31
[-1, Upsample, [None, 2, 'nearest']],        # 32
[-1, Conv, [8, 2, 3, 1]],                    # 33: 2-channel seg map

# --- lane-line segmentation branch: layers 34-42 (from layer 16) ---
[16, Conv, [256, 128, 3, 1]],                # 34
[-1, Upsample, [None, 2, 'nearest']],        # 35
[-1, BottleneckCSP, [128, 64, 1, False]],    # 36
[-1, Conv, [64, 32, 3, 1]],                  # 37
[-1, Upsample, [None, 2, 'nearest']],        # 38
[-1, Conv, [32, 16, 3, 1]],                  # 39
[-1, BottleneckCSP, [16, 8, 1, False]],      # 40
[-1, Upsample, [None, 2, 'nearest']],        # 41
[-1, Conv, [8, 2, 3, 1]]                     # 42: 2-channel seg map
]
|
|
|
|
class MCnet(nn.Module):
    """Multi-task network: a shared encoder feeding one detection head and two
    segmentation heads, assembled from a YOLOP-style block configuration.

    ``forward`` returns ``(det_out, drive_area_seg, lane_line_seg)``.
    """

    def __init__(self, block_cfg):
        """Build the model from ``block_cfg``.

        block_cfg[0]  -- output layer indices [detect, seg1, seg2]
        block_cfg[1:] -- rows of [from, block, args] (see the YOLOP config)
        """
        super().__init__()
        layers, save = [], []
        self.nc = 1                         # number of detection classes
        self.detector_index = -1            # set when the Detect block is found
        self.det_out_idx = block_cfg[0][0]
        self.seg_out_idx = block_cfg[0][1:]
        self.num_anchors = 3
        self.num_outchannel = 5 + self.nc   # box(4) + objectness(1) + classes

        for i, (from_, block, args) in enumerate(block_cfg[1:]):
            # NOTE(review): eval() is only acceptable because block_cfg is a
            # trusted, hard-coded config — never feed it untrusted strings.
            block = eval(block) if isinstance(block, str) else block
            if block is Detect:
                self.detector_index = i
            block_ = block(*args)
            block_.index, block_.from_ = i, from_
            layers.append(block_)
            # Record every layer whose output is consumed again later by a
            # skip/concat connection (x % i normalizes negative indices).
            save.extend(x % i for x in ([from_] if isinstance(from_, int) else from_) if x != -1)
        assert self.detector_index == block_cfg[0][0]

        self.model, self.save = nn.Sequential(*layers), sorted(save)
        self.names = [str(i) for i in range(self.nc)]

        # Derive the detection strides from a dummy forward pass, then rescale
        # the anchors from pixel units to stride units.
        Detector = self.model[self.detector_index]
        if isinstance(Detector, Detect):
            s = 128  # dummy input size; must be divisible by the max stride
            with torch.no_grad():
                model_out = self.forward(torch.zeros(1, 3, s, s))
                detects, _, _ = model_out
                Detector.stride = torch.tensor([s / x.shape[-2] for x in detects])
            Detector.anchors /= Detector.stride.view(-1, 1, 1)
            check_anchor_order(Detector)
            self.stride = Detector.stride

        initialize_weights(self)

    def forward(self, x):
        """Run the network.

        Returns (det_out, drive_area_seg, lane_line_seg); the two segmentation
        outputs are sigmoid probabilities, exactly two are expected.
        """
        cache = []
        out = []
        det_out = None
        for i, block in enumerate(self.model):
            if block.from_ != -1:
                # Route inputs coming from earlier layers (skip / concat).
                x = cache[block.from_] if isinstance(block.from_, int) \
                    else [x if j == -1 else cache[j] for j in block.from_]
            x = block(x)
            if i in self.seg_out_idx:
                # Segmentation heads emit logits; export probabilities.
                out.append(torch.sigmoid(x))
            if i == self.detector_index:
                # In eval mode Detect returns (decoded, raw); keep the decoded
                # tensor only. In training mode keep the raw per-scale maps.
                det_out = x if self.training else x[0]
            # Only keep outputs that a later layer will read again.
            cache.append(x if block.index in self.save else None)
        return det_out, out[0], out[1]

    def _initialize_biases(self, cf=None):
        """Initialize Detect-head biases with the YOLOv5 heuristic.

        cf -- optional class-frequency tensor; when None a uniform class
              prior is assumed.
        """
        m = self.model[self.detector_index]
        # no_grad is required: the in-place '+=' below edits a view of a leaf
        # Parameter that requires grad, which autograd otherwise rejects with
        # a RuntimeError.
        with torch.no_grad():
            for mi, s in zip(m.m, m.stride):
                b = mi.bias.view(m.na, -1)
                # objectness prior: ~8 objects per 640-pixel image
                b[:, 4] += math.log(8 / (640 / s) ** 2)
                # class prior: uniform, or proportional to class frequency cf
                b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())
                mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
|
|
|
|
if __name__ == "__main__":
    # Export the YOLOP checkpoint to ONNX, optionally simplify the graph, and
    # smoke-test that onnxruntime can load the result.
    parser = argparse.ArgumentParser()
    parser.add_argument('--height', type=int, default=640)
    parser.add_argument('--width', type=int, default=640)
    args = parser.parse_args()

    do_simplify = True

    # The device only affects checkpoint deserialization; the export itself
    # runs on CPU tensors.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = MCnet(YOLOP)
    checkpoint = torch.load('./weights/End-to-end.pth', map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()

    height = args.height
    width = args.width
    print("Load ./weights/End-to-end.pth done!")
    onnx_path = f'./weights/yolop-{height}-{width}.onnx'
    inputs = torch.randn(1, 3, height, width)

    print(f"Converting to {onnx_path}")
    torch.onnx.export(model, inputs, onnx_path,
                      verbose=False, opset_version=12, input_names=['images'],
                      output_names=['det_out', 'drive_area_seg', 'lane_line_seg'])
    print('convert', onnx_path, 'to onnx finish!!!')

    # Validate the exported graph before (optionally) simplifying it.
    model_onnx = onnx.load(onnx_path)
    onnx.checker.check_model(model_onnx)
    print(onnx.helper.printable_graph(model_onnx.graph))

    if do_simplify:
        print(f'simplifying with onnx-simplifier {onnxsim.__version__}...')
        model_onnx, check = onnxsim.simplify(model_onnx, check_n=3)
        assert check, 'assert check failed'
        onnx.save(model_onnx, onnx_path)

    # Smoke test: confirm onnxruntime can open the exported model and report
    # its input/output signatures.
    try:
        sess = ort.InferenceSession(onnx_path)

        for ii in sess.get_inputs():
            print("Input: ", ii)
        for oo in sess.get_outputs():
            print("Output: ", oo)

        print('read onnx using onnxruntime success')
    except Exception:
        print('read failed')
        raise  # bare raise preserves the original traceback

"""
PYTHONPATH=. python3 ./export_onnx.py --height 640 --width 640
PYTHONPATH=. python3 ./export_onnx.py --height 1280 --width 1280
PYTHONPATH=. python3 ./export_onnx.py --height 320 --width 320
"""
|
|