1. 编译安装PaddleLite
1.2 树莓派环境配置
sudo apt update
sudo apt-get install -y --no-install-recommends gcc g++ make wget python unzip patchelf python-dev
wget https://www.cmake.org/files/v3.10/cmake-3.10.3.tar.gz
tar -zxvf cmake-3.10.3.tar.gz
cd cmake-3.10.3
./configure
make
sudo make install
export CXX=/usr/bin/g++
export CC=/usr/bin/gcc
1.3 下载Paddle-Lite源码 并切换到release分支
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite && git checkout release/v2.8
1.4 加速第三方库下载
rm -rf third-party
git submodule init
git submodule update
1.5 编译Paddle-Lite
./lite/tools/build_linux.sh --arch=armv7hf --with_python=ON --with_extra=ON --python_version=3.7
可选编译参数
--arch: (armv8|armv7|armv7hf) arm版本,默认为armv8
--toolchain: (gcc|clang) 编译器类型,默认为gcc
--with_extra: (OFF|ON) 是否编译OCR/NLP模型相关kernel&OP,默认为OFF,只编译CV模型相关kernel&OP
--with_python: (OFF|ON) 是否编译python预测库, 默认为 OFF
--python_version: (2.7|3.5|3.7) 编译whl的Python版本,默认为 None
--with_cv: (OFF|ON) 是否编译CV相关预处理库, 默认为 OFF
--with_log: (OFF|ON) 是否输出日志信息, 默认为 ON
--with_exception: (OFF|ON) 是否在错误发生时抛出异常,默认为 OFF
注意:推荐 with_cv 设置为 ON,否则会缺少部分 op
2.使用PaddleLite
2.1 模型训练
本文使用PP-YOLO_MobileNetV3_small
详见PaddleDetection模型训练部分
https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.0-rc/configs/ppyolo
2.2 模型导出
本文使用PP-YOLO_MobileNetV3_small
详见PaddleDetection模型导出部分
2.3 模型调用MobileConfig/CxxConfig
2.3.1 使用opt tool优化模型
import os
import sys

# Put the Paddle-Lite prediction libraries on the import path.
sys.path.append('inference_lite_lib.armlinux.armv7hf/python/lib')
sys.path.append('inference_lite_lib.armlinux.armv7hf/cxx/lib')
from lite import *

# Step 1: create an opt (model optimizer) instance.
optimizer = Opt()
# Step 2: point it at the input model topology and weights.
optimizer.set_model_file('./model/__model__')
optimizer.set_param_file('./model/__params__')
# Step 3: target hardware: arm / x86 / opencl / npu.
optimizer.set_valid_places("arm")
# Step 4: serialized model format: naive_buffer / protobuf.
optimizer.set_model_type("naive_buffer")
optimizer.set_quant_type('QUANT_INT8')
# Step 5: output path (no extension) for the optimized model.
optimizer.set_optimize_out("model_int8_opt")
# Step 6: run the conversion.
optimizer.run()
2.3.2 图片预测代码
#-*- coding: utf-8 -*-
from __future__ import print_function
import sys
import numpy as np
import cv2
import time
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
sys.path.append('inference_lite_lib.armlinux.armv7hf/python/lib')
sys.path.append('inference_lite_lib.armlinux.armv7hf/cxx/lib')
from lite import *
# Font used for the box captions drawn by draw_bbox_image.
# NOTE(review): requires arialuni.ttf in the working directory — confirm it ships with the demo.
font_style = ImageFont.truetype("arialuni.ttf", 25)
def read_img(im_path, resize_w, resize_h):
    """Load an image from disk and preprocess it for the detector.

    Args:
        im_path: path to the input image file.
        resize_w: target width after resizing.
        resize_h: target height after resizing.

    Returns:
        (origin, img): the original PIL image, and a float32 numpy array
        of shape (1, 3, resize_h, resize_w) normalized to roughly [-1, 1].
    """
    origin = Image.open(im_path)
    img = origin.resize((resize_w, resize_h), Image.BILINEAR)
    if img.mode != 'RGB':
        img = img.convert('RGB')
    # HWC -> CHW, then normalize: (x - 127.5) * (1 / 127.5).
    img = np.array(img).astype('float32').transpose((2, 0, 1))
    img -= 127.5
    img *= 0.007843
    # Prepend the batch dimension the predictor expects.
    img = img[np.newaxis, :]
    return origin, img
def draw_bbox_image(img, boxes, labels, save_name, scores, label_dict,
                    score_threshold=0.80):
    """Draw high-confidence detection boxes on a PIL image and save it.

    Args:
        img: PIL image to draw on (modified in place).
        boxes: iterable of [xmin, ymin, xmax, ymax] boxes.
        labels: class ids, parallel to boxes.
        save_name: output file path for the annotated image.
        scores: confidences, parallel to boxes.
        label_dict: maps class id -> display name.
        score_threshold: minimum score for a box to be drawn
            (default 0.80, the original hard-coded value).
    """
    draw = ImageDraw.Draw(img)
    for box, label, score in zip(boxes, labels, scores):
        if score >= score_threshold:
            print("label:", label_dict[int(label)])
            xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
            draw.rectangle((xmin, ymin, xmax, ymax), None, 'red')
            caption = '{},score={}'.format(label_dict[int(label)], score)
            draw.text((xmin, ymin), caption, (0, 255, 0), font=font_style)
    print(save_name)
    img.save(save_name)
# Build the runtime config. MobileConfig loads a model already converted
# by opt to naive_buffer format; the commented CxxConfig variant loads
# the raw inference model instead.
config = MobileConfig()
config.set_model_from_file('./model_int8_opt.nb')
#config = CxxConfig()
#config.set_model_file('./model/__model__')   # model topology file
#config.set_param_file('./model/__params__')  # model weights file
#places = [Place(TargetType.ARM, PrecisionType.FP32)]
#config.set_valid_places(places)
# Create the predictor.
predictor = create_paddle_predictor(config)
print('Predict Start')
time_start = time.time()
image_path = './1.jpg'
output_path = './1.predict.jpg'
# Input 0: the preprocessed image tensor (NCHW, 1x3x320x320).
input_tensor1 = predictor.get_input(0)
height, width = 320, 320
input_tensor1.resize([1, 3, height, width])
origin, img = read_img(image_path, width, height)
input_tensor1.set_float_data(img.flatten())
# Input 1: the original image size, used by PP-YOLO to scale the
# predicted boxes back to the source resolution.
origin_width, origin_height = origin.size
input_tensor2 = predictor.get_input(1)
input_tensor2.resize([1, 2])
input_tensor2.set_int32_data([origin_height, origin_width])
# Run inference.
predictor.run()
# Output 0: rows of [label, score, xmin, ymin, xmax, ymax].
output_tensor = predictor.get_output(0)
print(output_tensor.float_data())
print('Time Cost{}'.format(time.time() - time_start))
print('Predict End')
if output_tensor.shape()[1] != 6:
    # BUGFIX: no detections — save the unmodified image and stop.
    # The original fell through to the reshape below and crashed.
    print("No object found in {}".format(image_path))
    origin.save(output_path)
    sys.exit(0)
bboxes = np.array(list(output_tensor.float_data())).reshape(-1, 6)
# Keep only detections with score >= 0.8 (column 1 is the score).
bboxes = bboxes[bboxes[:, 1] >= 0.8]
labels = bboxes[:, 0].astype('int32')
scores = bboxes[:, 1].astype('float32')
boxes = bboxes[:, 2:].astype('float32')
print("label:{}".format(labels))
print("score:{}".format(scores))
print("boxes:{}".format(boxes))
label_dict = {0: 'meat', 1: 'veg'}
draw_bbox_image(origin, boxes, labels, output_path, scores, label_dict)
2.3.3 视频预测代码
# coding=UTF-8
import cv2
import threading
import os
import sys
import time
import numpy as np
sys.path.append('inference_lite_lib.armlinux.armv7hf/python/lib')
sys.path.append('inference_lite_lib.armlinux.armv7hf/cxx/lib')
from lite import *
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
# Font used for the box captions drawn by draw_bbox_image.
# NOTE(review): requires arialuni.ttf in the working directory — confirm.
font_style = ImageFont.truetype("arialuni.ttf", 25)
class Cameara:
    """Threaded camera reader.

    A background thread continuously grabs frames so that read()
    always returns the most recent one without blocking the caller.
    (Name kept as in the original post — renaming would break the
    module-level instantiation below.)
    """

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        # Prime the first frame so read() has data before start().
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        """Start the background capture thread."""
        # BUGFIX: daemon=True so this thread cannot keep the process
        # alive if the main loop exits without calling stop().
        threading.Thread(target=self.update, args=(), daemon=True).start()

    def update(self):
        # Capture loop: keep overwriting with the newest frame until
        # stop() raises the flag.
        while True:
            if self.stopped:
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the most recently captured frame (may be None on grab failure)."""
        return self.frame

    def stop(self):
        """Signal the capture thread to exit."""
        self.stopped = True
# Module-level camera instance used by the main loop.
# NOTE(review): cv2.VideoCapture usually takes an int index (0); a device
# path like "/dev/video0" works with the V4L2 backend — confirm on target.
camera = Cameara("/dev/video0")
def data_process(frame, resize_w, resize_h):
    """Preprocess a PIL frame for the detector.

    Returns (origin, resized, tensor): an untouched copy of the frame,
    a copy of the resized frame, and a float32 array of shape
    (1, 3, resize_h, resize_w) normalized to roughly [-1, 1].
    """
    origin = frame.copy()
    scaled = frame.resize((resize_w, resize_h), Image.BILINEAR)
    preview = scaled.copy()
    rgb = scaled if scaled.mode == 'RGB' else scaled.convert('RGB')
    # HWC -> CHW, normalize, then prepend the batch axis.
    tensor = np.array(rgb).astype('float32').transpose((2, 0, 1))
    tensor -= 127.5
    tensor *= 0.007843
    tensor = tensor[np.newaxis, :]
    return origin, preview, tensor
def draw_bbox_image(img, boxes, labels, scores, label_dict, color):
    """Draw labeled detection boxes onto a PIL image in place.

    Args:
        img: PIL image to draw on (modified in place).
        boxes: iterable of [xmin, ymin, xmax, ymax] boxes.
        labels: class ids, parallel to boxes.
        scores: confidences, parallel to boxes.
        label_dict: maps class id -> display name.
        color: outline/text color accepted by PIL (name or RGB tuple).

    Returns:
        The same image object, annotated.
    """
    draw = ImageDraw.Draw(img)
    for box, label, score in zip(boxes, labels, scores):
        print("label:", label_dict[int(label)])
        xmin, ymin, xmax, ymax = box[0], box[1], box[2], box[3]
        draw.rectangle((xmin, ymin, xmax, ymax), None, color)
        caption = '{},score={}'.format(label_dict[int(label)], score)
        draw.text((xmin, ymin), caption, color, font=font_style)
    return img
if __name__ == "__main__":
    # Load the opt-converted model and build the predictor once.
    config = MobileConfig()
    config.set_model_from_file('./model_int8_opt.nb')
    predictor = create_paddle_predictor(config)
    camera.start()
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    label_dict = {0: 'meat', 1: 'veg'}
    # BUGFIX: `color` was used below but never defined (NameError on the
    # first detection). 'red' matches the image-prediction script.
    color = 'red'
    try:
        while True:
            frame = camera.read()
            if frame is None:
                # Grab failed or camera not ready yet; skip this frame.
                continue
            frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            # Input 0: preprocessed image tensor (NCHW, 1x3x320x320).
            input_tensor1 = predictor.get_input(0)
            height, width = 320, 320
            input_tensor1.resize([1, 3, height, width])
            origin, resize, img = data_process(frame, 320, 320)
            input_tensor1.set_float_data(img.flatten())
            # Input 1: original frame size so PP-YOLO can rescale boxes.
            origin_width, origin_height = origin.size
            input_tensor2 = predictor.get_input(1)
            input_tensor2.resize([1, 2])
            input_tensor2.set_int32_data([origin_height, origin_width])
            print('Predict Start')
            time_start = time.time()
            predictor.run()
            # Output rows: [label, score, xmin, ymin, xmax, ymax].
            output_tensor = predictor.get_output(0)
            print(output_tensor.float_data())
            print('Time Cost{}'.format(time.time() - time_start))
            print('Predict End')
            if output_tensor.shape()[1] != 6:
                print("No object found")
                continue
            bboxes = np.array(list(output_tensor.float_data())).reshape(-1, 6)
            # BUGFIX: filter on the score column (index 1); the original
            # compared column 2, which is the xmin coordinate, matching
            # neither the image script nor the output layout.
            bboxes = bboxes[bboxes[:, 1] >= 0.8]
            labels = bboxes[:, 0].astype('int32')
            scores = bboxes[:, 1].astype('float32')
            boxes = bboxes[:, 2:].astype('float32')
            print("label:{}".format(labels))
            print("score:{}".format(scores))
            print("boxes:{}".format(boxes))
            img = draw_bbox_image(origin, boxes, labels, scores, label_dict, color)
            img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
            cv2.imshow('image', img)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
    finally:
        # BUGFIX: stop the capture thread (was never stopped) and close
        # the window on any exit path.
        camera.stop()
        cv2.destroyAllWindows()
附编译好的paddlelite 2.8-rc库
https://wwa.lanzous.com/ifoitm4dv9e
参考文档
https://paddle-lite.readthedocs.io/zh/latest/user_guides/source_compile.html
https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Linux.html
2 comments
博主您好,请问在树莓派上编译之后,运行predictor.run()会出现段错误,是什么原因呢?
你具体出现的错误是什么,可以贴出来。