Segmentation fault at rknn_run() when the model has multiple inputs
I am porting the LightGlue algorithm (https://github.com/cvg/LightGlue) to the RK3588.
The model takes the keypoint coordinates and the corresponding feature descriptors of two images, i.e. four inputs:
kpts0.shape: (1, 300, 2) # keypoint coordinates, int64
kpts1.shape: (1, 300, 2) # keypoint coordinates, int64
desc0.shape: (1, 300, 256) # feature descriptors, float32
desc1.shape: (1, 300, 256) # feature descriptors, float32
The input/output parameters of my ONNX model are as follows:
Running this ONNX model from a Python script gives correct inference results.
After converting the ONNX model to RKNN with a script, the RKNN model looks like this:
The conversion script:
`import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2
import torch
import matplotlib
import matplotlib.pyplot as plt
from typing import List, Optional, Union
from rknn.api import RKNN
ONNX_MODEL = '../weights/superpoint_lightglue.onnx'
RKNN_MODEL = '../weights/superpoint_lightglue.rknn'
DATASET = './quantize/lightglue.txt'
TARGET_PLATFORM = 'rk3588'
QUANTIZE_ON = True # False: convert the original model to fp16; True: quantize the model to int8.
num_keypoints = 300 # 256 # unified number of input keypoints
def read_image(path: str, grayscale: bool = False) -> np.ndarray:
"""Read an image from path as RGB or grayscale"""
mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR
image = cv2.imread(path, mode)
if image is None:
raise IOError(f"Could not read image at {path}.")
if not grayscale:
image = image[..., ::-1]
return image
def normalize_image(image: np.ndarray) -> np.ndarray:
"""Normalize the image tensor and reorder the dimensions."""
if image.ndim == 3:
image = image.transpose((2, 0, 1)) # HxWxC to CxHxW
elif image.ndim == 2:
image = image[None] # add channel axis
else:
raise ValueError(f"Not an image: {image.shape}")
return image / 255.0
def resize_image(
image: np.ndarray,
size: Union[List[int], int],
fn: str,
interp: Optional[str] = "area",
) -> np.ndarray:
"""Resize an image to a fixed size, or according to max or min edge."""
h, w = image.shape[:2]
fn = {"max": max, "min": min}[fn]
if isinstance(size, int):
scale = size / fn(h, w)
h_new, w_new = int(round(h * scale)), int(round(w * scale))
scale = (w_new / w, h_new / h)
elif isinstance(size, (tuple, list)):
h_new, w_new = size
scale = (w_new / w, h_new / h)
else:
raise ValueError(f"Incorrect new size: {size}")
mode = {
"linear": cv2.INTER_LINEAR,
"cubic": cv2.INTER_CUBIC,
"nearest": cv2.INTER_NEAREST,
"area": cv2.INTER_AREA,
}[interp]
return cv2.resize(image, (w_new, h_new), interpolation=mode), scale
def load_image(
path: str,
grayscale: bool = False,
resize: int = None,
fn: str = "max",
interp: str = "area",
):
img = read_image(path, grayscale=grayscale)
scales = [1, 1]
if resize is not None:
img, scales = resize_image(img, resize, fn=fn, interp=interp)
return normalize_image(img)[None].astype(np.float32), np.asarray(scales)
def rgb_to_grayscale(image: np.ndarray) -> np.ndarray:
"""Convert an RGB image to grayscale."""
scale = np.array([0.299, 0.587, 0.114], dtype=image.dtype).reshape(3, 1, 1)
image = (image * scale).sum(axis=-3, keepdims=True)
return image
def plot_images(imgs, titles=None, cmaps='gray', dpi=100, pad=.5,
adaptive=True):
"""Plot a set of images horizontally.
Args:
imgs: a list of NumPy or PyTorch images, RGB (H, W, 3) or mono (H, W).
titles: a list of strings, as titles for each image.
cmaps: colormaps for monochrome images.
adaptive: whether the figure size should fit the image aspect ratios.
"""
n = len(imgs)
if not isinstance(cmaps, (list, tuple)):
cmaps = [cmaps] * n
if adaptive:
ratios = [i.shape[1] / i.shape[0] for i in imgs] # W / H
else:
ratios = [4/3] * n
figsize = [sum(ratios)*4.5, 4.5]
fig, ax = plt.subplots(
1, n, figsize=figsize, dpi=dpi, gridspec_kw={'width_ratios': ratios})
if n == 1:
ax = [ax]
for i in range(n):
ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmaps[i]))
ax[i].get_yaxis().set_ticks([])
ax[i].get_xaxis().set_ticks([])
ax[i].set_axis_off()
for spine in ax[i].spines.values(): # remove frame
spine.set_visible(False)
if titles:
ax[i].set_title(titles[i])
fig.tight_layout(pad=pad)
def plot_matches(kpts0, kpts1, color=None, lw=1.5, ps=4, a=1., labels=None,
axes=None):
"""Plot matches for a pair of existing images.
Args:
kpts0, kpts1: corresponding keypoints of size (N, 2).
color: color of each match, string or RGB tuple. Random if not given.
lw: width of the lines.
ps: size of the end points (no endpoint if ps=0)
indices: indices of the images to draw the matches on.
a: alpha opacity of the match lines.
"""
fig = plt.gcf()
if axes is None:
ax = fig.axes
ax0, ax1 = ax[0], ax[1]
else:
ax0, ax1 = axes
assert len(kpts0) == len(kpts1)
if color is None:
color = matplotlib.cm.hsv(np.random.rand(len(kpts0))).tolist()
elif len(color) > 0 and not isinstance(color[0], (tuple, list)):
color = [color] * len(kpts0)
if lw > 0:
for i in range(len(kpts0)):
l = matplotlib.patches.ConnectionPatch(
xyA=(kpts0[i, 0], kpts0[i, 1]), xyB=(kpts1[i, 0], kpts1[i, 1]),
coordsA=ax0.transData, coordsB=ax1.transData,
axesA=ax0, axesB=ax1,
zorder=1, color=color[i], linewidth=lw, clip_on=True,
alpha=a, label=None if labels is None else labels[i],
picker=5.0)
l.set_annotation_clip(True)
fig.add_artist(l)
# freeze the axes to prevent the transform to change
ax0.autoscale(enable=False)
ax1.autoscale(enable=False)
if ps > 0:
ax0.scatter(kpts0[:, 0], kpts0[:, 1], c=color, s=ps)
ax1.scatter(kpts1[:, 0], kpts1[:, 1], c=color, s=ps)
def normalize_keypoints(kpts: np.ndarray, h: int, w: int) -> np.ndarray:
size = np.array([w, h])
shift = size / 2
scale = size.max() / 2
kpts = (kpts - shift) / scale
return kpts.astype(np.float32)
def post_process(kpts0, kpts1, matches, scales0, scales1):
kpts0 = (kpts0 + 0.5) / scales0 - 0.5
kpts1 = (kpts1 + 0.5) / scales1 - 0.5
# create match indices
m_kpts0, m_kpts1 = kpts0[0][matches[..., 0]], kpts1[0][matches[..., 1]]
return m_kpts0, m_kpts1
def export_rknn_inference(input_list):
# Create RKNN object
rknn = RKNN(verbose=True)
# pre-process config
print('--> Config model')
rknn.config(quantized_algorithm='normal', quantized_method='channel', target_platform=TARGET_PLATFORM)
print('done')
# Load ONNX model
print('--> Loading model')
ret = rknn.load_onnx(model=ONNX_MODEL,
# inputs=["kpts0","kpts1","desc0","desc1"],
# outputs=["results"],
# outputs=["m0", "valid0", "mscores0"],
# input_size_list=[[1, num_keypoints, 2], [1, num_keypoints, 2], [1, num_keypoints, 256], [1, num_keypoints, 256]]
)
if ret != 0:
print('Load model failed!')
exit(ret)
print('done')
# Build model
print('--> Building model')
ret = rknn.build(do_quantization=QUANTIZE_ON, dataset=DATASET, rknn_batch_size=1)
if ret != 0:
print('Build model failed!')
exit(ret)
print('done')
# # Accuracy analysis
# print('--> Accuracy analysis')
# ret = rknn.accuracy_analysis(inputs=input_list, output_dir=None)
# if ret != 0:
# print('Accuracy analysis failed!')
# exit(ret)
# print('done')
# Export RKNN model
print('--> Export rknn model')
ret = rknn.export_rknn(RKNN_MODEL)
if ret != 0:
print('Export rknn model failed!')
exit(ret)
print('done')
# Init runtime environment
print('--> Init runtime environment')
ret = rknn.init_runtime()
# ret = rknn.init_runtime(target='rk3566')
if ret != 0:
print('Init runtime environment failed!')
exit(ret)
print('done')
# Inference
print('--> Running model')
results = rknn.inference(inputs=input_list)
rknn.release()
print('done')
return results
if __name__ == '__main__':
print('This is main ...')
img = None
kpts0 = np.load('./quantize/kpts0.npy')
kpts1 = np.load('./quantize/kpts1.npy')
desc0 = np.load('./quantize/desc0.npy')
desc1 = np.load('./quantize/desc1.npy')
image0, scales0 = load_image("../assets/DSC_0410.JPG", resize = (366, 512))
image1, scales1 = load_image("../assets/DSC_0411.JPG", resize = (366, 512))
# # normalize the keypoint coordinates
# norm_kpts0 = normalize_keypoints(kpts0, image0.shape[2], image0.shape[3]) # class 'numpy.ndarray'
# norm_kpts1 = normalize_keypoints(kpts1, image1.shape[2], image1.shape[3]) # class 'numpy.ndarray'
# norm_kpts0 = kpts0
# norm_kpts1 = kpts1
norm_kpts0 = np.expand_dims(kpts0, -1)
norm_kpts1 = np.expand_dims(kpts1, -1)
desc0 = np.expand_dims(desc0, -1)
desc1 = np.expand_dims(desc1, -1)
input_list = [norm_kpts0, norm_kpts1, desc0, desc1] # desc0 class 'numpy.ndarray', desc1 class 'numpy.ndarray'
print(">>>>>> Before export_rknn_inference!!!")
print("norm_kpts0.shape:", norm_kpts0.shape)
print("norm_kpts1.shape:", norm_kpts1.shape)
print("desc0.shape:", desc0.shape)
print("desc1.shape:", desc1.shape)
# Inference
results = export_rknn_inference(input_list)
m0, valid0, mscores0 = results[0], results[1], results[2]
m0 = m0.reshape(1, 300)
valid0 = valid0.reshape(1, 300)
mscores0 = mscores0.reshape(1, 300)
print("kpts0.shape:", kpts0.shape)
print("kpts0.shape:", kpts0.shape)
print("norm_kpts0.shape:", norm_kpts0.shape)
print("norm_kpts1.shape:", norm_kpts1.shape)
print("desc0.shape:", desc0.shape)
print("desc1.shape:", desc1.shape)
print(">>>>>>>>>> image0.shape[2]:", image0.shape[2])
print(">>>>>>>>>> image0.shape[3]:", image0.shape[3])
print("scales0 == :", scales0)
print("scales1 == :", scales1)
print("norm_kpts0.shape ==:", norm_kpts0.shape)
print("desc0 == :", desc0)
# print("mscores0 ====:", mscores0)
indices0 = np.arange(m0.shape[1]).reshape(1, m0.shape[1])
m_indices_0 = indices0[valid0]
m_indices_1 = m0[0][m_indices_0]
matches0 = np.stack([m_indices_0, m_indices_1], -1)
mscores0 = mscores0[0][m_indices_0]
m_kpts0, m_kpts1 = post_process(
kpts0, kpts1, matches0, scales0, scales1
)
orig_image0, _ = load_image("../assets/DSC_0410.JPG")
orig_image1, _ = load_image("../assets/DSC_0411.JPG")
plot_images(
[orig_image0[0].transpose(1, 2, 0), orig_image1[0].transpose(1, 2, 0)]
)
plot_matches(m_kpts0, m_kpts1, color="lime", lw=0.2)
# plt.show()
plt.savefig("./lightglue_export_rknn_inference.png")
print(">>>>>>>>>> export_rknn_inference run over!\n")`
=========================================================================
I then compiled the C++ code and ran inference on the RK3588 board, using the same npy test data as the script above.
The relevant C++ code is shown below.
1. The initialization function:
` int lightGlue_init(SegRgbParams &params)
{
rknn_context ctx;
std::string model_path = params.model_path;
/* Create the neural network */
int model_data_size = 0;
unsigned char *model_data = load_model(model_path.c_str(), &model_data_size);
if (model_data == NULL)
{
printf("load_model fail!\n");
return -1;
}
int ret = rknn_init(&ctx, model_data, model_data_size, 0, NULL);
if (ret < 0)
{
printf("rknn_init error ret=%d\n", ret);
return -1;
}
    // free the model buffer
if (model_data)
{
free(model_data);
}
    rknn_sdk_version version; // query the SDK version
ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
printf("sdk version: %s driver version: %s\n", version.api_version, version.drv_version);
// Get Model Input Output Number
    rknn_input_output_num io_num; // query the number of model inputs and outputs
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);
// Get Model Input Info
printf("input tensors:\n");
rknn_tensor_attr input_attrs[io_num.n_input];
memset(input_attrs, 0, sizeof(input_attrs));
for (int i = 0; i < io_num.n_input; i++)
{
        input_attrs[i].index = i; // query the attributes of input i
ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
dump_tensor_attr(&(input_attrs[i]));
}
// Get Model Output Info
printf("output tensors:\n");
rknn_tensor_attr output_attrs[io_num.n_output];
memset(output_attrs, 0, sizeof(output_attrs));
for (int i = 0; i < io_num.n_output; i++)
{
        output_attrs[i].index = i; // query the attributes of output i
ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
dump_tensor_attr(&(output_attrs[i]));
}
// Set to context
app_ctx.rknn_ctx = ctx;
    // Select the NPU core(s) to run on (the core mask is only supported on RK3588)
    // npu_core_mask: 0: auto, 1: npu core1, 2: npu core2, 4: npu core3,
    //                3: npu core1&2, 7: npu core1&2&3
int core_mask = params.npu_core_mask;
rknn_set_core_mask(app_ctx.rknn_ctx, (rknn_core_mask)core_mask);
// TODO
if (output_attrs[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC && output_attrs[0].type != RKNN_TENSOR_FLOAT16)
{
app_ctx.is_quant = true;
printf("app_ctx.is_quant:", app_ctx.is_quant);
}
else
{
app_ctx.is_quant = false;
printf("app_ctx.is_quant:", app_ctx.is_quant);
}
app_ctx.io_num = io_num;
app_ctx.input_attrs = (rknn_tensor_attr *)malloc(io_num.n_input * sizeof(rknn_tensor_attr));
memcpy(app_ctx.input_attrs, input_attrs, io_num.n_input * sizeof(rknn_tensor_attr));
app_ctx.output_attrs = (rknn_tensor_attr *)malloc(io_num.n_output * sizeof(rknn_tensor_attr));
memcpy(app_ctx.output_attrs, output_attrs, io_num.n_output * sizeof(rknn_tensor_attr));
if (input_attrs[0].fmt == RKNN_TENSOR_NCHW)
{
printf("model is NCHW input fmt\n");
app_ctx.model_channel = input_attrs[0].dims[1];
// app_ctx.model_height = input_attrs[0].dims[2];
// app_ctx.model_width = input_attrs[0].dims[3];
}
else
{
printf("model is NHWC input fmt\n");
// app_ctx.model_height = input_attrs[0].dims[1];
// app_ctx.model_width = input_attrs[0].dims[2];
app_ctx.model_channel = input_attrs[0].dims[3];
std::cout << "input_attrs[0].dims[0]=" << input_attrs[0].dims[0] << std::endl;
std::cout << "input_attrs[0].dims[1]=" << input_attrs[0].dims[1] << std::endl;
std::cout << "input_attrs[0].dims[2]=" << input_attrs[0].dims[2] << std::endl;
std::cout << "input_attrs[0].dims[3]=" << input_attrs[0].dims[3] << std::endl;
}
printf("model input channel=%d\n", app_ctx.model_channel);
return 0;
}`
2. The following reads the npy files and converts them into tensors:
` // read the npy files
std::vector<int> shape;
std::vector<float> kpts0_data = load_npy("testData/kpts0.npy", shape);
std::cout << "kpts0.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> kpts1_data = load_npy("testData/kpts1.npy", shape);
std::cout << "kpts1.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> desc0_data = load_npy("testData/desc0.npy", shape);
std::cout << "desc0.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> desc1_data = load_npy("testData/desc1.npy", shape);
std::cout << "desc1.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
// **************************** normalization *************************
// load the images and normalize them
auto [image0, scales0] = load_image("testData/DSC_0410.JPG", false, cv::Size(366, 512));
auto [image1, scales1] = load_image("testData/DSC_0411.JPG", false, cv::Size(366, 512));
// convert kpts0 and kpts1 to Eigen matrices
Eigen::MatrixXf kpts0 = Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, 2>>(kpts0_data.data(), 300, 2);
Eigen::MatrixXf kpts1 = Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, 2>>(kpts1_data.data(), 300, 2);
// expand to 4-D tensors
Eigen::Tensor<float, 4> tensor_kpts0 = expand_to_tensor(kpts0, 1);
Eigen::Tensor<float, 4> tensor_kpts1 = expand_to_tensor(kpts1, 1);
Eigen::Tensor<float, 4> tensor_desc0 = expand_to_tensor(Eigen::Map<Eigen::MatrixXf>(desc0_data.data(), 300, 256), 1);
Eigen::Tensor<float, 4> tensor_desc1 = expand_to_tensor(Eigen::Map<Eigen::MatrixXf>(desc1_data.data(), 300, 256), 1);
// print the expanded shapes
std::cout << "tensor_kpts0.shape: (" << tensor_kpts0.dimension(0) << ", "
<< tensor_kpts0.dimension(1) << ", "
<< tensor_kpts0.dimension(2) << ", "
<< tensor_kpts0.dimension(3) << ")\n";
std::cout << "tensor_kpts1.shape: (" << tensor_kpts1.dimension(0) << ", "
<< tensor_kpts1.dimension(1) << ", "
<< tensor_kpts1.dimension(2) << ", "
<< tensor_kpts1.dimension(3) << ")\n";
std::cout << "tensor_desc0.shape: (" << tensor_desc0.dimension(0) << ", "
<< tensor_desc0.dimension(1) << ", "
<< tensor_desc0.dimension(2) << ", "
<< tensor_desc0.dimension(3) << ")\n";
std::cout << "tensor_desc1.shape: (" << tensor_desc1.dimension(0) << ", "
<< tensor_desc1.dimension(1) << ", "
<< tensor_desc1.dimension(2) << ", "
<< tensor_desc1.dimension(3) << ")\n";
// build the input list
std::vector<Eigen::Tensor<float, 4>> input_list = {tensor_kpts0, tensor_kpts1,
tensor_desc0, tensor_desc1};`
The inference function is then called:
cv::Mat result = lightGlue(width, height, bgr_image.data, channels, input_list);
The inference function is defined as follows:
` cv::Mat lightGlue(uint32_t img_width, uint32_t img_height, uint8_t *data, uint64_t img_channels, std::vector<Eigen::Tensor<float, 4>> input_list)
{
int ret;
rknn_input inputs[4];
memset(inputs, 0, sizeof(inputs));
std::cout << "app_ctx.model_channel === :" << app_ctx.model_channel << std::endl;
inputs[0].index = 0;
inputs[0].type = RKNN_TENSOR_INT64;
    inputs[0].size = app_ctx.model_channel * 300 * 2 * 8; // x 8 bytes per int64 element
inputs[0].fmt = RKNN_TENSOR_NHWC;
inputs[0].pass_through = 0;
inputs[1].index = 1;
inputs[1].type = RKNN_TENSOR_INT64;
inputs[1].size = app_ctx.model_channel * 300 * 2 * 8;
inputs[1].fmt = RKNN_TENSOR_NHWC;
inputs[1].pass_through = 0;
inputs[2].index = 2;
inputs[2].type = RKNN_TENSOR_FLOAT32;
inputs[2].size = app_ctx.model_channel * 300 * 256 * 8;
inputs[2].fmt = RKNN_TENSOR_NHWC;
inputs[2].pass_through = 0;
inputs[3].index = 3;
inputs[3].type = RKNN_TENSOR_FLOAT32;
inputs[3].size = app_ctx.model_channel * 300 * 256 * 8;
inputs[3].fmt = RKNN_TENSOR_NHWC;
inputs[3].pass_through = 0;
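    // NOTE: the sizes above are hard-coded. A safer pattern is to derive them from the
    // queried attributes (e.g. inputs[i].size = app_ctx.input_attrs[i].n_elems * 8 for
    // an int64 input). Also, the buffers attached below come from Eigen::Tensor<float, 4>,
    // i.e. float32 data, while inputs[0]/inputs[1] are declared RKNN_TENSOR_INT64;
    // the buffer's actual element type should match the declared inputs[i].type.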
    // You may not need resize when src resolution equals dst resolution
void *resize_buf = nullptr;
// ************************************************************************************* //
inputs[0].buf = static_cast<void *>(input_list[0].data());
inputs[1].buf = static_cast<void *>(input_list[1].data());
inputs[2].buf = static_cast<void *>(input_list[2].data());
inputs[3].buf = static_cast<void *>(input_list[3].data());
// ************************************************************************************* //
ret = rknn_inputs_set(app_ctx.rknn_ctx, app_ctx.io_num.n_input, inputs); // 设置输入数据
if (ret < 0)
{
printf("rknn_input_set fail! ret=%d\n", ret);
}
std::cout << "deeplabv3_seg : rknn_inputs_set !\n"
<< std::endl;
rknn_output outputs[app_ctx.io_num.n_output];
memset(outputs, 0, sizeof(outputs));
for (int i = 0; i < app_ctx.io_num.n_output; i++)
{
outputs[i].want_float = 1;
}
std::cout << "deeplabv3_seg : rknn_run start !" << std::endl;
ret = rknn_run(app_ctx.rknn_ctx, NULL);
if (ret < 0)
{
printf("ERORR: rknn_run error %d\n", ret);
rknn_destroy(app_ctx.rknn_ctx);
}
std::cout << "deeplabv3_seg : rknn_run end !" << std::endl;
    // fetch the inference outputs
ret = rknn_outputs_get(app_ctx.rknn_ctx, app_ctx.io_num.n_output, outputs, NULL);
if (ret < 0)
{
printf("ERORR: rknn_outputs_get error %d\n", ret);
rknn_destroy(app_ctx.rknn_ctx);
}
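    // (remaining post-processing and the return value are omitted)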
}`
After running the above code, it prints:
Since there is no further error information or any way to localize the fault, and the RKNN documentation contains no sample code for multi-input models, I cannot make further progress with the port. I would appreciate advice from the RKNN developers.
Can the perf benchmarking interface and the accuracy-analysis interface run normally?
Oh, I see now: Python inference works, so the segmentation fault is most likely caused by the C code.
I also noticed that your driver version is rather old; have you considered upgrading it? Still, your case is strange, given that even your Python on-board inference works...
You could try RKNN-Lite inference instead; it calls the C API through pybind. First try inference with different Lite versions and see whether it succeeds.
My Python run was not on the board; it ran in the latest Docker 2.2.0 environment, and the script's results were correct.
Only after compiling the C++ code did I start testing on the RK3588 board, and that is where the error appears.
By "driver version is old" you mean my board's driver version 0.7.2 is too old, right? I previously ran SuperPoint image inference in this same board environment without problems.
I will look into the RKNN-Lite material.
Also, some additional information:
Of my four inputs, kpts0 and kpts1 are integers (keypoint coordinates), while desc0 and desc1 are floating-point feature descriptors. I don't know why, after conversion to RKNN, desc0 and desc1 became Int8, as shown in the figure below:
In the subsequent inference code I am now confused about which input types to use. My image inputs have always defaulted to Int8 before.
You enabled quantization: QUANTIZE_ON = True # False: convert the original model to fp16; True: quantize the model to int8.
That is why the inputs became int8? I suggest first trying without quantization. After exporting the RKNN model, run it on the board directly with RKNNLite, or run the accuracy-analysis interface in board-connected mode.
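(For reference, board-connected accuracy analysis with RKNN-Toolkit2 only needs a target argument on top of the calls already used in the conversion script; the paths below reuse the ones from that script, and the board must be reachable over adb:)
`from rknn.api import RKNN

rknn = RKNN(verbose=True)
rknn.config(target_platform='rk3588')
rknn.load_onnx(model='../weights/superpoint_lightglue.onnx')
rknn.build(do_quantization=False)
# With target set, each layer is executed on the RK3588 instead of the simulator.
rknn.accuracy_analysis(inputs=['./quantize/kpts0.npy', './quantize/kpts1.npy',
                               './quantize/desc0.npy', './quantize/desc1.npy'],
                       target='rk3588', output_dir='./snapshot')
rknn.release()`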
Thanks for your reply.
I disabled quantization by setting QUANTIZE_ON to False.
I ran the accuracy analysis in Docker; part of the result is shown below (full content in the attachment
精度分析.txt
):
Input types of the unquantized RKNN model:
Also, the official documentation says little about how to interpret the accuracy-analysis results, so I only looked at the cosine distance.
This accuracy analysis is not valid: it only contains the simulator outputs. You need to make sure the accuracy analysis runs correctly in board-connected mode.
Yes, I understand what you mean. It is just that in the Docker 2.2.0 simulator environment I can already call rknn.inference from a Python script, get correct results, and the visualization is also correct. So is it still necessary to run the Python script connected to the board? At this point I don't think my error is an accuracy problem: when I translate the code to C++, ret = rknn_run(app_ctx.rknn_ctx, NULL); crashes directly with Segmentation fault, and I have no way to dig deeper into it.
So for now I am sticking with the quantized model.
The Python script I used in the simulator to convert the ONNX model to RKNN and run inference on it is attached:
onnx2rknn_lightglue_ZQ.txt
Visualization of the RKNN model:
I think the biggest problem now is in my C++ code.
Initialization code:
` int lightGlue_init(SegRgbParams &params)
{
rknn_context ctx;
std::string model_path = params.model_path;
/* Create the neural network */
int model_data_size = 0;
unsigned char *model_data = load_model(model_path.c_str(), &model_data_size);
if (model_data == NULL)
{
printf("load_model fail!\n");
return -1;
}
int ret = rknn_init(&ctx, model_data, model_data_size, 0, NULL);
if (ret < 0)
{
printf("rknn_init error ret=%d\n", ret);
return -1;
}
    // free the model buffer
if (model_data)
{
free(model_data);
}
    rknn_sdk_version version; // query the SDK version
ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
printf("sdk version: %s driver version: %s\n", version.api_version, version.drv_version);
// Get Model Input Output Number
    rknn_input_output_num io_num; // query the number of model inputs and outputs
ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);
// Get Model Input Info
printf("input tensors:\n");
rknn_tensor_attr input_attrs[io_num.n_input];
memset(input_attrs, 0, sizeof(input_attrs));
for (int i = 0; i < io_num.n_input; i++)
{
        input_attrs[i].index = i; // query the attributes of input i
ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
if (ret < 0)
{
printf("rknn_query error ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
dump_tensor_attr(&(input_attrs[i]));
}
// Get Model Output Info
printf("output tensors:\n");
rknn_tensor_attr output_attrs[io_num.n_output];
memset(output_attrs, 0, sizeof(output_attrs));
for (int i = 0; i < io_num.n_output; i++)
{
        output_attrs[i].index = i; // query the attributes of output i
ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
if (ret != RKNN_SUCC)
{
printf("rknn_query fail! ret=%d\n", ret);
rknn_destroy(ctx);
return -1;
}
dump_tensor_attr(&(output_attrs[i]));
}
// Set to context
app_ctx.rknn_ctx = ctx;
    // Select the NPU core(s) to run on (the core mask is only supported on RK3588)
    // npu_core_mask: 0: auto, 1: npu core1, 2: npu core2, 4: npu core3,
    //                3: npu core1&2, 7: npu core1&2&3
int core_mask = params.npu_core_mask;
rknn_set_core_mask(app_ctx.rknn_ctx, (rknn_core_mask)core_mask);
// TODO
if (output_attrs[0].qnt_type == RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC && output_attrs[0].type != RKNN_TENSOR_FLOAT16)
{
app_ctx.is_quant = true;
std::cout << "app_ctx.is_quant:" << app_ctx.is_quant << std::endl;
}
else
{
app_ctx.is_quant = false;
std::cout << "app_ctx.is_quant:" << app_ctx.is_quant << std::endl;
}
std::cout << "\n *********************************** " << std::endl;
app_ctx.io_num = io_num;
app_ctx.input_attrs = (rknn_tensor_attr *)malloc(io_num.n_input * sizeof(rknn_tensor_attr));
memcpy(app_ctx.input_attrs, input_attrs, io_num.n_input * sizeof(rknn_tensor_attr));
app_ctx.output_attrs = (rknn_tensor_attr *)malloc(io_num.n_output * sizeof(rknn_tensor_attr));
memcpy(app_ctx.output_attrs, output_attrs, io_num.n_output * sizeof(rknn_tensor_attr));
if (input_attrs[0].fmt == RKNN_TENSOR_NCHW)
{
printf("model is NCHW input fmt\n");
// app_ctx.model_channel = input_attrs[0].dims[1];
// app_ctx.model_height = input_attrs[0].dims[2];
// app_ctx.model_width = input_attrs[0].dims[3];
app_ctx.model_channel = input_attrs[1].dims[3];
std::cout << "input_attrs[0].dims[0]=" << input_attrs[0].dims[0] << std::endl;
std::cout << "input_attrs[0].dims[1]=" << input_attrs[0].dims[1] << std::endl;
std::cout << "input_attrs[0].dims[2]=" << input_attrs[0].dims[2] << std::endl;
std::cout << "input_attrs[0].dims[3]=" << input_attrs[0].dims[3] << std::endl;
}
else
{
printf("model is NHWC input fmt\n");
// app_ctx.model_height = input_attrs[0].dims[1];
// app_ctx.model_width = input_attrs[0].dims[2];
app_ctx.model_channel = input_attrs[0].dims[3];
std::cout << "input_attrs[0].dims[0]=" << input_attrs[0].dims[0] << std::endl;
std::cout << "input_attrs[0].dims[1]=" << input_attrs[0].dims[1] << std::endl;
std::cout << "input_attrs[0].dims[2]=" << input_attrs[0].dims[2] << std::endl;
std::cout << "input_attrs[0].dims[3]=" << input_attrs[0].dims[3] << std::endl;
}
printf("model input channel=%d\n", app_ctx.model_channel);
return 0;
}
Inference code:
cv::Mat lightGlue(uint32_t img_width, uint32_t img_height, uint8_t *data, uint64_t img_channels, std::vector<Eigen::Tensor<float, 4>> input_list)
{
int ret;
rknn_input inputs[4];
memset(inputs, 0, sizeof(inputs));
std::cout << "app_ctx.model_channel === :" << app_ctx.model_channel << std::endl;
inputs[0].index = 0;
inputs[0].type = RKNN_TENSOR_INT64;
inputs[0].size = app_ctx.model_channel * 300 * 2 * 8;
inputs[0].fmt = RKNN_TENSOR_NHWC;
inputs[0].pass_through = 0;
inputs[1].index = 1;
inputs[1].type = RKNN_TENSOR_INT64;
inputs[1].size = app_ctx.model_channel * 300 * 2 * 8;
inputs[1].fmt = RKNN_TENSOR_NHWC;
inputs[1].pass_through = 0;
inputs[2].index = 2;
inputs[2].type = RKNN_TENSOR_INT8;
inputs[2].size = app_ctx.model_channel * 300 * 256;
inputs[2].fmt = RKNN_TENSOR_NHWC;
inputs[2].pass_through = 0;
inputs[3].index = 3;
inputs[3].type = RKNN_TENSOR_INT8;
inputs[3].size = app_ctx.model_channel * 300 * 256;
inputs[3].fmt = RKNN_TENSOR_NHWC;
inputs[3].pass_through = 0;
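    // NOTE: inputs[2]/inputs[3] are declared RKNN_TENSOR_INT8 with size 300*256 bytes,
    // but the buffers attached below come from Eigen::Tensor<float, 4>, i.e. float32
    // values of 4 bytes each. With pass_through = 0 the runtime converts from the
    // declared type itself, so declaring RKNN_TENSOR_FLOAT32 with size
    // n_elems * sizeof(float) and passing the float buffer directly should be the
    // consistent combination.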
    // You may not need resize when src resolution equals dst resolution
void *resize_buf = nullptr;
// ************************************************************************************* //
cv::Mat image = cv::Mat(img_height, img_width, CV_8UC1, data);
inputs[0].buf = static_cast<void *>(input_list[0].data());
inputs[1].buf = static_cast<void *>(input_list[1].data());
inputs[2].buf = static_cast<void *>(input_list[2].data());
inputs[3].buf = static_cast<void *>(input_list[3].data());
// ************************************************************************************* //
ret = rknn_inputs_set(app_ctx.rknn_ctx, app_ctx.io_num.n_input, inputs); // 设置输入数据
if (ret < 0)
{
printf("rknn_input_set fail! ret=%d\n", ret);
}
std::cout << "deeplabv3_seg : rknn_inputs_set !\n"
<< std::endl;
rknn_output outputs[app_ctx.io_num.n_output];
memset(outputs, 0, sizeof(outputs));
for (int i = 0; i < app_ctx.io_num.n_output; i++)
{
outputs[i].want_float = 1;
}
std::cout << "deeplabv3_seg : rknn_run start !" << std::endl;
ret = rknn_run(app_ctx.rknn_ctx, NULL);
if (ret < 0)
{
printf("ERORR: rknn_run error %d\n", ret);
rknn_destroy(app_ctx.rknn_ctx);
}
std::cout << "deeplabv3_seg : rknn_run end !" << std::endl;
    // fetch the inference outputs
ret = rknn_outputs_get(app_ctx.rknn_ctx, app_ctx.io_num.n_output, outputs, NULL);
if (ret < 0)
{
printf("ERORR: rknn_outputs_get error %d\n", ret);
rknn_destroy(app_ctx.rknn_ctx);
}
    // post-processing
    float *semi_buf = (float *)outputs[0].buf;        // keypoints
    float *coarse_desc_buf = (float *)outputs[1].buf; // feature descriptors
    // remaining code omitted
}`
I load the four npy files for testing with the following code:
`
// read the npy files
std::vector<int> shape;
std::vector<float> kpts0_data = load_npy("testData/kpts0.npy", shape);
std::cout << "kpts0.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> kpts1_data = load_npy("testData/kpts1.npy", shape);
std::cout << "kpts1.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> desc0_data = load_npy("testData/desc0.npy", shape);
std::cout << "desc0.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
std::vector<float> desc1_data = load_npy("testData/desc1.npy", shape);
std::cout << "desc1.shape: (" << shape[0] << ", " << shape[1] << ", " << shape[2] << ")\n";
// **************************** normalization *************************
// load the images and normalize them
auto [image0, scales0] = load_image("testData/DSC_0410.JPG", false, cv::Size(366, 512));
auto [image1, scales1] = load_image("testData/DSC_0411.JPG", false, cv::Size(366, 512));
// convert kpts0 and kpts1 to Eigen matrices
Eigen::MatrixXf kpts0 = Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, 2>>(kpts0_data.data(), 300, 2);
Eigen::MatrixXf kpts1 = Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, 2>>(kpts1_data.data(), 300, 2);
// expand to 4-D tensors
Eigen::Tensor<float, 4> tensor_kpts0 = expand_to_tensor(kpts0, 1);
Eigen::Tensor<float, 4> tensor_kpts1 = expand_to_tensor(kpts1, 1);
Eigen::Tensor<float, 4> tensor_desc0 = expand_to_tensor(Eigen::Map<Eigen::MatrixXf>(desc0_data.data(), 300, 256), 1);
Eigen::Tensor<float, 4> tensor_desc1 = expand_to_tensor(Eigen::Map<Eigen::MatrixXf>(desc1_data.data(), 300, 256), 1);
// print the expanded shapes
std::cout << "tensor_kpts0.shape: (" << tensor_kpts0.dimension(0) << ", "
<< tensor_kpts0.dimension(1) << ", "
<< tensor_kpts0.dimension(2) << ", "
<< tensor_kpts0.dimension(3) << ")\n";
std::cout << "tensor_kpts1.shape: (" << tensor_kpts1.dimension(0) << ", "
<< tensor_kpts1.dimension(1) << ", "
<< tensor_kpts1.dimension(2) << ", "
<< tensor_kpts1.dimension(3) << ")\n";
std::cout << "tensor_desc0.shape: (" << tensor_desc0.dimension(0) << ", "
<< tensor_desc0.dimension(1) << ", "
<< tensor_desc0.dimension(2) << ", "
<< tensor_desc0.dimension(3) << ")\n";
std::cout << "tensor_desc1.shape: (" << tensor_desc1.dimension(0) << ", "
<< tensor_desc1.dimension(1) << ", "
<< tensor_desc1.dimension(2) << ", "
<< tensor_desc1.dimension(3) << ")\n";
// build the input list
std::vector<Eigen::Tensor<float, 4>> input_list = {tensor_kpts0, tensor_kpts1,
tensor_desc0, tensor_desc1};
// **************************************************************
cv::Mat mask = lightGlue(width, height, bgr_image.data, channels, input_list);`
Output when the above code runs inference on the RKNN board:
The prints show that the code breaks inside ret = rknn_run(app_ctx.rknn_ctx, NULL);.
So please take a look at the C++ code from the perspective of correct API usage. Thanks!
I know the problem is on your C++ side, but I cannot tell whether it is your input handling or an RK issue; that is why I asked you to run board-connected inference. With Python it is hard to get the inputs wrong. If board-connected inference also fails, it is an RK problem; if it succeeds, something earlier in your own code is wrong. You have only located the crash to that one RK function, so what more could I see from that? You might as well file a support ticket.
The simulator is just a simulator; its output differs from what the device actually produces. As I said, board-connected inference, whether through rknn-toolkit or rknnlite, always goes through the C API, so hold off on calling it yourself for now.
Thank you for the reply. I installed rknn_toolkit_lite2-2.2.0-cp38-cp38-linux_aarch64.whl on the RK3588 board and wrote an inference script. On-board inference now also crashes with Segmentation fault. Do I really have to upgrade the driver version? The printed message only presents that as a suggestion. Also, although this model could take dynamic inputs, to avoid errors I use fixed dimensions everywhere.
Output after running the script on the board:
Does this indicate that the RKNN runtime on my board is the problem?
The Python script I ran directly on the board is fairly simple:
`import cv2
import numpy as np
from rknnlite.api import RKNNLite
rknn_model = './model/RK3588/superpoint_lightglue.rknn'
if __name__ == '__main__':
kpts0 = np.load('./testData/kpts0.npy')
kpts1 = np.load('./testData/kpts1.npy')
desc0 = np.load('./testData/desc0.npy')
desc1 = np.load('./testData/desc1.npy')
expand_kpts0 = np.expand_dims(kpts0, 0)
expand_kpts1 = np.expand_dims(kpts1, 0)
desc0 = np.expand_dims(desc0, 0)
desc1 = np.expand_dims(desc1, 0)
print(">>>>>> Before export_rknn_inference!!!")
print("expand_kpts0.shape:", expand_kpts0.shape) # (1, 1, 300, 2)
print("expand_kpts1.shape:", expand_kpts1.shape) # (1, 1, 300, 2)
print("desc0.shape:", desc0.shape) # (1, 1, 300, 256)
print("desc1.shape:", desc1.shape) # (1, 1, 300, 256)
rknn_lite = RKNNLite()
# Load RKNN model
print(f'--> Load RKNN model: {rknn_model}')
ret = rknn_lite.load_rknn(rknn_model)
if ret != 0:
print('Load RKNN model failed')
exit(ret)
print('done')
# Init runtime environment
print('--> Init runtime environment')
# On RK356x / RK3576 / RK3588 with Debian OS, there is no need to specify the target.
# For RK3576 / RK3588, specify which NPU core the model runs on through the core_mask parameter.
ret = rknn_lite.init_runtime(core_mask=RKNNLite.NPU_CORE_0)
if ret != 0:
print('Init runtime environment failed')
exit(ret)
print('done')
input_list = [expand_kpts0, expand_kpts1, desc0, desc1] # desc0 class 'numpy.ndarray', desc1 class 'numpy.ndarray'
# Inference
print('--> Running model')
outputs = rknn_lite.inference(inputs=input_list)
print('rknn_lite.inference run done!')`
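(For reference: when a Python process dies with a bare Segmentation fault, the standard library's faulthandler can at least show which Python call triggered the native crash. A minimal, optional addition near the top of the script above:)
`import faulthandler
faulthandler.enable()  # on SIGSEGV, print the Python traceback before the process dies`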
Lite inference also crashes, so it is most likely unrelated to the inputs you set yourself. Upgrading the driver probably won't help either, but you can try; you can also try other versions of Lite. If none of that works, file a support ticket, or bisect the original network layer by layer. Tedious, but it works.
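(One way to do that layer-by-layer bisection, reusing the conversion script from earlier: cut the ONNX graph at an intermediate tensor when loading it, so only a prefix of the network is converted and run; the tensor name below is a placeholder.)
`# Convert and test progressively deeper prefixes until the failing layer is found.
ret = rknn.load_onnx(model=ONNX_MODEL,
                     outputs=['intermediate_tensor_name'])  # placeholder ONNX tensor name`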
@liuqinglong110 Hi, when you converted lightglue.onnx to RKNN with quantization enabled, how did you prepare the quantization data? The model inputs are two sets of keypoints (1, 300, 2) and two descriptors (1, 300, 256); what images should be used for this?
A second question: have you tried dynamic shapes? In my case multi-input inference also worked, and converting the model with dynamic inputs succeeded, but inference on the board failed. Details: #150
Hello. On the first question: I simply saved the keypoints and feature descriptors that SuperPoint produced for a pair of images and used them as the quantization inputs; the four file names go on the same line of the txt file.
On the second question: I did try dynamic inputs, but switched everything to fixed inputs to be safe.
In recent experiments I found that the crash on the board was caused by operators the runtime there does not support. They are supported in the latest RKNN 2.2, while older versions only warn about them. Once I took those unsupported layers out of the model, it worked.
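(Concretely, a single line of the DATASET txt from the conversion script would then look like this, assuming the npy paths used there:)
`./quantize/kpts0.npy ./quantize/kpts1.npy ./quantize/desc0.npy ./quantize/desc1.npy`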
Thank you for the helpful answer. Yesterday I ran some comparison experiments in different official RKNN Docker environments. In version 1.6, converting to RKNN reports that the torch.topk operator is unsupported, while version 2.2 converts it and the simulator inference results are correct. I had taken the model to the board with torch.topk still inside, and that is what produced the segmentation fault. Now that I have moved torch.topk and the other unsupported operators out of the model, inference works. torch.topk just has to be reimplemented by hand.
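(A sketch of such a host-side replacement, using np.argpartition so only the k best entries get sorted; assumes a score array of shape (..., N):)
`import numpy as np

def topk(scores: np.ndarray, k: int):
    """Top-k along the last axis: values and indices, largest first."""
    idx = np.argpartition(-scores, k - 1, axis=-1)[..., :k]      # unordered top-k indices
    order = np.argsort(np.take_along_axis(-scores, idx, axis=-1), axis=-1)
    idx = np.take_along_axis(idx, order, axis=-1)                # order them by descending score
    values = np.take_along_axis(scores, idx, axis=-1)
    return values, idx`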
OK, I will try that as well. Many thanks for your helpful reply!