diff --git a/Mindspore.AI/entry/src/main/cpp/include/mslite_data_type.h b/Mindspore.AI/entry/src/main/cpp/include/mslite_data_type.h index fc8d955025b67b165d0fab21140396f60084872d..7177fa544e15e507461c0de9c2b5a79810fc010d 100644 --- a/Mindspore.AI/entry/src/main/cpp/include/mslite_data_type.h +++ b/Mindspore.AI/entry/src/main/cpp/include/mslite_data_type.h @@ -44,7 +44,6 @@ struct LetterBox }; struct PicDesc { - LetterBox letterBox {}; size_t width = 0; size_t height = 0; size_t dataSize = 0; @@ -62,7 +61,6 @@ struct ObjectDesc { struct InferResult { ObjectDesc objects[OBJ_NUMB_MAX_SIZE] = {}; int count = -1; - int id = -1; }; #endif // MS_AI_DATA_TYPE \ No newline at end of file diff --git a/Mindspore.AI/entry/src/main/cpp/include/napi_utils.h b/Mindspore.AI/entry/src/main/cpp/include/napi_utils.h index b57ec6a944e7ecae7f47b396fc0398892f1e6db8..bc672bb8488742d4dd03f34c2c9ded9b8e4fab6c 100644 --- a/Mindspore.AI/entry/src/main/cpp/include/napi_utils.h +++ b/Mindspore.AI/entry/src/main/cpp/include/napi_utils.h @@ -65,10 +65,15 @@ struct MsAIAsyncContext { napi_ref callbackRef = nullptr; AsyncErrorCode status = UNKNOW; - std::string data = ""; + + /* input data */ + std::vector data; int32_t modelId = -1; - InferResult inferResult {}; + int32_t picNo = 0; PicDesc picDesc {}; + + /* output data */ + InferResult inferResult{}; }; bool GetNapiInt32(const napi_env &env, const int32_t value, napi_value &result); diff --git a/Mindspore.AI/entry/src/main/cpp/src/mslite_napi.cpp b/Mindspore.AI/entry/src/main/cpp/src/mslite_napi.cpp index bd1d84b9c51f904c6be9984ef48735c67292b4ce..a71746a2465738a522540556b88ccc7ad82b637e 100644 --- a/Mindspore.AI/entry/src/main/cpp/src/mslite_napi.cpp +++ b/Mindspore.AI/entry/src/main/cpp/src/mslite_napi.cpp @@ -24,33 +24,30 @@ #include #include #include +#include #include "mslite_log.h" #include "mslite_errors.h" #include -#include #include -#include -#include #include "napi_utils.h" #include - -#define MAX_MODEL_PATH_LENGTH 256 +#include using namespace cv; using namespace OHOS::MSAI; +using namespace std; OH_AI_ModelHandle modelms = nullptr; -static float threshold_class = 0.25; -static int output_dimension = 85; -int async_creat = 0; - -///////////////////////// -static cv::Mat src; +static float threshold_class = 0.45; +constexpr int output_dimension = 85; -std::vector pad; -static cv::Mat image; +struct ObjDetectMSOutputData { + size_t elementNum; + vector data; +}; +using PSMSOutputData = shared_ptr; -using namespace std; +static mutex g_predictMux; struct box { float x; @@ -103,7 +100,7 @@ struct Bbox { int classes; }; -//#define MODEL_DATA_TYPE_UINT8 +#define MODEL_DATA_TYPE_UINT8 #ifdef MODEL_DATA_TYPE_UINT8 #define CONV_OUTPUT_40_ZPOINT 173 @@ -156,7 +153,7 @@ static vector nms(std::vector &boxes, float threshold) { return resluts; } -static cv::Mat letterbox(cv::Mat &src, int h, int w, std::vector &pad) { +static cv::Mat letterbox(cv::Mat &src, int h, int w, vector &pad) { int in_w = src.cols; // width int in_h = src.rows; // height @@ -188,7 +185,7 @@ static cv::Mat letterbox(cv::Mat &src, int h, int w, std::vector &pad) { return resize_img; } -static box scale_box(box boxes, std::vector &pad) { +static box scale_box(box boxes, vector &pad) { box scaled_box; float r = pad[2]; @@ -207,12 +204,16 @@ static void flatten(void *x, int size, int layers, int batch, int forward) { int8_t *x_p = (int8_t *)x; int8_t *swap = (int8_t *)calloc(size * layers * batch, sizeof(int8_t)); #endif - int i, c, b; + if (swap == nullptr) { + LOGE("flatten: malloc failed"); + 
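// calloc failed: bail out here and leave the tensor contents untouched instead of dereferencing a null swap buffer below.
// For reference, the transpose this function performs (forward == 1) maps the channel-major index
//     i1 = b * layers * size + c * size + i
// to the position-major index
//     i2 = b * layers * size + i * layers + c
// e.g. with size = 2 and layers = 3: [c0p0 c0p1 | c1p0 c1p1 | c2p0 c2p1] -> [p0c0 p0c1 p0c2 | p1c0 p1c1 p1c2],
// so each grid cell's 3 anchors * 85 prediction values become contiguous before
// yolo_v5_post_process_onescale() indexes them as index * bb_size.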
return; + } + int i, c, b, i1, i2; for (b = 0; b < batch; ++b) { for (c = 0; c < layers; ++c) { for (i = 0; i < size; ++i) { - int i1 = b * layers * size + c * size + i; - int i2 = b * layers * size + i * layers + c; + i1 = b * layers * size + c * size + i; + i2 = b * layers * size + i * layers + c; if (forward) swap[i2] = x_p[i1]; else @@ -223,77 +224,7 @@ static void flatten(void *x, int size, int layers, int batch, int forward) { memcpy(x, swap, size * layers * batch * sizeof(int8_t)); free(swap); } -#if 0 -static void flatten(int8_t *x, int size, int layers, int batch, int forward) { - int8_t *swap = (int8_t *)calloc(size * layers * batch, sizeof(int8_t)); - int i, c, b; - for (b = 0; b < batch; ++b) { - for (c = 0; c < layers; ++c) { - for (i = 0; i < size; ++i) { - int i1 = b * layers * size + c * size + i; - int i2 = b * layers * size + i * layers + c; - if (forward) - swap[i2] = x[i1]; - else - swap[i1] = x[i2]; - } - } - } - memcpy(x, swap, size * layers * batch * sizeof(int8_t)); - free(swap); -} - -static void flatten(float *x, int size, int layers, int batch, int forward) { - float *swap = (float *)calloc(size * layers * batch, sizeof(float)); - int i, c, b; - for (b = 0; b < batch; ++b) { - for (c = 0; c < layers; ++c) { - for (i = 0; i < size; ++i) { - int i1 = b * layers * size + c * size + i; - int i2 = b * layers * size + i * layers + c; - if (forward) - swap[i2] = x[i1]; - else - swap[i1] = x[i2]; - } - } - } - memcpy(x, swap, size * layers * batch * sizeof(float)); - free(swap); -} - -static box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride) { - float xx = (x[index + 0]); - float xy = (x[index + 1]); - float xw = (x[index + 2]); - float xh = (x[index + 3]); - - box b; - b.x = (i + xx * 2 - 0.5) * stride; - b.y = (j + xy * 2 - 0.5) * stride; - b.w = (xw * 2) * (xw * 2) * biases[2 * n]; - b.h = (xh * 2) * (xh * 2) * biases[2 * n + 1]; - - return b; -} -box get_region_box(float *x, int index) { - float xx = (x[index + 0]); - float xy = (x[index + 1]); - float xw = (x[index + 2]); - float xh = (x[index + 3]); - - box b; - - b.x = xx; - b.y = xy; - b.w = xw; - b.h = xh; - - return b; -} -#endif - static box get_region_box(void *x_in, float *biases, int n, int index, int i, int j, int w, int h, int stride) { #ifdef MODEL_DATA_TYPE_UINT8 uint8_t *x = (uint8_t *)x_in; @@ -324,74 +255,9 @@ static box get_region_box(void *x_in, float *biases, int n, int index, int i, in b.h = (xh * 2) * (xh * 2) * biases[2 * n + 1]; return b; } - -#if 0 -static int yolo_v5_post_process_onescale(float *predictions, float *biases, float threshold_in, int stride, - vector &input) { - int i, j; - int num_class = output_dimension - 5; - int coords = 4; - int bb_size = coords + num_class + 1; - - float threshold = threshold_in; - - int nn_width = 640; - int nn_height = 640; - - int num_box = 3; - int modelWidth = nn_width / stride; - int modelHeight = nn_height / stride; - - for (i = 0; i < modelWidth * modelHeight; ++i) { - int row = i / modelWidth; - int col = i % modelWidth; - int n = 0; - for (n = 0; n < num_box; ++n) { - int index = i * num_box + n; - int p_index = index * bb_size + 4; - float scale = predictions[p_index]; - int box_index = index * bb_size; - int class_index = 0; - class_index = index * bb_size + 5; - - if (scale > threshold) { - int max_class = 0; - float probs[num_class]; - for (j = 0; j < num_class; ++j) { - float prob = scale * predictions[class_index + j]; - probs[j] = prob; - if (probs[j] > probs[max_class]) { - max_class = 
j; - } - } - - if (probs[max_class] > threshold) { - box getbox = - get_region_box(predictions, biases, n, box_index, col, row, modelWidth, modelHeight, stride); - - float l = getbox.x - getbox.w / 2; - float t = getbox.y - getbox.h / 2; - float r = getbox.x + getbox.w / 2; - float d = getbox.y + getbox.h / 2; - //LOGI("l:%f, t:%f, r:%f, d:%f\n", l, t, r, d); - - box scaled_box = scale_box(getbox, pad); - l = scaled_box.x - scaled_box.w / 2; - t = scaled_box.y - scaled_box.h / 2; - r = scaled_box.x + scaled_box.w / 2; - d = scaled_box.y + scaled_box.h / 2; - input.push_back({(int)l, (int)t, (int)r, (int)d, probs[max_class], max_class}); - } - } - } - } - - return 0; -} -#endif - + static int yolo_v5_post_process_onescale(void *predictions_in, float *biases, float threshold_in, int stride, - vector<Bbox> &input) + vector<float> &pad, vector<Bbox> &input) { #ifdef MODEL_DATA_TYPE_UINT8 uint8_t *predictions = (uint8_t *)predictions_in; @@ -496,6 +362,8 @@ static void *ReadModelFile(NativeResourceManager *nativeResourceManager, const s void *modelBuffer = malloc(fileSize); if (modelBuffer == nullptr) { LOGE("Get model file size failed"); + OH_ResourceManager_CloseRawFile(rawFile); + return nullptr; } int ret = OH_ResourceManager_ReadRawFile(rawFile, modelBuffer, fileSize); if (ret == 0) { @@ -509,7 +377,7 @@ static void *ReadModelFile(NativeResourceManager *nativeResourceManager, const s } // Create the context, set parameters such as the thread count and device type, and load the model. static void DestroyModelBuffer(void **buffer) { - if (buffer == nullptr) { + if ((buffer == nullptr) || (*buffer == nullptr)) { return; } free(*buffer); @@ -521,7 +389,6 @@ static OH_AI_ModelHandle CreateMSLiteModel(void *modelBuffer, size_t modelSize) auto context = OH_AI_ContextCreate(); LOGI("Create Context.\n"); if (context == nullptr) { - DestroyModelBuffer(&modelBuffer); LOGE("Create MSLite context failed.\n"); return nullptr; } @@ -534,14 +401,12 @@ static OH_AI_ModelHandle CreateMSLiteModel(void *modelBuffer, size_t modelSize) // Load the .ms model file auto model = OH_AI_ModelCreate(); if (model == nullptr) { - DestroyModelBuffer(&modelBuffer); LOGE("Allocate MSLite Model failed.\n"); return nullptr; } auto build_ret = OH_AI_ModelBuild(model, modelBuffer, modelSize, OH_AI_MODELTYPE_MINDIR, context); LOGI("Loader msfile.\n"); - DestroyModelBuffer(&modelBuffer); if (build_ret != OH_AI_STATUS_SUCCESS) { OH_AI_ModelDestroy(&model); LOGE("Build MSLite model failed.\n"); @@ -551,6 +416,8 @@ static OH_AI_ModelHandle CreateMSLiteModel(void *modelBuffer, size_t modelSize) return model; } +constexpr int copyChannelDataLen = 640 * 640 * sizeof(uint8_t); + #define GET_PARAMS(env, info, num) \ size_t argc = num; \ napi_value argv[num] = {nullptr}; \ @@ -558,105 +425,115 @@ void *data = nullptr; \ napi_get_cb_info(env, info, &argc, argv, &thisVar, &data) -constexpr int kNumPrintOfOutData = 10; -constexpr int RANDOM_RANGE = 128; -static void FillTensorWithRandom(OH_AI_TensorHandle msTensor) { - auto size = OH_AI_TensorGetDataSize(msTensor); - int8_t *data = (int8_t *)OH_AI_TensorGetMutableData(msTensor); - // letterbox - LOGI("FillTensorWithRandom call letterbox, tensor size %u", size); - image = letterbox(src, 640, 640, pad); +static int ObjectDetectPreprocess(PicDesc &picDesc, std::vector<uint8_t> &picData, + std::vector<uint8_t>& inputData, std::vector<float>& imagePad, size_t picNo) +{ + LOGI("ObjectDetectPreprocess: in picno=%d", picNo); + if (picData.empty()) { + LOGE("ObjectDetectPreprocess: input pic data is empty."); + return -1; + } + + cv::Mat rgbImage; 
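// The incoming frame is NV21/YUV420sp: a width x height Y plane followed by a half-height interleaved VU plane,
// i.e. (height * 3 / 2) rows of width bytes. cv::Mat's constructor takes (rows, cols, type), so the call below
// looks like it has the two dimensions swapped -- one would expect
//     cv::Mat yuv(picDesc.height * 3 / 2, picDesc.width, CV_8UC1);
// unless the frames happen to be square. Worth verifying against the actual preview size, since the removed
// PreProcess() used the same argument order.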
+ cv::Mat yuv(picDesc.width * 3 / 2, picDesc.height, CV_8UC1); + yuv.data = (unsigned char *)(picData.data()); + cv::cvtColor(yuv, rgbImage, COLOR_YUV420sp2BGR); + if (rgbImage.data == nullptr || rgbImage.channels() != 3 || rgbImage.rows == 0 || rgbImage.cols == 0) { + LOGE("cvtColor failed."); + return -1; + } + LOGI("ObjectDetectPreprocess: convert to bgr picno=%d", picNo); + + cv::Mat image = letterbox(rgbImage, 640, 640, imagePad); #ifndef MODEL_DATA_TYPE_UINT8 /* normalize the uint8 data [0,255] to int8 [-128,127] */ image.convertTo(image, CV_8S, 255.0 / 255.0, -128.0); #endif - //LOGI("FillTensorWithRandom letterbox start set input data, rows %d cols %d", image.rows, image.cols); /* the image is in HWC layout; convert it to the CHW layout the model expects */ vector<cv::Mat> img_channel(3); - cv::split(image, img_channel); - int index = 0; - int channel_len = 640 * 640; - for(int i = 0; i < 3; i++) { - memcpy((data + index), img_channel[i].data, channel_len * sizeof(int8_t)); - index += channel_len; - } -} + cv::split(image, img_channel); + LOGI("ObjectDetectPreprocess: image convert to letterbox picno=%d", picNo); + + inputData.resize(copyChannelDataLen * 3); + uint8_t *data = inputData.data(); + std::copy(img_channel[0].data, img_channel[0].data + copyChannelDataLen, data); + std::copy(img_channel[1].data, img_channel[1].data + copyChannelDataLen, data + copyChannelDataLen); + std::copy(img_channel[2].data, img_channel[2].data + copyChannelDataLen, data + copyChannelDataLen * 2); -// fill data to inputs tensor -static int FillInputTensors(OH_AI_TensorHandleArray &inputs) { - for (size_t i = 0; i < inputs.handle_num; i++) { - FillTensorWithRandom(inputs.handle_list[i]); - } - return OH_AI_STATUS_SUCCESS; + LOGI("ObjectDetectPreprocess: convert to NCHW picno=%d", picNo); + return 0; } + +constexpr size_t output40 = 40 * 40 * output_dimension * 3; +constexpr size_t output20 = 20 * 20 * output_dimension * 3; + +static int32_t ObjectDetectGetMSOutput(OH_AI_TensorHandleArray &tensorOutputs, vector<PSMSOutputData> &msOutput) +{ + void *tensorData = nullptr; + int max_num; + int dataSize; -// Run model inference and fetch the output data. -static void RunMSLiteModel(OH_AI_ModelHandle model, InferResult& inferResult) { - LOGI("Run RunMSLiteModel...................\n"); + for (size_t i = 0; i < tensorOutputs.handle_num; i++) { + auto tensor = tensorOutputs.handle_list[i]; + LOGI("- Tensor %{public}d name is: %{public}s.\n", static_cast<int>(i), OH_AI_TensorGetName(tensor)); + + max_num = (int)OH_AI_TensorGetElementNum(tensor); + // skip the 80*80 scale (small targets are not of interest) + if ((max_num != output40) && (max_num != output20)) { + continue; + } + auto output = make_shared<ObjDetectMSOutputData>(); - float iou_threshold = 0.45; - float biases[18] = {10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326}; - vector<Bbox> input; - vector<Bbox> res; + dataSize = (int)OH_AI_TensorGetDataSize(tensor); + tensorData = const_cast<void *>(OH_AI_TensorGetData(tensor)); + LOGI("- Tensor %{public}d size is: %{public}d.\n", static_cast<int>(i), dataSize); - LOGI("==========================all predict start=========\n"); - LOGI("==========================predict seg 0 =========\n"); - // set the model input data - auto inputs = OH_AI_ModelGetInputs(model); - FillInputTensors(inputs); - LOGI("==========================predict seg 1 =========\n"); - - auto outputs = OH_AI_ModelGetOutputs(model); - // run inference and print the output - auto predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, nullptr, nullptr); - if (predict_ret != OH_AI_STATUS_SUCCESS) { - OH_AI_ModelDestroy(&model); - LOGE("Predict MSLite model error.\n"); - return; + output->elementNum = max_num; + output->data.resize(dataSize); + std::copy((uint8_t 
*)tensorData, (uint8_t *)tensorData + dataSize, output->data.begin()); + msOutput.emplace_back(output); } - //LOGI("Run MSLite model success.\n"); - LOGI("==========================predict seg 2 =========\n"); - - //LOGI("Get model outputs:\n"); - for (size_t i = 0; i < outputs.handle_num; i++) { - auto tensor = outputs.handle_list[i]; - //LOGI("- Tensor %{public}d name is: %{public}s.\n", static_cast<int>(i), OH_AI_TensorGetName(tensor)); - //LOGI("- Tensor %{public}d size is: %{public}d.\n", static_cast<int>(i), (int)OH_AI_TensorGetDataSize(tensor)); - void *out_data = const_cast<void *>(OH_AI_TensorGetData(tensor)); - std::cout << "Output data is:"; - int max_num = (int)OH_AI_TensorGetElementNum(tensor); - //LOGI("**********max numbers is [%d]*********.\n", max_num); - - if (max_num == 40 * 40 * output_dimension * 3) { - flatten(out_data, 40 * 40, output_dimension * 3, 1, 1); - yolo_v5_post_process_onescale(out_data, &biases[6], threshold_class, 16, input); - } - if (max_num == 20 * 20 * output_dimension * 3) { - flatten(out_data, 20 * 20, output_dimension * 3, 1, 1); - yolo_v5_post_process_onescale(out_data, &biases[12], threshold_class, 32, input); - } - } - //LOGI("================run threshold_class:%f", threshold_class); - LOGI("==========================predict seg 3 =========\n"); - inferResult.id = 0; - res = nms(input, iou_threshold); - for (int i = 0; i < res.size(); i++) { -// LOGI("result 00000%s :%d %d %d %d %f\n", classes[res[i].classes].c_str(), res[i].x, res[i].y, res[i].w, res[i].h, -// res[i].score); - inferResult.objects[i].left = res[i].x; - inferResult.objects[i].top = res[i].y; - inferResult.objects[i].right = res[i].w; - inferResult.objects[i].bottom = res[i].h; - inferResult.objects[i].name = classes[res[i].classes]; - inferResult.objects[i].prop = res[i].score; + return 0; +} + +static int32_t ObjectDetectPredict(OH_AI_ModelHandle model, vector<uint8_t> &inputData, + vector<PSMSOutputData> &msOutput, int picNo) +{ + // MindSpore Lite does not support running multiple inferences in parallel on one compiled model, + // so a mutex serializes inference here, covering the input and output tensors as well + std::lock_guard<std::mutex> mslock(g_predictMux); + LOGI("ObjectDetectPredict: start inference picNo=%d", picNo); + + // set the model input data + auto inputTensor = OH_AI_ModelGetInputs(model); + if (inputTensor.handle_num != 1) { + LOGE("input tensor number is %d", inputTensor.handle_num); + return -1; + } + + size_t inputSize = inputData.size() * sizeof(uint8_t); + auto tensorSize = OH_AI_TensorGetDataSize(inputTensor.handle_list[0]); + if (tensorSize < inputSize) { + LOGE("tensor data buffer isn't enough tensor_size=%d, input=%d", tensorSize, inputSize); + return -1; + } + // the current model has exactly one input tensor + uint8_t *data = (uint8_t *)OH_AI_TensorGetMutableData(inputTensor.handle_list[0]); + memcpy(data, inputData.data(), tensorSize); + + OH_AI_TensorHandleArray tensorOutputs = OH_AI_ModelGetOutputs(model); + + OH_AI_Status status = OH_AI_ModelPredict(model, inputTensor, &tensorOutputs, nullptr, nullptr); + if (status != OH_AI_STATUS_SUCCESS) { + LOGE("Predict MSLite model error.\n"); + return -1; } - inferResult.count = res.size(); - LOGI("==========================predict seg 4 =========\n"); - LOGI("==========================all predict end=========\n"); + // copy the output data into msOutput first; postprocessing runs on another thread, and returning quickly here lets the next image start inference + return ObjectDetectGetMSOutput(tensorOutputs, msOutput); } namespace { @@ -728,25 +605,6 @@ int32_t ParsePicDesc(napi_env env, napi_value args, PicDescNapi &picDescNapi) { return RETCODE_SUCCESS; } -static cv::Mat PreProcess(PicDesc &picDesc, std::string &picData, cv::Mat &frame) { - - if (picData.empty()) { - LOGE("PreProcess: 
input pic data is empty."); - return frame; - } - - cv::Mat yuv(picDesc.width * 3 / 2, picDesc.height, CV_8UC1); - yuv.data = (unsigned char*)(picData.data()); - cv::cvtColor(yuv, frame, COLOR_YUV420sp2BGR); - LOGI("yuv trans to bgr"); - if (frame.data == nullptr || frame.channels() != 3 || frame.rows == 0 || frame.cols == 0) { - LOGE("PreProcess failed."); - return frame; - } - - return frame; -} - napi_status SetInferResult(napi_env env, napi_value result, InferResult& inferResult) { napi_value objects = nullptr; napi_create_array_with_length(env, inferResult.count, &objects); @@ -810,29 +668,91 @@ napi_status SetInferResult(napi_env env, napi_value result, InferResult& inferRe return napi_ok; } -static void ExecuteCB(napi_env env, void *data) { +static void ObjectDetectParseOutput(vector &msOutput, vector &imgPad, InferResult &result) +{ + float iou_threshold = 0.45; + float biases[18] = {10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326}; + vector input; + vector res; + + for (auto output : msOutput) { + if (output->elementNum == output40) { + flatten(output->data.data(), 40 * 40, output_dimension * 3, 1, 1); + yolo_v5_post_process_onescale(output->data.data(),&biases[6], + threshold_class,16,imgPad,input); + continue; + } + + if (output->elementNum == output20) { + flatten(output->data.data(), 20 * 20, output_dimension * 3, 1, 1); + yolo_v5_post_process_onescale(output->data.data(), &biases[12], + threshold_class, 32,imgPad,input); + } + } + + res = nms(input, iou_threshold); + for (int i = 0; i < res.size(); i++) { + LOGD("result 00000%s :%d %d %d %d %f\n", classes[res[i].classes].c_str(), res[i].x, res[i].y, res[i].w, + res[i].h, res[i].score); + result.objects[i].left = res[i].x; + result.objects[i].top = res[i].y; + result.objects[i].right = res[i].w; + result.objects[i].bottom = res[i].h; + result.objects[i].name = classes[res[i].classes]; + result.objects[i].prop = res[i].score; + } + result.count = res.size(); +} + +static void ExecuteCB(napi_env env, void *data) +{ + int32_t ret, picNo; MsAIAsyncContext *context = (MsAIAsyncContext *)data; - LOGI("ExecuteCB start."); - LOGI("modelId: %{public}d, width: %{public}d, height: %{public}d, dataSize: %{public}d", - context->modelId, (int)context->picDesc.width, (int)context->picDesc.height, (int)context->picDesc.dataSize); - - InferResult inferResult {}; - - image = PreProcess(context->picDesc, context->data, src); - LOGI("run ms............"); - RunMSLiteModel(modelms, inferResult); - context->status = SUCCESS; - context->inferResult = inferResult; - LOGD("ExecuteCB end."); + vector inputData; + vector imgPad; + vector msOutput; + + picNo = context->picNo; + LOGI("====================ExecuteCB start picNo=%d===========", picNo); + ret = ObjectDetectPreprocess(context->picDesc, context->data, inputData, imgPad, picNo); + if (ret != 0) { + LOGE("ObjectDetectPreprocess failed"); + context->status = FAIL; + context->inferResult.count = 0; + return; + } + LOGI("====================ExecuteCB seg 1: preprocess picNo=%d=========\n", picNo); + + ret = ObjectDetectPredict(modelms, inputData,msOutput, picNo); + if (ret != 0) { + LOGE("ObjectDetectPredict failed"); + context->status = FAIL; + context->inferResult.count = 0; + return; + } + LOGI("====================ExecuteCB seg 2: predict picNo=%d=========\n", picNo); + + InferResult inferResult{0}; + ObjectDetectParseOutput(msOutput, imgPad, inferResult); + if (ret != 0) { + LOGE("run ms model failed"); + context->status = FAIL; + context->inferResult.count = 0; + } 
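// Note: ObjectDetectParseOutput() returns void, so `ret` still holds the result of ObjectDetectPredict()
// checked above (which already returned early on failure); this FAIL branch is unreachable here and the
// else path below always runs.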
else { + context->status = SUCCESS; + context->inferResult = inferResult; + } + LOGI("====================ExecuteCB end: parse output picNo=%d=========\n", picNo); } static void PromiseCompleteCB(napi_env env, napi_status status, void *data) { - LOGI("PromiseCompleteCB start."); + LOGD("PromiseCompleteCB start."); MsAIAsyncContext *context = (MsAIAsyncContext *)data; napi_value result = nullptr; napi_value undefinedResult = nullptr; napi_get_undefined(env, &undefinedResult); - + int32_t picNo = context->picNo; + status = napi_create_object(env, &result); if (status != napi_ok) { LOGE("napi_create_object failed."); @@ -856,25 +776,79 @@ static void PromiseCompleteCB(napi_env env, napi_status status, void *data) { if (context->callbackRef != nullptr) { napi_delete_reference(env, context->callbackRef); } - + LOGI("=================PromiseCompleteCB pic %d inference complete", picNo); napi_delete_async_work(env, context->asyncWork); delete context; } static void SetData(std::unique_ptr<MsAIAsyncContext>& asyncContext, int32_t modelId, - PicDescNapi& picDescNapi, std::string& data) + PicDescNapi& picDescNapi, unsigned char *data, size_t dataLen) { asyncContext->picDesc.width = picDescNapi.width; asyncContext->picDesc.height = picDescNapi.height; asyncContext->picDesc.dataSize = picDescNapi.dataSize; asyncContext->modelId = modelId; - asyncContext->data = data; + + asyncContext->data.resize(dataLen); + std::copy(data, data + dataLen, asyncContext->data.begin()); +} + +static int ParseNapiParams(napi_env env, napi_callback_info info, std::unique_ptr<MsAIAsyncContext>& context) { + napi_status status; + size_t argc = ARGS_THREE + 1; + napi_value argv[ARGS_THREE + 1] = {nullptr}; + napi_value thisArg; + int32_t ret = napi_get_cb_info(env, info, &argc, argv, &thisArg, nullptr); + int32_t modelId = 0; + PicDescNapi picDescNapi{}; + int32_t picNo; + unsigned char *array_buffer_data = nullptr; + size_t array_buffer_total = 0; + + for (size_t i = PARAM0; i < argc; i++) { + if (i == PARAM0) { + status = napi_get_value_int32(env, argv[i], &modelId); + if ((status != napi_ok) || (modelId < 0)) { + return -1; + } + } else if (i == PARAM1) { + ret = ParsePicDesc(env, argv[i], picDescNapi); + if (ret != RETCODE_SUCCESS) { + return -1; + } + } else if (i == PARAM2) { + status = napi_get_arraybuffer_info(env, argv[i], reinterpret_cast<void **>(&array_buffer_data), + &array_buffer_total); + if ((status != napi_ok) || (array_buffer_total <= 0) || (array_buffer_data == nullptr)) { + LOGE("get image array buffer info failed"); + return -1; + } + } else if (i == 3) { + status = napi_get_value_int32(env, argv[i], &picNo); + if ((status != napi_ok) || (picNo < 0)) { + return -1; + } + } else { + LOGE("Invalid input params."); + return -1; + } + } + + LOGD("modelId: %{public}d, width: %{public}d, height: %{public}d, dataSize: %{public}d", modelId, + (int)picDescNapi.width, (int)picDescNapi.height, (int)picDescNapi.dataSize); + LOGD("array_buffer_total: %{public}d,", (int)array_buffer_total); + LOGI("===============ObjectDectionProcess start to handle pic %d", picNo); + + SetData(context, modelId, picDescNapi, array_buffer_data, array_buffer_total); + context->picNo = picNo; + return 0; +} } + // Read the .ms model file, create and build the model for CPU inference; modelms is a global variable static napi_value ObjectDectionInit(napi_env env, napi_callback_info info) { - int32_t ret; + //int32_t ret; int32_t modelId; napi_value error_ret; napi_create_int32(env, -1, &error_ret); @@ -898,8 +872,8 @@ static napi_value ObjectDectionInit(napi_env env, napi_callback_info info) LOGI("Read model file success"); modelms = 
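// Buffer ownership note: the DestroyModelBuffer() calls inside CreateMSLiteModel() were removed, so the
// caller now frees modelBuffer exactly once, via the DestroyModelBuffer() call right after this one,
// whether or not the build succeeds.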
CreateMSLiteModel(modelBuffer, modelSize); + DestroyModelBuffer(&modelBuffer); if (modelms == nullptr) { - OH_AI_ModelDestroy(&modelms); modelId = -1; LOGE("MSLiteFwk Build model failed.\n"); return error_ret; @@ -912,71 +886,37 @@ static napi_value ObjectDectionInit(napi_env env, napi_callback_info info) return success_ret; } -static napi_value ObjectDectionProcess(napi_env env, napi_callback_info info) -{ - napi_value promise = nullptr; - size_t argc = ARGS_THREE; - napi_value argv[ARGS_THREE] = {nullptr}; - napi_value thisArg; - int32_t ret = napi_get_cb_info(env, info, &argc, argv, &thisArg, nullptr); - int32_t modelId = 0; - PicDescNapi picDescNapi{}; - std::string data = ""; +static napi_value ObjectDectionProcess(napi_env env, napi_callback_info info) { + int ret; + napi_status status; napi_value undefinedResult = nullptr; napi_get_undefined(env, &undefinedResult); - std::unique_ptr asyncContext = std::make_unique(); + std::unique_ptr asyncContext = std::make_unique(); if (asyncContext == nullptr) { LOGE("AsyncContext object create failed."); return undefinedResult; } - for (size_t i = PARAM0; i < argc; i++) { - if (i == PARAM0) { - napi_status status = napi_get_value_int32(env, argv[i], &modelId); - if ((status != napi_ok) || (modelId < 0)) { - return undefinedResult; - } - } else if (i == PARAM1) { - ret = ParsePicDesc(env, argv[i], picDescNapi); - if (ret != RETCODE_SUCCESS) { - return undefinedResult; - } - } else if (i == PARAM2) { - char *array_buffer_data; - size_t array_buffer_total; - napi_status status = napi_get_arraybuffer_info(env, argv[i], reinterpret_cast(&array_buffer_data), - &array_buffer_total); - if ((status != napi_ok) || (array_buffer_total <= 0)) { - return undefinedResult; - } - data.assign(array_buffer_data, array_buffer_data + array_buffer_total); - } else { - LOGE("Invalid input params."); - return undefinedResult; - } + ret = ParseNapiParams(env, info, asyncContext); + if (ret != 0) { + LOGE("ParseNapiParams failed."); + return undefinedResult; } + LOGI("=============ObjectDectionProcess parse napi params for picNo %d", asyncContext->picNo); - LOGI("modelId: %{public}d, width: %{public}d, height: %{public}d, dataSize: %{public}d", modelId, - (int)picDescNapi.width, (int)picDescNapi.height, (int)picDescNapi.dataSize); - LOGI("data.size(): %{public}d,", (int)data.size()); - - SetData(asyncContext, modelId, picDescNapi, data); - + napi_value promise = nullptr; napi_value resourceName = nullptr; napi_create_string_utf8(env, "Process", NAPI_AUTO_LENGTH, &resourceName); - auto status = napi_create_promise(env, &asyncContext->deferred, &promise); + status = napi_create_promise(env, &asyncContext->deferred, &promise); if (status != napi_ok) { LOGE("create callback failed."); return undefinedResult; } - if(1){ - status = napi_create_async_work(env, nullptr, resourceName, ExecuteCB, PromiseCompleteCB, - static_cast(asyncContext.get()), &asyncContext->asyncWork); - async_creat = 1; - } - if (status != napi_ok) { + status = napi_create_async_work(env, nullptr, resourceName, ExecuteCB, PromiseCompleteCB, + static_cast(asyncContext.get()), &asyncContext->asyncWork); + if (status != napi_ok) { LOGE("napi_create_async_work failed."); return undefinedResult; } @@ -984,13 +924,13 @@ static napi_value ObjectDectionProcess(napi_env env, napi_callback_info info) status = napi_queue_async_work(env, asyncContext->asyncWork); if (status == napi_ok) { asyncContext.release(); - LOGI("===================napi_queue_async_work ok."); + LOGD("===================napi_queue_async_work 
ok."); } else { - LOGI("===================napi_queue_async_work failed."); + LOGE("===================napi_queue_async_work failed."); return undefinedResult; } - LOGI("process async end."); + LOGD("process async end."); return promise; } @@ -1027,18 +967,21 @@ static napi_value setconf(napi_env env, napi_callback_info info) { return napiResult; } - static napi_value ObjectDectionDeInit(napi_env env, napi_callback_info info) { LOGI("ObjectDectionDeinit"); + if (modelms != nullptr) { + OH_AI_ModelDestroy(&modelms); + modelms = nullptr; + } + napi_value success_ret; napi_create_int32(env, 0, &success_ret); - return success_ret; - } +} - EXTERN_C_START - static napi_value Init(napi_env env, napi_value exports) { +EXTERN_C_START +static napi_value Init(napi_env env, napi_value exports) { napi_property_descriptor desc[] = { {"setconf", nullptr, setconf, nullptr, nullptr, nullptr, napi_default, nullptr}, {"Init", nullptr, ObjectDectionInit, nullptr, nullptr, nullptr, napi_default, nullptr}, @@ -1046,23 +989,23 @@ static napi_value ObjectDectionDeInit(napi_env env, napi_callback_info info) { {"DeInit", nullptr, ObjectDectionDeInit, nullptr, nullptr, nullptr, napi_default, nullptr}}; napi_define_properties(env, exports, sizeof(desc) / sizeof(desc[0]), desc); return exports; - } - EXTERN_C_END - - /* - * Napi Module define - */ - static napi_module msLiteModule = { - .nm_version = 1, - .nm_flags = 0, - .nm_filename = nullptr, - .nm_register_func = Init, - .nm_modname = "mslite_napi", - .nm_priv = ((void *)0), - .reserved = {0}, - }; - - /* - * Module register function - */ - extern "C" __attribute__((constructor)) void RegisterModule(void) { napi_module_register(&msLiteModule); } +} +EXTERN_C_END + +/* + * Napi Module define + */ +static napi_module msLiteModule = { + .nm_version = 1, + .nm_flags = 0, + .nm_filename = nullptr, + .nm_register_func = Init, + .nm_modname = "mslite_napi", + .nm_priv = ((void *)0), + .reserved = {0}, +}; + +/* + * Module register function + */ +extern "C" __attribute__((constructor)) void RegisterModule(void) { napi_module_register(&msLiteModule); } diff --git a/Mindspore.AI/entry/src/main/cpp/types/libmslite_napi/index.d.ts b/Mindspore.AI/entry/src/main/cpp/types/libmslite_napi/index.d.ts index 2d702fa1e9e8fcd5c909b17f69e12b01fb8c1912..0baff7638e93c8b19688edb7f407d4d5f1fa0a11 100644 --- a/Mindspore.AI/entry/src/main/cpp/types/libmslite_napi/index.d.ts +++ b/Mindspore.AI/entry/src/main/cpp/types/libmslite_napi/index.d.ts @@ -14,6 +14,7 @@ */ // export const Init: (path:String) => number; +export const setconf: (a:number) => number; export const Init: (path:Object) => number; -export const Process: (modeid:number, picDesc:Object, buffer: ArrayBuffer) => number; +export const Process: (modeid:number, picDesc:Object, buffer: ArrayBuffer, picNo:number) => number; export const DeInit: () => number; \ No newline at end of file diff --git a/Mindspore.AI/entry/src/main/ets/model/CameraModel.ets b/Mindspore.AI/entry/src/main/ets/model/CameraModel.ets index a692dc2432ff2082331014e1912db75176b75577..996299ea533c14a57faaff32dd22bdc38b020327 100644 --- a/Mindspore.AI/entry/src/main/ets/model/CameraModel.ets +++ b/Mindspore.AI/entry/src/main/ets/model/CameraModel.ets @@ -137,6 +137,7 @@ export default class CameraService { */ onImageArrival(callback: Callback): void { this.receiver.on('imageArrival', () => { + //setInterval(() => { this.receiver.readNextImage((err: BusinessError, nextImage: image.Image) => { if (err || nextImage === undefined) { return; @@ -160,21 +161,24 @@ export 
default class CameraService { dataSize: buffer.byteLength } - if ((number % 5) == 0) { + // If every image is processed the app stutters and the drawn boxes lag behind, at about 13 fps; running inference on every second image is much smoother and still reaches about 12 fps + // Uses the NAPI async work queue, with at most 4 threads running in parallel + if ((number % 2) == 0) { // @ts-ignore - mslite_napi.Process(this.modelId, picDesc, buffer).then((value: InferResult) => { - Logger.info(TAG, `Process result : ${JSON.stringify(value)}`); - callback(value.objects); + Logger.info(TAG, `================Process pic number = ${number}`); + mslite_napi.Process(this.modelId, picDesc, buffer, number).then((value: InferResult) => { + Logger.info(TAG, `============Process result ${JSON.stringify(value)}`); + callback(value.objects); }).catch((err: BusinessError) => { - Logger.info(TAG, `Get result false`); + Logger.info(TAG, `Get result false`); }); + // @ts-ignore } - Logger.info(TAG, `number = ${number}`); - //Logger.info(TAG, `buffer.byteLength = ${buffer.byteLength}`); number ++; nextImage.release(); }) }) + //}, 40) }) } diff --git a/Mindspore.AI/entry/src/main/ets/pages/Index.ets b/Mindspore.AI/entry/src/main/ets/pages/Index.ets index 9c2b6ad9fd2afe96b9d74af46f6a6a9394e84f58..a53372166740077fb54822eb037ac72d3c83e94b 100644 --- a/Mindspore.AI/entry/src/main/ets/pages/Index.ets +++ b/Mindspore.AI/entry/src/main/ets/pages/Index.ets @@ -18,6 +18,7 @@ import grantPermission from '../utlis/PermissionUtils'; import Logger from '../utlis/Logger'; import util from '@ohos.util'; import { BusinessError } from '@ohos.base'; +import prompt from '@ohos.prompt'; import image from '@ohos.multimedia.image'; import fs from '@ohos.file.fs'; import mslite_napi from 'libmslite_napi.so'; @@ -75,7 +76,10 @@ struct CameraPage { @State modelId: number = 0; @State pointData: ObjectDesc[] = []; @State imageNumber: number = 0; + @State numX: number = 0.0; + @State conf: string = '0'; private context: Context = getContext(this); + private textInputControllerX: TextInputController = new TextInputController(); async aboutToAppear() { try { @@ -91,7 +95,6 @@ struct CameraPage { mslite_napi.DeInit(); try { this.modelId = mslite_napi.Init(resourceManager); - // this.modelId = mslite_napi.Init(modelPath); Logger.info(TAG ,`modelId = ${this.modelId.toString()}`) if (this.modelId < 0) { mslite_napi.DeInit(); @@ -177,6 +180,28 @@ struct CameraPage { } .width('960px') .height('1280px') + Row() { + Text("class_min_conf (0-100):") + .fontColor(Color.White) + TextInput({ controller: this.textInputControllerX }) + .type(InputType.Number) + .fontColor(Color.Green) + .onChange(value => { + this.numX = Number.parseFloat(value); + }) + } + Row() { + Button("OK") + .onClick(() => { + if (this.numX > 0 && this.numX < 100) + mslite_napi.setconf(this.numX); + else { + prompt.showToast({ + message: "Please enter a value between 0 and 100" + }) + } + }) + } } .justifyContent(FlexAlign.Center) .backgroundColor('#000000') diff --git a/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_int8.ms b/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_int8.ms new file mode 100755 index 0000000000000000000000000000000000000000..f182186d58cd19e58e83a64170d9da6053778090 Binary files /dev/null and b/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_int8.ms differ diff --git a/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_uint8.ms b/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_uint8.ms new file mode 100755 index 0000000000000000000000000000000000000000..a041b97939c0164600b983586b75ad6ff3a3269c Binary files /dev/null and b/Mindspore.AI/entry/src/main/resources/rawfile/unm_nchw_uint8.ms differ
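A note on the concurrency design this patch introduces: a compiled MindSpore Lite model does not support parallel OH_AI_ModelPredict() calls, so ObjectDetectPredict() holds g_predictMux across the fill-input / predict / copy-output critical section while the NAPI async work queue fans in up to four worker threads; preprocessing (ObjectDetectPreprocess) and postprocessing (ObjectDetectParseOutput) run outside the lock. A minimal standalone sketch of that shape -- FakeModel, PredictSerialized and the worker loop below are illustrative stand-ins, not names from the patch:

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Illustrative stand-ins -- not the patch's real types.
struct FakeModel { int lastPic = -1; };

static FakeModel g_model;        // shared compiled model (cf. the global modelms)
static std::mutex g_predictMux;  // serializes fill-input -> predict -> copy-output

// Each async worker funnels through the same lock, mirroring ObjectDetectPredict():
// only the tensor I/O and the predict call are serialized, so work on other frames
// can still overlap with it.
static std::vector<int> PredictSerialized(int picNo, const std::vector<int> &input) {
    std::vector<int> output;
    {
        std::lock_guard<std::mutex> lock(g_predictMux);
        g_model.lastPic = picNo;  // "fill the input tensor"
        output = input;           // "OH_AI_ModelPredict"
        // Output is copied out while still holding the lock, as
        // ObjectDetectGetMSOutput() does, so the next inference can
        // safely overwrite the model's output tensors.
    }
    return output;  // parsing/NMS happens outside the lock
}

int main() {
    std::vector<std::thread> workers;
    for (int picNo = 0; picNo < 4; ++picNo) {  // cf. up to 4 NAPI worker threads
        workers.emplace_back([picNo] {
            auto out = PredictSerialized(picNo, {picNo, picNo + 1});
            std::printf("pic %d -> %zu values\n", picNo, out.size());
        });
    }
    for (auto &w : workers) {
        w.join();
    }
    return 0;
}

One smaller observation on the TypeScript side: index.d.ts still declares Process as returning number, while the native implementation returns a promise that CameraModel.ets consumes with .then(...); declaring it as returning a Promise would describe the actual contract more accurately.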