9.7 Building YOLOv10 ONNX Inference in Visual Studio (C++)
1. Environment setup
Before running ONNX inference, the following environment needs to be in place:
1. OpenCV, see the post: 9.2 c++搭建opencv环境-CSDN博客
2. libtorch, see the post: 9.4 visualStudio 2022 配置 cuda 和 torch (c++)-CSDN博客
3. CUDA, see the same post: 9.4 visualStudio 2022 配置 cuda 和 torch (c++)-CSDN博客
4. ONNX Runtime, see the post: VS2019配置ONNXRuntime c++环境_microsoft.ml.onnxruntime-CSDN博客
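Once these four pieces are installed, it can be worth compiling a tiny sanity-check program before touching any YOLO code. The snippet below is only a suggested check (it assumes the project's include and library paths for OpenCV and ONNX Runtime are already configured in Visual Studio); it simply confirms that both libraries compile, link, and report their versions:

#include <iostream>
#include <opencv2/opencv.hpp>
#include "onnxruntime_cxx_api.h"

int main()
{
    // Print the versions of the libraries this project actually links against.
    std::cout << "OpenCV version: " << cv::getVersionString() << std::endl;
    std::cout << "ONNX Runtime version: " << OrtGetApiBase()->GetVersionString() << std::endl;
    return 0;
}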
2. C++ code for YOLOv10
The code below was modified in a few places and debugged until it ran successfully. The full listing is as follows:
main.cpp
#include <iostream>
//#include <getopt.h>
#include "yolov5v8_dnn.h"
#include "yolov5v8_ort.h"using namespace std;
using namespace cv;void main(int argc, char** argv)
{string img_path = "E:\\vs\\daima\\1_8\\Project1\\x64\\Release\\street.jpg";string model_path = "E:\\vs\\daima\\1_8\\Project1\\x64\\Release\\yolov8n-seg.onnx";string test_cls = "dnn";if (test_cls == "dnn") {// Input the path of model ("yolov8s.onnx" or "yolov5s.onnx") to run Inference with yolov8/yolov5 (ONNX)// Note that in this example the classes are hard-coded and 'classes.txt' is a place holder.Inference inf(model_path, cv::Size(640, 640), "classes.txt", true);cv::Mat frame = cv::imread(img_path);std::vector<Detection> output = inf.runInference(frame);if (output.size() != 0) inf.DrawPred(frame, output);else cout << "Detect Nothing!" << endl;}if (test_cls == "ort") {DCSP_CORE* yoloDetector = new DCSP_CORE;
#ifdef USE_CUDA//DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V5, {640, 640}, 0.25, 0.45, 0.5, true }; // GPU FP32 inferenceDCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V5_HALF, {640, 640}, 0.25, 0.45, 0.5, true }; // GPU FP16 inference
#elseDCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V5, {640, 640},0.25, 0.45, 0.5, false }; // CPU inference
#endifyoloDetector->CreateSession(params);cv::Mat img = cv::imread(img_path);std::vector<DCSP_RESULT> res;yoloDetector->RunSession(img, res);if (res.size() != 0) yoloDetector->DrawPred(img, res);else cout << "Detect Nothing!" << endl;}
}
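One note on the "ort" branch above: the brace initializer for DCSP_INIT_PARAM fills the struct fields positionally, which is easy to get wrong when thresholds are reordered. As an alternative sketch (not part of the original code; the field names come from DCSP_INIT_PARAM in yolov5v8_ort.h below), the same parameters can be set explicitly inside that branch:

DCSP_INIT_PARAM params;
params.ModelPath                = model_path;
params.ModelType                = YOLO_ORIGIN_V5;   // YOLO_ORIGIN_V5_HALF for GPU FP16
params.imgSize                  = { 640, 640 };
params.modelConfidenceThreshold = 0.25f;
params.RectConfidenceThreshold  = 0.45f;
params.iouThreshold             = 0.5f;
params.CudaEnable               = true;             // false for CPU-only inference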
yolov5v8_dnn.cpp
#include "yolov5v8_dnn.h"
using namespace std;Inference::Inference(const std::string& onnxModelPath, const cv::Size& modelInputShape, const std::string& classesTxtFile, const bool& runWithCuda)
{modelPath = onnxModelPath;modelShape = modelInputShape;classesPath = classesTxtFile;cudaEnabled = runWithCuda;loadOnnxNetwork();// loadClassesFromFile(); The classes are hard-coded for this example
}std::vector<Detection> Inference::runInference(const cv::Mat& input)
{cv::Mat SrcImg = input;cv::Mat netInputImg;cv::Vec4d params;LetterBox(SrcImg, netInputImg, params, cv::Size(modelShape.width, modelShape.height));cv::Mat blob;cv::dnn::blobFromImage(netInputImg, blob, 1.0 / 255.0, modelShape, cv::Scalar(), true, false);net.setInput(blob);std::vector<cv::Mat> outputs;net.forward(outputs, net.getUnconnectedOutLayersNames());if (outputs.size() == 2) RunSegmentation = true;int rows = outputs[0].size[1];int dimensions = outputs[0].size[2];bool yolov8 = false;// yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c])// yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + box[x,y,w,h])if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8){yolov8 = true;rows = outputs[0].size[2];dimensions = outputs[0].size[1];outputs[0] = outputs[0].reshape(1, dimensions);cv::transpose(outputs[0], outputs[0]);}float* data = (float*)outputs[0].data;std::vector<int> class_ids;std::vector<float> confidences;std::vector<cv::Rect> boxes;std::vector<vector<float>> picked_proposals;for (int i = 0; i < rows; ++i){int _segChannels;if (yolov8){float* classes_scores = data + 4;cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);cv::Point class_id;double maxClassScore;minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);if (maxClassScore > modelScoreThreshold){if (RunSegmentation) {_segChannels = outputs[1].size[1];vector<float> temp_proto(data + classes.size() + 4, data + classes.size() + 4 + _segChannels);picked_proposals.push_back(temp_proto);}confidences.push_back(maxClassScore);class_ids.push_back(class_id.x);float x = (data[0] - params[2]) / params[0];float y = (data[1] - params[3]) / params[1];float w = data[2] / params[0];float h = data[3] / params[1];int left = MAX(round(x - 0.5 * w + 0.5), 0);int top = MAX(round(y - 0.5 * h + 0.5), 0);if ((left + w) > SrcImg.cols) { w = SrcImg.cols - left; }if ((top + h) > SrcImg.rows) { h = SrcImg.rows - top; }boxes.push_back(cv::Rect(left, top, int(w), int(h)));}}else // yolov5{float confidence = data[4];if (confidence >= modelConfidenceThreshold){float* classes_scores = data + 5;cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores);cv::Point class_id;double max_class_score;minMaxLoc(scores, 0, &max_class_score, 0, &class_id);if (max_class_score > modelScoreThreshold){if (RunSegmentation) {_segChannels = outputs[1].size[1];vector<float> temp_proto(data + classes.size() + 5, data + classes.size() + 5 + _segChannels);picked_proposals.push_back(temp_proto);}confidences.push_back(confidence);class_ids.push_back(class_id.x);float x = (data[0] - params[2]) / params[0];float y = (data[1] - params[3]) / params[1];float w = data[2] / params[0];float h = data[3] / params[1];int left = MAX(round(x - 0.5 * w + 0.5), 0);int top = MAX(round(y - 0.5 * h + 0.5), 0);if ((left + w) > SrcImg.cols) { w = SrcImg.cols - left; }if ((top + h) > SrcImg.rows) { h = SrcImg.rows - top; }boxes.push_back(cv::Rect(left, top, int(w), int(h)));}}}data += dimensions;}std::vector<int> nms_result;cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result);std::vector<Detection> detections{};std::vector<vector<float>> temp_mask_proposals;for (unsigned long i = 0; i < nms_result.size(); ++i){int idx = nms_result[i];Detection result;result.class_id = class_ids[idx];result.confidence = confidences[idx];std::random_device rd;std::mt19937 gen(rd());std::uniform_int_distribution<int> dis(100, 255);result.color = cv::Scalar(dis(gen),dis(gen),dis(gen));result.className 
= classes[result.class_id];result.box = boxes[idx];if (RunSegmentation) temp_mask_proposals.push_back(picked_proposals[idx]);if (result.box.width != 0 && result.box.height != 0) detections.push_back(result);}if (RunSegmentation) {cv::Mat mask_proposals;for (int i = 0; i < temp_mask_proposals.size(); ++i)mask_proposals.push_back(cv::Mat(temp_mask_proposals[i]).t());GetMask(mask_proposals, outputs[1], params, SrcImg.size(), detections);}return detections;
}void Inference::loadClassesFromFile()
{std::ifstream inputFile(classesPath);if (inputFile.is_open()){std::string classLine;while (std::getline(inputFile, classLine))classes.push_back(classLine);inputFile.close();}
}void Inference::loadOnnxNetwork()
{net = cv::dnn::readNetFromONNX(modelPath);if (cudaEnabled){std::cout << "\nRunning on CUDA" << std::endl;net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);}else{std::cout << "\nRunning on CPU" << std::endl;net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);}
}void Inference::LetterBox(const cv::Mat& image, cv::Mat& outImage, cv::Vec4d& params, const cv::Size& newShape,bool autoShape, bool scaleFill, bool scaleUp, int stride, const cv::Scalar& color)
{if (false) {int maxLen = MAX(image.rows, image.cols);outImage = cv::Mat::zeros(cv::Size(maxLen, maxLen), CV_8UC3);image.copyTo(outImage(cv::Rect(0, 0, image.cols, image.rows)));params[0] = 1;params[1] = 1;params[3] = 0;params[2] = 0;}cv::Size shape = image.size();float r = std::min((float)newShape.height / (float)shape.height,(float)newShape.width / (float)shape.width);if (!scaleUp)r = std::min(r, 1.0f);float ratio[2]{ r, r };int new_un_pad[2] = { (int)std::round((float)shape.width * r),(int)std::round((float)shape.height * r) };auto dw = (float)(newShape.width - new_un_pad[0]);auto dh = (float)(newShape.height - new_un_pad[1]);if (autoShape){dw = (float)((int)dw % stride);dh = (float)((int)dh % stride);}else if (scaleFill){dw = 0.0f;dh = 0.0f;new_un_pad[0] = newShape.width;new_un_pad[1] = newShape.height;ratio[0] = (float)newShape.width / (float)shape.width;ratio[1] = (float)newShape.height / (float)shape.height;}dw /= 2.0f;dh /= 2.0f;if (shape.width != new_un_pad[0] && shape.height != new_un_pad[1]){cv::resize(image, outImage, cv::Size(new_un_pad[0], new_un_pad[1]));}else {outImage = image.clone();}int top = int(std::round(dh - 0.1f));int bottom = int(std::round(dh + 0.1f));int left = int(std::round(dw - 0.1f));int right = int(std::round(dw + 0.1f));params[0] = ratio[0];params[1] = ratio[1];params[2] = left;params[3] = top;cv::copyMakeBorder(outImage, outImage, top, bottom, left, right, cv::BORDER_CONSTANT, color);
}void Inference::GetMask(const cv::Mat& maskProposals, const cv::Mat& mask_protos, const cv::Vec4d& params, const cv::Size& srcImgShape, std::vector<Detection>& output) {if (output.size() == 0) return;int _segChannels = mask_protos.size[1];int _segHeight = mask_protos.size[2];int _segWidth = mask_protos.size[3];cv::Mat protos = mask_protos.reshape(0, { _segChannels,_segWidth * _segHeight });cv::Mat matmulRes = (maskProposals * protos).t();cv::Mat masks = matmulRes.reshape(output.size(), { _segHeight,_segWidth });vector<cv::Mat> maskChannels;split(masks, maskChannels);for (int i = 0; i < output.size(); ++i) {cv::Mat dest, mask;//sigmoidcv::exp(-maskChannels[i], dest);dest = 1.0 / (1.0 + dest);cv::Rect roi(int(params[2] / modelShape.width * _segWidth), int(params[3] / modelShape.height * _segHeight), int(_segWidth - params[2] / 2), int(_segHeight - params[3] / 2));dest = dest(roi);cv::resize(dest, mask, srcImgShape, cv::INTER_NEAREST);//cropcv::Rect temp_rect = output[i].box;mask = mask(temp_rect) > modelScoreThreshold;output[i].boxMask = mask;}
}void Inference::DrawPred(cv::Mat& img, vector<Detection>& result) {int detections = result.size();std::cout << "Number of detections:" << detections << std::endl;cv::Mat mask = img.clone();for (int i = 0; i < detections; ++i){Detection detection = result[i];cv::Rect box = detection.box;cv::Scalar color = detection.color;// Detection boxcv::rectangle(img, box, color, 2);mask(detection.box).setTo(color, detection.boxMask);// Detection box textstd::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);cv::rectangle(img, textBox, color, cv::FILLED);cv::putText(img, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);}// Detection maskif (RunSegmentation) cv::addWeighted(img, 0.5, mask, 0.5, 0, img); //将mask加在原图上面cv::imshow("Inference", img);cv::imwrite("out.bmp", img);cv::waitKey();cv::destroyWindow("Inference");
}
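A detail worth calling out in runInference above: LetterBox fills params as [ratio_x, ratio_y, dw, dh], and every candidate box predicted on the 640x640 letterboxed input is mapped back to the original image with the inverse transform. The helper below is only an illustrative sketch of that formula (the function name is made up; the real code performs the same arithmetic inline and the OpenCV headers are assumed to be included already):

// Illustrative only: undo the letterbox transform for one (cx, cy, w, h) box.
// params = [ratio_x, ratio_y, dw, dh], exactly as filled by Inference::LetterBox().
static cv::Rect MapBoxToSource(float cx, float cy, float w, float h, const cv::Vec4d& params)
{
    float x  = (cx - params[2]) / params[0];   // subtract horizontal padding, undo x scale
    float y  = (cy - params[3]) / params[1];   // subtract vertical padding, undo y scale
    float sw = w / params[0];
    float sh = h / params[1];
    // convert a center/size box to a top-left based cv::Rect
    return cv::Rect(cvRound(x - 0.5f * sw), cvRound(y - 0.5f * sh), cvRound(sw), cvRound(sh));
}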
yolov5v8_ort.cpp
#define _CRT_SECURE_NO_WARNINGS
#include "yolov5v8_ort.h"
#include <regex>
#include <random>
#define benchmark
using namespace std;DCSP_CORE::DCSP_CORE() {}DCSP_CORE::~DCSP_CORE() {delete session;
}#ifdef USE_CUDA
namespace Ort
{template<>struct TypeToTensorType<half> { static constexpr ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16; };
}
#endiftemplate<typename T>
char* BlobFromImage(cv::Mat& iImg, T& iBlob) {int channels = iImg.channels();int imgHeight = iImg.rows;int imgWidth = iImg.cols;for (int c = 0; c < channels; c++) {for (int h = 0; h < imgHeight; h++) {for (int w = 0; w < imgWidth; w++) {iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = typename std::remove_pointer<T>::type((iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);}}}return RET_OK;
}char* PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg) {cv::Mat img = iImg.clone();cv::resize(iImg, oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));if (img.channels() == 1) {cv::cvtColor(oImg, oImg, cv::COLOR_GRAY2BGR);}cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);return RET_OK;
}void LetterBox(const cv::Mat& image, cv::Mat& outImage, cv::Vec4d& params, const cv::Size& newShape = cv::Size(640, 640),bool autoShape = false, bool scaleFill = false, bool scaleUp = true, int stride = 32, const cv::Scalar& color = cv::Scalar(114, 114, 114))
{if (false) {int maxLen = MAX(image.rows, image.cols);outImage = cv::Mat::zeros(cv::Size(maxLen, maxLen), CV_8UC3);image.copyTo(outImage(cv::Rect(0, 0, image.cols, image.rows)));params[0] = 1;params[1] = 1;params[3] = 0;params[2] = 0;}cv::Size shape = image.size();float r = std::min((float)newShape.height / (float)shape.height,(float)newShape.width / (float)shape.width);if (!scaleUp)r = std::min(r, 1.0f);float ratio[2]{ r, r };int new_un_pad[2] = { (int)std::round((float)shape.width * r),(int)std::round((float)shape.height * r) };auto dw = (float)(newShape.width - new_un_pad[0]);auto dh = (float)(newShape.height - new_un_pad[1]);if (autoShape){dw = (float)((int)dw % stride);dh = (float)((int)dh % stride);}else if (scaleFill){dw = 0.0f;dh = 0.0f;new_un_pad[0] = newShape.width;new_un_pad[1] = newShape.height;ratio[0] = (float)newShape.width / (float)shape.width;ratio[1] = (float)newShape.height / (float)shape.height;}dw /= 2.0f;dh /= 2.0f;if (shape.width != new_un_pad[0] && shape.height != new_un_pad[1]){cv::resize(image, outImage, cv::Size(new_un_pad[0], new_un_pad[1]));}else {outImage = image.clone();}int top = int(std::round(dh - 0.1f));int bottom = int(std::round(dh + 0.1f));int left = int(std::round(dw - 0.1f));int right = int(std::round(dw + 0.1f));params[0] = ratio[0];params[1] = ratio[1];params[2] = left;params[3] = top;cv::copyMakeBorder(outImage, outImage, top, bottom, left, right, cv::BORDER_CONSTANT, color);
}void GetMask(const int* const _seg_params, const float& rectConfidenceThreshold, const cv::Mat& maskProposals, const cv::Mat& mask_protos, const cv::Vec4d& params, const cv::Size& srcImgShape, std::vector<DCSP_RESULT>& output) {int _segChannels = *_seg_params;int _segHeight = *(_seg_params + 1);int _segWidth = *(_seg_params + 2);int _netHeight = *(_seg_params + 3);int _netWidth = *(_seg_params + 4);cv::Mat protos = mask_protos.reshape(0, { _segChannels,_segWidth * _segHeight });cv::Mat matmulRes = (maskProposals * protos).t();cv::Mat masks = matmulRes.reshape(output.size(), { _segHeight,_segWidth });std::vector<cv::Mat> maskChannels;split(masks, maskChannels);for (int i = 0; i < output.size(); ++i) {cv::Mat dest, mask;//sigmoidcv::exp(-maskChannels[i], dest);dest = 1.0 / (1.0 + dest);cv::Rect roi(int(params[2] / _netWidth * _segWidth), int(params[3] / _netHeight * _segHeight), int(_segWidth - params[2] / 2), int(_segHeight - params[3] / 2));dest = dest(roi);cv::resize(dest, mask, srcImgShape, cv::INTER_NEAREST);//cropcv::Rect temp_rect = output[i].box;mask = mask(temp_rect) > rectConfidenceThreshold;output[i].boxMask = mask;}
}void DCSP_CORE::DrawPred(cv::Mat& img, std::vector<DCSP_RESULT>& result) {int detections = result.size();std::cout << "Number of detections:" << detections << std::endl;cv::Mat mask = img.clone();for (int i = 0; i < detections; ++i){DCSP_RESULT detection = result[i];cv::Rect box = detection.box;cv::Scalar color = detection.color;// Detection boxcv::rectangle(img, box, color, 2);mask(detection.box).setTo(color, detection.boxMask);// Detection box textstd::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4);cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20);cv::rectangle(img, textBox, color, cv::FILLED);cv::putText(img, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0);}// Detection maskif (RunSegmentation) cv::addWeighted(img, 0.5, mask, 0.5, 0, img); //将mask加在原图上面cv::imshow("Inference", img);cv::imwrite("out.bmp", img);cv::waitKey();cv::destroyWindow("Inference");
}char* DCSP_CORE::CreateSession(DCSP_INIT_PARAM& iParams) {char* Ret = RET_OK;std::regex pattern("[\u4e00-\u9fa5]");bool result = std::regex_search(iParams.ModelPath, pattern);if (result) {char str_tmp[] = "[DCSP_ONNX]:Model path error.Change your model path without chinese characters.";Ret = str_tmp;std::cout << Ret << std::endl;return Ret;}try {modelConfidenceThreshold = iParams.modelConfidenceThreshold;rectConfidenceThreshold = iParams.RectConfidenceThreshold;iouThreshold = iParams.iouThreshold;imgSize = iParams.imgSize;modelType = iParams.ModelType;env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "Yolo");Ort::SessionOptions sessionOption;if (iParams.CudaEnable) {cudaEnable = iParams.CudaEnable;OrtCUDAProviderOptions cudaOption;cudaOption.device_id = 0;sessionOption.AppendExecutionProvider_CUDA(cudaOption);}sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);sessionOption.SetIntraOpNumThreads(iParams.IntraOpNumThreads);sessionOption.SetLogSeverityLevel(iParams.LogSeverityLevel);#ifdef _WIN32int ModelPathSize = MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), nullptr, 0);wchar_t* wide_cstr = new wchar_t[ModelPathSize + 1];MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), wide_cstr, ModelPathSize);wide_cstr[ModelPathSize] = L'\0';const wchar_t* modelPath = wide_cstr;
#elseconst char* modelPath = iParams.ModelPath.c_str();
#endif // _WIN32session = new Ort::Session(env, modelPath, sessionOption);Ort::AllocatorWithDefaultOptions allocator;size_t inputNodesNum = session->GetInputCount();for (size_t i = 0; i < inputNodesNum; i++) {Ort::AllocatedStringPtr input_node_name = session->GetInputNameAllocated(i, allocator);char* temp_buf = new char[50];strcpy(temp_buf, input_node_name.get());inputNodeNames.push_back(temp_buf);}size_t OutputNodesNum = session->GetOutputCount();for (size_t i = 0; i < OutputNodesNum; i++) {Ort::AllocatedStringPtr output_node_name = session->GetOutputNameAllocated(i, allocator);char* temp_buf = new char[10];strcpy(temp_buf, output_node_name.get());outputNodeNames.push_back(temp_buf);}if (outputNodeNames.size() == 2) RunSegmentation = true;options = Ort::RunOptions{ nullptr };WarmUpSession();return RET_OK;}catch (const std::exception& e) {const char* str1 = "[DCSP_ONNX]:";const char* str2 = e.what();std::string result = std::string(str1) + std::string(str2);char* merged = new char[result.length() + 1];std::strcpy(merged, result.c_str());std::cout << merged << std::endl;delete[] merged;char str_tmps[] = "[DCSP_ONNX]:Create session failed.";char* strs = str_tmps;return strs;}
}char* DCSP_CORE::RunSession(cv::Mat& iImg, std::vector<DCSP_RESULT>& oResult) {
#ifdef benchmarkclock_t starttime_1 = clock();
#endif // benchmarkchar* Ret = RET_OK;cv::Mat processedImg;cv::Vec4d params;//resize图片尺寸,PreProcess是直接resize,LetterBox有padding操作//PreProcess(iImg, imgSize, processedImg);LetterBox(iImg, processedImg, params, cv::Size(imgSize.at(1), imgSize.at(0)));if (modelType < 4) {float* blob = new float[processedImg.total() * 3];BlobFromImage(processedImg, blob);std::vector<int64_t> inputNodeDims = { 1, 3, imgSize.at(0), imgSize.at(1) };TensorProcess(starttime_1, params, iImg, blob, inputNodeDims, oResult);}else {
#ifdef USE_CUDAhalf* blob = new half[processedImg.total() * 3];BlobFromImage(processedImg, blob);std::vector<int64_t> inputNodeDims = { 1,3,imgSize.at(0),imgSize.at(1) };TensorProcess(starttime_1, params, iImg, blob, inputNodeDims, oResult);
#endif}return Ret;
}template<typename N>
char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Vec4d& params, cv::Mat& iImg, N* blob, std::vector<int64_t>& inputNodeDims, std::vector<DCSP_RESULT>& oResult) {Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), inputNodeDims.data(), inputNodeDims.size());
#ifdef benchmarkclock_t starttime_2 = clock();
#endif // benchmarkauto outputTensor = session->Run(options, inputNodeNames.data(), &inputTensor, 1, outputNodeNames.data(), outputNodeNames.size());
#ifdef benchmarkclock_t starttime_3 = clock();
#endif // benchmarkstd::vector<int64_t> _outputTensorShape;_outputTensorShape = outputTensor[0].GetTensorTypeAndShapeInfo().GetShape();//auto output = outputTensor[0].GetTensorMutableData<typename std::remove_pointer<N>::type>();N* output = outputTensor[0].GetTensorMutableData<N>();delete blob;// yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c])// yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + box[x,y,w,h])// yolov5int dimensions = _outputTensorShape[1];int rows = _outputTensorShape[2];cv::Mat rowData;if (modelType < 3)rowData = cv::Mat(dimensions, rows, CV_32F, output);elserowData = cv::Mat(dimensions, rows, CV_16S, output);// yolov8if (rows > dimensions) {dimensions = _outputTensorShape[2];rows = _outputTensorShape[1];rowData = rowData.t();}std::vector<int> class_ids;std::vector<float> confidences;std::vector<cv::Rect> boxes;std::vector<std::vector<float>> picked_proposals;N* data = (N*)rowData.data;for (int i = 0; i < dimensions; ++i) {switch (modelType) {case 0://V5_ORIGIN_FP32case 7://V5_ORIGIN_FP16{N confidence = data[4];if (confidence >= modelConfidenceThreshold){cv::Mat scores;if (modelType < 3) scores = cv::Mat(1, classes.size(), CV_32FC1, data + 5);else scores = cv::Mat(1, classes.size(), CV_16SC1, data + 5);cv::Point class_id;double max_class_score;minMaxLoc(scores, 0, &max_class_score, 0, &class_id);max_class_score = *(data + 5 + class_id.x) * confidence;if (max_class_score > rectConfidenceThreshold){if (RunSegmentation) {int _segChannels = outputTensor[1].GetTensorTypeAndShapeInfo().GetShape()[1];std::vector<float> temp_proto(data + classes.size() + 5, data + classes.size() + 5 + _segChannels);picked_proposals.push_back(temp_proto);}confidences.push_back(confidence);class_ids.push_back(class_id.x);float x = (data[0] - params[2]) / params[0];float y = (data[1] - params[3]) / params[1];float w = data[2] / params[0];float h = data[3] / params[1];int left = MAX(round(x - 0.5 * w + 0.5), 0);int top = MAX(round(y - 0.5 * h + 0.5), 0);if ((left + w) > iImg.cols) { w = iImg.cols - left; }if ((top + h) > iImg.rows) { h = iImg.rows - top; }boxes.emplace_back(cv::Rect(left, top, int(w), int(h)));}}break;}case 1://V8_ORIGIN_FP32case 4://V8_ORIGIN_FP16{cv::Mat scores;if (modelType < 3) scores = cv::Mat(1, classes.size(), CV_32FC1, data + 4);else scores = cv::Mat(1, classes.size(), CV_16SC1, data + 4);cv::Point class_id;double maxClassScore;cv::minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);maxClassScore = *(data + 4 + class_id.x);if (maxClassScore > rectConfidenceThreshold) {if (RunSegmentation) {int _segChannels = outputTensor[1].GetTensorTypeAndShapeInfo().GetShape()[1];std::vector<float> temp_proto(data + classes.size() + 4, data + classes.size() + 4 + _segChannels);picked_proposals.push_back(temp_proto);}confidences.push_back(maxClassScore);class_ids.push_back(class_id.x);float x = (data[0] - params[2]) / params[0];float y = (data[1] - params[3]) / params[1];float w = data[2] / params[0];float h = data[3] / params[1];int left = MAX(round(x - 0.5 * w + 0.5), 0);int top = MAX(round(y - 0.5 * h + 0.5), 0);if ((left + w) > iImg.cols) { w = iImg.cols - left; }if ((top + h) > iImg.rows) { h = iImg.rows - top; }boxes.emplace_back(cv::Rect(left, top, int(w), int(h)));}break;}}data += rows;}std::vector<int> nmsResult;cv::dnn::NMSBoxes(boxes, confidences, rectConfidenceThreshold, iouThreshold, nmsResult);std::vector<std::vector<float>> temp_mask_proposals;for (int i = 0; i < nmsResult.size(); ++i) {int idx = 
nmsResult[i];DCSP_RESULT result;result.classId = class_ids[idx];result.confidence = confidences[idx];result.box = boxes[idx];result.className = classes[result.classId];std::random_device rd;std::mt19937 gen(rd());std::uniform_int_distribution<int> dis(100, 255);result.color = cv::Scalar(dis(gen), dis(gen), dis(gen));if (result.box.width != 0 && result.box.height != 0) oResult.push_back(result);if (RunSegmentation) temp_mask_proposals.push_back(picked_proposals[idx]);}if (RunSegmentation) {cv::Mat mask_proposals;for (int i = 0; i < temp_mask_proposals.size(); ++i)mask_proposals.push_back(cv::Mat(temp_mask_proposals[i]).t());std::vector<int64_t> _outputMaskTensorShape;_outputMaskTensorShape = outputTensor[1].GetTensorTypeAndShapeInfo().GetShape();int _segChannels = _outputMaskTensorShape[1];int _segWidth = _outputMaskTensorShape[2];int _segHeight = _outputMaskTensorShape[3];N* pdata = outputTensor[1].GetTensorMutableData<N>();std::vector<float> mask(pdata, pdata + _segChannels * _segWidth * _segHeight);int _seg_params[5] = { _segChannels, _segWidth, _segHeight, inputNodeDims[2], inputNodeDims[3] };cv::Mat mask_protos = cv::Mat(mask);GetMask(_seg_params, rectConfidenceThreshold, mask_proposals, mask_protos, params, iImg.size(), oResult);}#ifdef benchmarkclock_t starttime_4 = clock();double pre_process_time = (double)(starttime_2 - starttime_1) / CLOCKS_PER_SEC * 1000;double process_time = (double)(starttime_3 - starttime_2) / CLOCKS_PER_SEC * 1000;double post_process_time = (double)(starttime_4 - starttime_3) / CLOCKS_PER_SEC * 1000;if (cudaEnable) {std::cout << "[DCSP_ONNX(CUDA)]: " << pre_process_time << "ms pre-process, " << process_time<< "ms inference, " << post_process_time << "ms post-process." << std::endl;}else {std::cout << "[DCSP_ONNX(CPU)]: " << pre_process_time << "ms pre-process, " << process_time<< "ms inference, " << post_process_time << "ms post-process." << std::endl;}
#endif // benchmarkreturn RET_OK;
}char* DCSP_CORE::WarmUpSession() {clock_t starttime_1 = clock();cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);cv::Mat processedImg;cv::Vec4d params;//resize图片尺寸,PreProcess是直接resize,LetterBox有padding操作//PreProcess(iImg, imgSize, processedImg);LetterBox(iImg, processedImg, params, cv::Size(imgSize.at(1), imgSize.at(0)));if (modelType < 4) {float* blob = new float[iImg.total() * 3];BlobFromImage(processedImg, blob);std::vector<int64_t> YOLO_input_node_dims = { 1, 3, imgSize.at(0), imgSize.at(1) };Ort::Value input_tensor = Ort::Value::CreateTensor<float>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1),YOLO_input_node_dims.data(), YOLO_input_node_dims.size());auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(), outputNodeNames.size());delete[] blob;clock_t starttime_4 = clock();double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;if (cudaEnable) {std::cout << "[DCSP_ONNX(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;}}else {
#ifdef USE_CUDAhalf* blob = new half[iImg.total() * 3];BlobFromImage(processedImg, blob);std::vector<int64_t> YOLO_input_node_dims = { 1,3,imgSize.at(0),imgSize.at(1) };Ort::Value input_tensor = Ort::Value::CreateTensor<half>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), YOLO_input_node_dims.data(), YOLO_input_node_dims.size());auto output_tensors = session->Run(options, inputNodeNames.data(), &input_tensor, 1, outputNodeNames.data(), outputNodeNames.size());delete[] blob;clock_t starttime_4 = clock();double post_process_time = (double)(starttime_4 - starttime_1) / CLOCKS_PER_SEC * 1000;if (cudaEnable){std::cout << "[DCSP_ONNX(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;}
#endif}return RET_OK;
}
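If the installed onnxruntime package was built without the CUDA execution provider, setting CudaEnable = true will simply make CreateSession fail in the catch block above. Before enabling it, it can be useful to check which providers the runtime actually offers. This is just a suggested check using the onnxruntime C++ API, not part of the original code:

#include <iostream>
#include "onnxruntime_cxx_api.h"

int main()
{
    // A GPU-enabled build typically lists CUDAExecutionProvider as well as CPUExecutionProvider.
    for (const std::string& provider : Ort::GetAvailableProviders())
        std::cout << provider << std::endl;
    return 0;
}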
yolov5v8_dnn.h
#ifndef YOLOV5V8_DNN_H
#define YOLOV5V8_DNN_H

// Cpp native
#include <fstream>
#include <vector>
#include <string>
#include <random>

// OpenCV / DNN / Inference
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

struct Detection
{
    int class_id{ 0 };
    std::string className{};
    float confidence{ 0.0 };
    cv::Scalar color{};
    cv::Rect box{};
    cv::Mat boxMask;
};

class Inference
{
public:
    Inference(const std::string& onnxModelPath, const cv::Size& modelInputShape = { 640, 640 },
              const std::string& classesTxtFile = "", const bool& runWithCuda = true);
    std::vector<Detection> runInference(const cv::Mat& input);
    void DrawPred(cv::Mat& img, std::vector<Detection>& result);

private:
    void loadClassesFromFile();
    void loadOnnxNetwork();
    void LetterBox(const cv::Mat& image, cv::Mat& outImage,
                   cv::Vec4d& params,                        //[ratio_x,ratio_y,dw,dh]
                   const cv::Size& newShape = cv::Size(640, 640),
                   bool autoShape = false,
                   bool scaleFill = false,
                   bool scaleUp = true,
                   int stride = 32,
                   const cv::Scalar& color = cv::Scalar(114, 114, 114));
    void GetMask(const cv::Mat& maskProposals, const cv::Mat& mask_protos, const cv::Vec4d& params,
                 const cv::Size& srcImgShape, std::vector<Detection>& output);

private:
    std::string modelPath{};
    bool cudaEnabled{};
    cv::Size2f modelShape{};
    bool RunSegmentation = false;

    float modelConfidenceThreshold{ 0.25 };
    float modelScoreThreshold{ 0.45 };
    float modelNMSThreshold{ 0.50 };

    bool letterBoxForSquare = true;

    cv::dnn::Net net;
    std::string classesPath{};
    std::vector<std::string> classes{ "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant",
        "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
        "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
        "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
        "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" };
};

#endif // YOLOV5V8_DNN_H
yolov5v8_ort.h
#pragma once

#define RET_OK nullptr
#define USE_CUDA

#ifdef _WIN32
#include <Windows.h>
#include <direct.h>
#include <io.h>
#endif

#include <string>
#include <vector>
#include <cstdio>
#include <opencv2/opencv.hpp>
#include "onnxruntime_cxx_api.h"

#ifdef USE_CUDA
#include <cuda_fp16.h>
#endif

enum MODEL_TYPE {
    //FLOAT32 MODEL
    YOLO_ORIGIN_V5 = 0,       //support
    YOLO_ORIGIN_V8 = 1,       //support
    YOLO_POSE_V8 = 2,
    YOLO_CLS_V8 = 3,
    YOLO_ORIGIN_V8_HALF = 4,  //support
    YOLO_POSE_V8_HALF = 5,
    YOLO_CLS_V8_HALF = 6,
    YOLO_ORIGIN_V5_HALF = 7   //support
};

typedef struct _DCSP_INIT_PARAM {
    std::string ModelPath;
    MODEL_TYPE ModelType = YOLO_ORIGIN_V8;
    std::vector<int> imgSize = { 640, 640 };
    float modelConfidenceThreshold = 0.25;
    float RectConfidenceThreshold = 0.6;
    float iouThreshold = 0.5;
    bool CudaEnable = false;
    int LogSeverityLevel = 3;
    int IntraOpNumThreads = 1;
} DCSP_INIT_PARAM;

typedef struct _DCSP_RESULT {
    int classId;
    std::string className;
    float confidence;
    cv::Rect box;
    cv::Mat boxMask;  // mask inside the bounding box
    cv::Scalar color;
} DCSP_RESULT;

class DCSP_CORE {
public:
    DCSP_CORE();
    ~DCSP_CORE();

public:
    void DrawPred(cv::Mat& img, std::vector<DCSP_RESULT>& result);
    char* CreateSession(DCSP_INIT_PARAM& iParams);
    char* RunSession(cv::Mat& iImg, std::vector<DCSP_RESULT>& oResult);
    char* WarmUpSession();
    template<typename N>
    char* TensorProcess(clock_t& starttime_1, cv::Vec4d& params, cv::Mat& iImg, N* blob,
                        std::vector<int64_t>& inputNodeDims, std::vector<DCSP_RESULT>& oResult);

    std::vector<std::string> classes{ "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant",
        "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
        "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
        "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
        "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" };

private:
    Ort::Env env;
    Ort::Session* session;
    bool cudaEnable;
    Ort::RunOptions options;
    bool RunSegmentation = false;
    std::vector<const char*> inputNodeNames;
    std::vector<const char*> outputNodeNames;

    MODEL_TYPE modelType;
    std::vector<int> imgSize;
    float modelConfidenceThreshold;
    float rectConfidenceThreshold;
    float iouThreshold;
};
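For reference, the DCSP_RESULT vector filled by RunSession can also be consumed directly instead of (or in addition to) calling DrawPred. A minimal sketch, assuming res is the result vector from the "ort" branch in main.cpp:

for (const DCSP_RESULT& det : res)
{
    // className/confidence/box come straight from the struct above; cv::Rect has a stream operator in OpenCV.
    std::cout << det.className << "  confidence=" << det.confidence << "  box=" << det.box << std::endl;
}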
3. Configuring the getopt files
The getopt setup is straightforward: just add the following two files to the project. The code is as follows:
getopt.h
# ifndef __GETOPT_H_
# define __GETOPT_H_

# ifdef _GETOPT_API
# undef _GETOPT_API
# endif
//------------------------------------------------------------------------------
# if defined(EXPORTS_GETOPT) && defined(STATIC_GETOPT)
# error "The preprocessor definitions of EXPORTS_GETOPT and STATIC_GETOPT \
can only be used individually"
# elif defined(STATIC_GETOPT)
# pragma message("Warning static builds of getopt violate the Lesser GNU \
Public License")
# define _GETOPT_API
# elif defined(EXPORTS_GETOPT)
# pragma message("Exporting getopt library")
# define _GETOPT_API __declspec(dllexport)
# else
# pragma message("Importing getopt library")
# define _GETOPT_API __declspec(dllimport)
# endif

# include <tchar.h>
// Standard GNU options
# define null_argument 0 /*Argument Null*/
# define no_argument 0 /*Argument Switch Only*/
# define required_argument 1 /*Argument Required*/
# define optional_argument 2 /*Argument Optional*/
// Shorter Versions of options
# define ARG_NULL 0 /*Argument Null*/
# define ARG_NONE 0 /*Argument Switch Only*/
# define ARG_REQ 1 /*Argument Required*/
# define ARG_OPT 2 /*Argument Optional*/
// Change behavior for C\C++
# ifdef __cplusplus
# define _BEGIN_EXTERN_C extern "C" {
# define _END_EXTERN_C }
# define _GETOPT_THROW throw()
# else
# define _BEGIN_EXTERN_C
# define _END_EXTERN_C
# define _GETOPT_THROW
# endif
_BEGIN_EXTERN_C
extern _GETOPT_API TCHAR* optarg;
extern _GETOPT_API int optind;
extern _GETOPT_API int opterr;
extern _GETOPT_API int optopt;
struct option
{
    /* The predefined macro variable __STDC__ is defined for C++, and it has the
       integer value 0 when it is used in an #if statement, indicating that the
       C++ language is not a proper superset of C, and that the compiler does not
       conform to C. In C, __STDC__ has the integer value 1. */
# if defined (__STDC__) && __STDC__
    const TCHAR* name;
# else
    TCHAR* name;
# endif
    int has_arg;
    int* flag;
    TCHAR val;
};
extern _GETOPT_API int getopt(int argc, TCHAR* const* argv, const TCHAR* optstring) _GETOPT_THROW;
extern _GETOPT_API int getopt_long
(int ___argc, TCHAR* const* ___argv, const TCHAR* __shortopts, const struct option* __longopts, int* __longind) _GETOPT_THROW;
extern _GETOPT_API int getopt_long_only
(int ___argc, TCHAR* const* ___argv, const TCHAR* __shortopts, const struct option* __longopts, int* __longind) _GETOPT_THROW;
// harly.he add for reentrant 12.09/2013
extern _GETOPT_API void getopt_reset() _GETOPT_THROW;
_END_EXTERN_C
// Undefine so the macros are not included
# undef _BEGIN_EXTERN_C
# undef _END_EXTERN_C
# undef _GETOPT_THROW
# undef _GETOPT_API
# endif // __GETOPT_H_
getopt.c
# ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
# endif

# include <stdlib.h>
# include <stdio.h>
# include <tchar.h>
# include "getopt.h"# ifdef __cplusplus
# define _GETOPT_THROW throw()
# else
# define _GETOPT_THROW
# endif

enum ENUM_ORDERING
{
    REQUIRE_ORDER, PERMUTE, RETURN_IN_ORDER
};

struct _getopt_data
{
    int optind;
    int opterr;
    int optopt;
    TCHAR* optarg;
    int __initialized;
    TCHAR* __nextchar;
    int __ordering;
    int __posixly_correct;
    int __first_nonopt;
    int __last_nonopt;
};

static struct _getopt_data getopt_data = { 0, 0, 0, NULL, 0, NULL, 0, 0, 0, 0 };

TCHAR* optarg = NULL;
int optind = 1;
int opterr = 1;
int optopt = _T('?');

static void exchange(TCHAR** argv, struct _getopt_data* d)
{int bottom = d->__first_nonopt;int middle = d->__last_nonopt;int top = d->optind;TCHAR* tem;while (top > middle && middle > bottom){if (top - middle > middle - bottom){int len = middle - bottom;register int i;for (i = 0; i < len; i++){tem = argv[bottom + i];argv[bottom + i] = argv[top - (middle - bottom) + i];argv[top - (middle - bottom) + i] = tem;}top -= len;}else{int len = top - middle;register int i;for (i = 0; i < len; i++){tem = argv[bottom + i];argv[bottom + i] = argv[middle + i];argv[middle + i] = tem;}bottom += len;}}d->__first_nonopt += (d->optind - d->__last_nonopt);d->__last_nonopt = d->optind;
}static const TCHAR* _getopt_initialize(const TCHAR* optstring, struct _getopt_data* d, int posixly_correct)
{d->__first_nonopt = d->__last_nonopt = d->optind;d->__nextchar = NULL;d->__posixly_correct = posixly_correct| !!_tgetenv(_T("POSIXLY_CORRECT"));if (optstring[0] == _T('-')){d->__ordering = RETURN_IN_ORDER;++optstring;}else if (optstring[0] == _T('+')){d->__ordering = REQUIRE_ORDER;++optstring;}else if (d->__posixly_correct){d->__ordering = REQUIRE_ORDER;}else{d->__ordering = PERMUTE;}return optstring;
}int _getopt_internal_r(int argc, TCHAR* const* argv, const TCHAR* optstring, const struct option* longopts, int* longind, int long_only, struct _getopt_data* d, int posixly_correct)
{int print_errors = d->opterr;if (argc < 1){return -1;}d->optarg = NULL;if (d->optind == 0 || !d->__initialized){if (d->optind == 0){d->optind = 1;}optstring = _getopt_initialize(optstring, d, posixly_correct);d->__initialized = 1;}else if (optstring[0] == _T('-') || optstring[0] == _T('+')){optstring++;}if (optstring[0] == _T(':')){print_errors = 0;}if (d->__nextchar == NULL || *d->__nextchar == _T('\0')){if (d->__last_nonopt > d->optind){d->__last_nonopt = d->optind;}if (d->__first_nonopt > d->optind){d->__first_nonopt = d->optind;}if (d->__ordering == PERMUTE){if (d->__first_nonopt != d->__last_nonopt&& d->__last_nonopt != d->optind){exchange((TCHAR**)argv, d);}else if (d->__last_nonopt != d->optind){d->__first_nonopt = d->optind;}while (d->optind< argc&& (argv[d->optind][0] != _T('-')|| argv[d->optind][1] == _T('\0'))){d->optind++;}d->__last_nonopt = d->optind;}if (d->optind != argc && !_tcscmp(argv[d->optind], _T("--"))){d->optind++;if (d->__first_nonopt != d->__last_nonopt&& d->__last_nonopt != d->optind){exchange((TCHAR**)argv, d);}else if (d->__first_nonopt == d->__last_nonopt){d->__first_nonopt = d->optind;}d->__last_nonopt = argc;d->optind = argc;}if (d->optind == argc){if (d->__first_nonopt != d->__last_nonopt){d->optind = d->__first_nonopt;}return -1;}if ((argv[d->optind][0] != _T('-')|| argv[d->optind][1] == _T('\0'))){if (d->__ordering == REQUIRE_ORDER){return -1;}d->optarg = argv[d->optind++];return 1;}d->__nextchar = (argv[d->optind]+ 1 + (longopts != NULL&& argv[d->optind][1] == _T('-')));}if (longopts != NULL&& (argv[d->optind][1] == _T('-')|| (long_only && (argv[d->optind][2]|| !_tcschr(optstring, argv[d->optind][1]))))){TCHAR* nameend;const struct option* p;const struct option* pfound = NULL;int exact = 0;int ambig = 0;int indfound = -1;int option_index;for (nameend = d->__nextchar;*nameend && *nameend != _T('=');nameend++);for (p = longopts, option_index = 0; p->name; p++, option_index++){if (!_tcsncmp(p->name, d->__nextchar, nameend - d->__nextchar)){if ((unsigned int)(nameend - d->__nextchar)== (unsigned int)_tcslen(p->name)){pfound = p;indfound = option_index;exact = 1;break;}else if (pfound == NULL){pfound = p;indfound = option_index;}else if (long_only|| pfound->has_arg != p->has_arg|| pfound->flag != p->flag|| pfound->val != p->val){ambig = 1;}}}if (ambig && !exact){if (print_errors){_ftprintf(stderr, _T("%s: option '%s' is ambiguous\n"), argv[0], argv[d->optind]);}d->__nextchar += _tcslen(d->__nextchar);d->optind++;d->optopt = 0;return _T('?');}if (pfound != NULL){option_index = indfound;d->optind++;if (*nameend){if (pfound->has_arg){d->optarg = nameend + 1;}else{if (print_errors){if (argv[d->optind - 1][1] == _T('-')){_ftprintf(stderr, _T("%s: option '--%s' doesn't allow ")_T("an argument\n"), argv[0], pfound->name);}else{_ftprintf(stderr, _T("%s: option '%c%s' doesn't allow ")_T("an argument\n"), argv[0], argv[d->optind - 1][0], pfound->name);}}d->__nextchar += _tcslen(d->__nextchar);d->optopt = pfound->val;return _T('?');}}else if (pfound->has_arg == 1){if (d->optind < argc){d->optarg = argv[d->optind++];}else{if (print_errors){_ftprintf(stderr, _T("%s: option '--%s' requires an ")_T("argument\n"), argv[0], pfound->name);}d->__nextchar += _tcslen(d->__nextchar);d->optopt = pfound->val;return optstring[0] == _T(':') ? 
_T(':') : _T('?');}}d->__nextchar += _tcslen(d->__nextchar);if (longind != NULL){*longind = option_index;}if (pfound->flag){*(pfound->flag) = pfound->val;return 0;}return pfound->val;}if (!long_only|| argv[d->optind][1]== _T('-')|| _tcschr(optstring, *d->__nextchar)== NULL){if (print_errors){if (argv[d->optind][1] == _T('-')){/* --option */_ftprintf(stderr, _T("%s: unrecognized option '--%s'\n"), argv[0], d->__nextchar);}else{/* +option or -option */_ftprintf(stderr, _T("%s: unrecognized option '%c%s'\n"), argv[0], argv[d->optind][0], d->__nextchar);}}d->__nextchar = (TCHAR*)_T("");d->optind++;d->optopt = 0;return _T('?');}}{TCHAR c = *d->__nextchar++;TCHAR* temp = (TCHAR*)_tcschr(optstring, c);if (*d->__nextchar == _T('\0')){++d->optind;}if (temp == NULL || c == _T(':') || c == _T(';')){if (print_errors){_ftprintf(stderr, _T("%s: invalid option -- '%c'\n"), argv[0], c);}d->optopt = c;return _T('?');}if (temp[0] == _T('W') && temp[1] == _T(';')){TCHAR* nameend;const struct option* p;const struct option* pfound = NULL;int exact = 0;int ambig = 0;int indfound = 0;int option_index;if (*d->__nextchar != _T('\0')){d->optarg = d->__nextchar;d->optind++;}else if (d->optind == argc){if (print_errors){_ftprintf(stderr, _T("%s: option requires an argument -- '%c'\n"), argv[0], c);}d->optopt = c;if (optstring[0] == _T(':')){c = _T(':');}else{c = _T('?');}return c;}else{d->optarg = argv[d->optind++];}for (d->__nextchar = nameend = d->optarg;*nameend && *nameend != _T('=');nameend++);for (p = longopts, option_index = 0;p->name;p++, option_index++){if (!_tcsncmp(p->name, d->__nextchar, nameend - d->__nextchar)){if ((unsigned int)(nameend - d->__nextchar)== _tcslen(p->name)){pfound = p;indfound = option_index;exact = 1;break;}else if (pfound == NULL){pfound = p;indfound = option_index;}else if (long_only|| pfound->has_arg != p->has_arg|| pfound->flag != p->flag|| pfound->val != p->val){ambig = 1;}}}if (ambig && !exact){if (print_errors){_ftprintf(stderr, _T("%s: option '-W %s' is ambiguous\n"), argv[0], d->optarg);}d->__nextchar += _tcslen(d->__nextchar);d->optind++;return _T('?');}if (pfound != NULL){option_index = indfound;if (*nameend){if (pfound->has_arg){d->optarg = nameend + 1;}else{if (print_errors){_ftprintf(stderr, _T("%s: option '-W %s' doesn't allow ")_T("an argument\n"), argv[0], pfound->name);}d->__nextchar += _tcslen(d->__nextchar);return _T('?');}}else if (pfound->has_arg == 1){if (d->optind < argc){d->optarg = argv[d->optind++];}else{if (print_errors){_ftprintf(stderr, _T("%s: option '-W %s' requires an ")_T("argument\n"), argv[0], pfound->name);}d->__nextchar += _tcslen(d->__nextchar);return optstring[0] == _T(':') ? _T(':') : _T('?');}}else{d->optarg = NULL;}d->__nextchar += _tcslen(d->__nextchar);if (longind != NULL){*longind = option_index;}if (pfound->flag){*(pfound->flag) = pfound->val;return 0;}return pfound->val;}d->__nextchar = NULL;return _T('W');}if (temp[1] == _T(':')){if (temp[2] == _T(':')){if (*d->__nextchar != _T('\0')){d->optarg = d->__nextchar;d->optind++;}else{d->optarg = NULL;}d->__nextchar = NULL;}else{if (*d->__nextchar != _T('\0')){d->optarg = d->__nextchar;d->optind++;}else if (d->optind == argc){if (print_errors){_ftprintf(stderr, _T("%s: option requires an ")_T("argument -- '%c'\n"), argv[0], c);}d->optopt = c;if (optstring[0] == _T(':')){c = _T(':');}else{c = _T('?');}}else{d->optarg = argv[d->optind++];}d->__nextchar = NULL;}}return c;}
}int _getopt_internal(int argc, TCHAR* const* argv, const TCHAR* optstring, const struct option* longopts, int* longind, int long_only, int posixly_correct)
{int result;getopt_data.optind = optind;getopt_data.opterr = opterr;result = _getopt_internal_r(argc, argv, optstring, longopts, longind, long_only, &getopt_data, posixly_correct);optind = getopt_data.optind;optarg = getopt_data.optarg;optopt = getopt_data.optopt;return result;
}int getopt(int argc, TCHAR* const* argv, const TCHAR* optstring) _GETOPT_THROW
{return _getopt_internal(argc, argv, optstring, (const struct option*)0, (int*)0, 0, 0);
}int getopt_long(int argc, TCHAR* const* argv, const TCHAR* options, const struct option* long_options, int* opt_index) _GETOPT_THROW
{return _getopt_internal(argc, argv, options, long_options, opt_index, 0, 0);
}int _getopt_long_r(int argc, TCHAR* const* argv, const TCHAR* options, const struct option* long_options, int* opt_index, struct _getopt_data* d)
{return _getopt_internal_r(argc, argv, options, long_options, opt_index, 0, d, 0);
}int getopt_long_only(int argc, TCHAR* const* argv, const TCHAR* options, const struct option* long_options, int* opt_index) _GETOPT_THROW
{return _getopt_internal(argc, argv, options, long_options, opt_index, 1, 0);
}int _getopt_long_only_r(int argc, TCHAR* const* argv, const TCHAR* options, const struct option* long_options, int* opt_index, struct _getopt_data* d)
{return _getopt_internal_r(argc, argv, options, long_options, opt_index, 1, d, 0);
}void getopt_reset()
{optarg = NULL;optind = 1;opterr = 1;optopt = _T('?');//getopt_data.optind = 0;getopt_data.opterr = 0;getopt_data.optopt = 0;getopt_data.optarg = NULL;getopt_data.__initialized = 0;getopt_data.__nextchar = NULL;getopt_data.__ordering = 0;getopt_data.__posixly_correct = 0;getopt_data.__first_nonopt = 0;getopt_data.__last_nonopt = 0;
}
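With getopt.h and getopt.c added to the project, the hard-coded paths in main.cpp can optionally be replaced by command-line options (main.cpp already hints at this with its commented-out #include <getopt.h>). The following is only a hypothetical usage sketch: the option names are made up, and depending on the project settings a STATIC_GETOPT define may be needed so the header does not assume a DLL import:

#include <cstdio>
#include <string>
#include <tchar.h>
#include "getopt.h"

int _tmain(int argc, TCHAR** argv)
{
    std::basic_string<TCHAR> img_path, model_path;

    static TCHAR opt_image[] = _T("image");
    static TCHAR opt_model[] = _T("model");
    static struct option long_opts[] = {
        { opt_image, ARG_REQ,  0, _T('i') },
        { opt_model, ARG_REQ,  0, _T('m') },
        { ARG_NULL,  ARG_NULL, 0, ARG_NULL }
    };

    int c;
    while ((c = getopt_long(argc, argv, _T("i:m:"), long_opts, NULL)) != -1) {
        switch (c) {
        case _T('i'): img_path = optarg;   break;
        case _T('m'): model_path = optarg; break;
        default:      break;   // unrecognized options are already reported by getopt itself
        }
    }
    _tprintf(_T("image=%s  model=%s\n"), img_path.c_str(), model_path.c_str());
    // The parsed paths can then be fed into the Inference / DCSP_CORE calls shown in main.cpp.
    return 0;
}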
After completing the steps above, you can run inference.
Note: OpenCV 4.8.1 is recommended; other OpenCV versions may report errors.
背景 在日常编码中,一个比较好的实践是:我们把一些业务无关的、可复用的一些通用逻辑,封装成工具类、甚至jar包。这样一方面方便通用代码抽取、代码复用,同时也隔离经常变动的业务代码和不变的通用代码。那如何定义好一个工具类呢…...