// image_framework_ymj/image_framework/algorithm/Libapi2d.cpp
#include "Libapi2d.h"
#include "Libapi3d.h"
#include "IniHelper.h"
#include "Yolov5For928.h"
#include "Log.h"
#include "CameraHelper.h"
#include <vector>
#include <cmath>
#include <algorithm>
#include <atomic>
#include <climits>
static std::atomic_flag atomic_lock = ATOMIC_FLAG_INIT;
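// atomic_lock is used as a simple spin lock that serializes camera initialization and
// capture in LibapiCapHQImage().

// Edge-magnitude helper: converts the input to grayscale if needed, computes first-order
// Sobel derivatives in X and Y, takes their absolute values and merges them with a
// bitwise OR. The alpha/beta weights are currently unused because the cv::addWeighted()
// blend is commented out below.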
static cv::Mat Sobel(cv::Mat& mat, double alpha, double beta)
{
cv::Mat tmpImage;
cv::Mat tmpImageX;
cv::Mat tmpImageY;
if (mat.type() != CV_8UC1)
cvtColor(mat, tmpImage, cv::COLOR_BGR2GRAY);
else
tmpImage = mat;
cv::Sobel(tmpImage, tmpImageX, CV_64F, 1, 0, 1);
cv::Sobel(tmpImage, tmpImageY, CV_64F, 0, 1, 1);
cv::convertScaleAbs(tmpImageX, tmpImageX);
cv::convertScaleAbs(tmpImageY, tmpImageY);
cv::bitwise_or(tmpImageX, tmpImageY, tmpImage);
//cv::addWeighted(tmpImageX, alpha, tmpImageY, beta, 0, tmpImage);
return tmpImage;
}
static cv::Mat CopyToEx(cv::Mat& src, cv::Mat& mask, cv::Mat& tar, int x_offset, int y_offset)
{
cv::Mat dst;
tar.copyTo(dst);
cv::Rect roi_rect = cv::Rect(x_offset, y_offset, src.cols, src.rows);
src.copyTo(dst(roi_rect), mask);
return dst;
}
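// Axis-aligned bounding rectangle of a point set (the smallest upright cv::Rect that
// contains all of the given points).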
static cv::Rect OutSquare(const std::vector<cv::Point>& vp)
{
int x1 = INT_MAX;
int y1 = INT_MAX;
int x2 = 0;
int y2 = 0;
for (int i = 0; i < vp.size(); i++)
{
if (vp[i].x < x1)
x1 = vp[i].x;
if (vp[i].x > x2)
x2 = vp[i].x;
if (vp[i].y < y1)
y1 = vp[i].y;
if (vp[i].y > y2)
y2 = vp[i].y;
}
return cv::Rect(x1, y1, x2 - x1, y2 - y1);
}
static std::vector<cv::Point> Offset(const std::vector<cv::Point>& vp, int x, int y)
{
std::vector<cv::Point> vpTar;
for (int i = 0; i < vp.size(); i++)
{
cv::Point p(vp[i].x + x, vp[i].y + y);
vpTar.push_back(p);
}
return vpTar;
}
static cv::Mat GetRoi(cv::Mat& mat, const std::vector<cv::Point>& vp)
{
cv::Mat dst;
cv::Rect rt = OutSquare(vp);
cv::Mat src = mat(rt);
cv::Mat roi = cv::Mat::zeros(src.size(), CV_8U);
std::vector<std::vector<cv::Point>> contour;
std::vector<cv::Point> vpOffset = Offset(vp, -rt.x, -rt.y);
contour.push_back(vpOffset);
cv::drawContours(roi, contour, 0, cv::Scalar::all(255), -1);
src.copyTo(dst, roi);
return dst;
}
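// Classify four segments as the top, right, bottom and left edges of a quadrilateral.
// For each segment midpoint (cx, cy), dx = |cy| is its distance to the image x-axis and
// dy = |cx| its distance to the y-axis; the smallest dx becomes the top edge, the largest
// dx the bottom edge, the largest dy the right edge and the smallest dy the left edge.
// Each output entry keeps the layout {x1, y1, x2, y2, theta, r}.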
static std::vector<std::vector<float>> SortLines(std::vector<std::vector<float>>& lines4)
{
float Ax = 1, Bx = 0, Cx = 0;
float Ay = 0, By = 1, Cy = 0;
float min_dx = 10000, max_dx = 0, min_dy = 10000, max_dy = 0;
int min_dx_idx = -1, max_dx_idx = -1, min_dy_idx = -1, max_dy_idx = -1;
float cx = 0.0, cy = 0.0;
float dx = 0.0, dy = 0.0;
std::vector<std::vector<float>> sort_lines4(4); // top, right, bottom, left edges
for (int i = 0; i < sort_lines4.size(); ++i) {
sort_lines4[i].resize(6); // x1, y1, x2, y2, theta, r
}
for (int i = 0; i < lines4.size(); ++i)
{
cx = (lines4[i][2] + lines4[i][0]) / 2, cy = (lines4[i][3] + lines4[i][1]) / 2;
dx = abs(Ay * cx + By * cy + Cy) / sqrt(pow(Ay, 2) + pow(By, 2));
dy = abs(Ax * cx + Bx * cy + Cx) / sqrt(pow(Ax, 2) + pow(Bx, 2));
if (dx < min_dx)
{
min_dx = dx;
min_dx_idx = i;
}
if (dx > max_dx)
{
max_dx = dx;
max_dx_idx = i;
}
if (dy < min_dy)
{
min_dy = dy;
min_dy_idx = i;
}
if (dy > max_dy)
{
max_dy = dy;
max_dy_idx = i;
}
}
sort_lines4[0] = lines4[min_dx_idx];
sort_lines4[1] = lines4[max_dy_idx];
sort_lines4[2] = lines4[max_dx_idx];
sort_lines4[3] = lines4[min_dy_idx];
return sort_lines4;
}
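// Intersect consecutive edges to obtain the four quadrilateral corners. Each segment
// (x1, y1)-(x2, y2) is written in implicit form A*x + B*y + C = 0 with A = m (slope),
// B = -1, C = y1 - m*x1; edge i and edge i+1 then intersect at
//   x_p = (B_next*C - B*C_next) / (A_next*B - A*B_next)
//   y_p = (A*x_p + C) / -B
// and the point is stored as corner (i + 1) % 4 when it falls inside the image.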
static std::vector<std::vector<float>> GetIntersections(cv::Mat& im, std::vector<std::vector<float>>& sort_lines4)
{
float x1, y1, x2, y2;
float x1_next, y1_next, x2_next, y2_next;
float m, m_next;
float A, B, C, A_next, B_next, C_next;
float x_p, y_p;
std::vector<std::vector<float>> sort_conners4(4);
for (int i = 0; i < sort_conners4.size(); ++i) {
sort_conners4[i].resize(2); // x, y
}
for (int i = 0; i < 4; ++i)
{
std::vector<float> line = sort_lines4[i];
x1 = line[0], y1 = line[1], x2 = line[2], y2 = line[3];
std::vector<float> line_next = sort_lines4[(i + 1) % 4];
x1_next = line_next[0], y1_next = line_next[1], x2_next = line_next[2], y2_next = line_next[3];
// guard against vertical segments (avoid division by zero in the slope)
if (x2 == x1)
x2 += 0.001f;
m = (y2 - y1) / (x2 - x1);
A = m, B = -1, C = y1 - m * x1;
if (x2_next == x1_next)
x2_next += 0.001f;
m_next = (y2_next - y1_next) / (x2_next - x1_next);
A_next = m_next, B_next = -1, C_next = y1_next - m_next * x1_next;
// skip if the two lines are parallel (should not happen in practice)
if (A * B_next == A_next * B)
continue;
// compute the intersection point
x_p = (B_next * C - B * C_next) / (A_next * B - A * B_next);
y_p = (A * x_p + C) / -B;
// keep the corner only if it lies inside the image
if (0 <= int(x_p) && int(x_p) < im.cols && 0 <= int(y_p) && int(y_p) < im.rows)
sort_conners4[(i + 1) % 4] = { x_p, y_p };
}
return sort_conners4;
}
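// Detect the four straight edges and the corners of an embedded part in a single image:
//   grayscale -> GaussianBlur -> Sobel edge magnitude -> binary threshold -> HoughLinesP.
// Nearly collinear Hough segments (similar angle, small midpoint-to-line distance) are
// merged, keeping the longest one; the surviving edges are sorted top/right/bottom/left
// and intersected to produce the corner points. When G(conners_detect_fake) == "true",
// a hard-coded, pre-scaled set of corners is returned instead.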
static int GetSortConnersAndEdgess(
cv::Mat& src,
std::vector<std::vector<float>>& sort_lines4,
std::vector<std::vector<float>>& sort_conners4,
int thresh_hold = 80,
int minLineLength = 400)
{
int ret = CAL_OK;
if (G(conners_detect_fake) == "true")
{
sort_conners4.push_back({ 465,436 });
sort_conners4.push_back({ 615, 260 });
sort_conners4.push_back({ 783, 405 });
sort_conners4.push_back({ 630, 581 });
for (int i = 0; i < sort_conners4.size(); ++i)
sort_conners4[i] = {
float(sort_conners4[i][0] * 9344 / 934 / 1.5),
float(sort_conners4[i][1] * 7000 / 700 / 1.5) };
return ret;
}
std::chrono::high_resolution_clock::time_point m_start = std::chrono::high_resolution_clock::now();
cv::Mat gray;
cv::cvtColor(src, gray, cv::COLOR_BGR2GRAY);
cv::GaussianBlur(gray, gray, cv::Size(5, 5), 0, 0);
gray = Sobel(gray, 0.0, 0.0);
cv::Mat mask;
cv::threshold(gray, mask, 5, 255, cv::THRESH_BINARY);
float PI = 3.1415926f;
std::vector<cv::Vec4f> lines;
cv::HoughLinesP(mask, lines, 1, PI / 180, thresh_hold, minLineLength, G(edge_max_line_gap));
float x1, y1, x2, y2, theta, r, m;
float A, B, C;
bool bRepeat = false;
float cx, cy, d, r_max;
std::vector<std::vector<float>> _line4; // merged edge candidates (top, right, bottom, left)
//for (int i = 0; i < _line4.size(); ++i) {
// _line4[i].resize(6); // x1, y1, x2, y2, theta, r
//}
for (int l = 0; l < lines.size(); ++l)
{
cv::Vec4f _line = lines[l];
x1 = _line[0];
y1 = _line[1];
x2 = _line[2];
y2 = _line[3];
// angle (degrees) and length of the segment
if (x1 == x2)
x2 += 0.001f;
theta = atan((y2 - y1) / (x2 - x1)) * (180 / PI);
r = sqrt(pow((y2 - y1), 2) + pow((x2 - x1), 2));
// slope of the line (x1 != x2 is guaranteed by the guard above); to be optimized
m = (y2 - y1) / (x2 - x1);
A = m;
B = -1;
C = y1 - m * x1;
// merge this segment with an already-collected edge if they are nearly collinear
bRepeat = false;
for (int i = 0; i < _line4.size(); ++i)
{
if (abs(abs(theta) - abs(_line4[i][4])) < G(edge_min_angle_th)) // 33°
{
cx = (_line4[i][2] + _line4[i][0]) / 2;
cy = (_line4[i][3] + _line4[i][1]) / 2;
// if the stored edge's midpoint is close (perpendicular distance) to the new segment's
// line, treat them as the same edge and keep the longer one
d = abs(A * cx + B * cy + C) / sqrt(pow(A, 2) + pow(B, 2));
r_max = _line4[i][5] > r ? _line4[i][5] : r;
if (d < r_max / 3)
{
bRepeat = true;
if (r > _line4[i][5])
{
_line4[i] = { x1, y1, x2, y2, theta, r };
}
}
}
}
if (!bRepeat)
{
_line4.push_back({ x1, y1, x2, y2, theta, r });
}
}
Loger(LEVEL_INFOR, "_line4.size() < 4 is %d", _line4.size());
if (_line4.size() < 4)
{
return CAL_INNER_COONERS_ISNOT_4;
}
sort_lines4 = SortLines(_line4);
if (sort_lines4.size() != 4)
assert(0);
sort_conners4 = GetIntersections(src, sort_lines4);
if (sort_conners4.size() != 4)
assert(0);
std::chrono::high_resolution_clock::time_point m_end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsex = m_end - m_start;
std::cout << "Cal3D static GetSortConnersAndEdgess time is : " << elapsex.count() << std::endl;
return ret;
}
static std::vector<std::vector<float>> UpdateConners(std::vector<std::vector<float>>& sort_conners4)
{
std::vector<std::vector<float>> sort_conners4_new(4);
return sort_conners4_new;
}
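// Busy-wait delay: spins until roughly `second` seconds of clock() time have elapsed.
// Only used to pause between camera capture retries below.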
static void delay_second(double second)
{
clock_t start_time;
start_time = clock();
for (; (clock() - start_time) < second * CLOCKS_PER_SEC;);
}
// Map between the wide-angle camera image and the HQ (high-resolution) camera image
int Cal2D::LibapiGet2D2DMapRect(cv::Mat& dst_im, bool fake, std::string fakeFilePath)
{
int ret = CAL_OK;
return ret;
}
// Read an image from the wide-angle camera
int Cal2D::LibapiCapLQImage(cv::Mat& dst_im)
{
int ret = CAL_OK;
return ret;
}
// Capture a high-resolution (HQ) image
static int camera_idxx = 0;
int Cal2D::LibapiCapHQImage(CMeasureInfo* measureInfoPtr, std::shared_ptr<cv::Mat>& dst_im)
{
int ret = CAL_OK;
if (G(camera_cap_fake) == "true")
{
if (G(fake_image_fpath) == "")
assert(0);
else
{
// read the fake file to get the image data
ret = LibapiReadHQImage(G(fake_image_fpath), *dst_im);
if (ret != CAL_OK)
{
return ret;
}
if (dst_im->empty())
{
return CAL_READFILE_FAILED;
}
}
}
else
{
while (atomic_lock.test_and_set(std::memory_order_acquire)) {}
if (camera_idxx == 0)
{
// wait for initialization (one-time camera init and start)
ret = CCameraHelper::Get()->Init();
Loger(LEVEL_INFOR, "CCameraHelper::Get()->Init() ret is %d", ret);
ret = CCameraHelper::Get()->Start(G(export_time) * 10000);
Loger(LEVEL_INFOR, "CCameraHelper::Get()->Start() ret is %d", ret);
camera_idxx = 1;
}
// cap camera
/*CCameraHelper::Get()->Start();*/
int times = 3;
while (true)
{
void* pData = (void*)(dst_im.get()->data);
int w, h;
ret = CCameraHelper::Get()->Cap((void**)&pData, w, h, 5000);
Loger(LEVEL_INFOR, "Capture Image times : %d", 3 - times + 1);
if (ret == 0)
{
Loger(LEVEL_INFOR, "Capture Image sucess!");
break;
}
delay_second(1.5);
times--;
if (times == 0)
{
Loger(LEVEL_INFOR, "Capture Image faild!");
//CCameraHelper::Get()->Stop();
//CCameraHelper::Get()->Init();
//CCameraHelper::Get()->Start();
return CAL_2D_CAP_IMAGE_FAILED;
}
}
atomic_lock.clear(std::memory_order_release);
//CCameraHelper::Get()->Stop();
}
measureInfoPtr->SetIm(dst_im);
return ret;
}
// Read an HQ image from file
int Cal2D::LibapiReadHQImage(const std::string plyPath, cv::Mat& dst_im)
{
int ret = CAL_OK;
dst_im = cv::imread(plyPath, cv::IMREAD_UNCHANGED);
if (dst_im.empty())
{
Loger(LEVEL_INFOR, "dst_im.empty() dst_im : %s", plyPath.c_str());
return CAL_PRAMA_EMPUTY;
//assert(0);
}
Loger(LEVEL_INFOR, "Image read success: %s", plyPath.c_str());
return ret;
}
// Save (copy) the HQ image
int Cal2D::LibapiCopyHQImage(cv::Mat& src_im, cv::Mat& dst_im)
{
int ret = CAL_OK;
return ret;
}
// Undistort the image
int Cal2D::LibapiUndistort(cv::Mat& src_im, cv::Mat& dst_im)
{
int ret = CAL_OK;
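// The camera matrix follows the standard pinhole layout
//   [ fx  0  cx ]
//   [  0  fy  cy ]
//   [  0   0   1 ]
// and the distortion vector uses OpenCV's (k1, k2, p1, p2, k3) ordering; all values are
// read from the configuration via G().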
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << G(fx), 0, G(cx), 0, G(fy), G(cy), 0, 0, 1);
cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << G(k1), G(k2), G(p1), G(p2), G(k3));
cv::undistort(src_im, dst_im, cameraMatrix, distCoeffs);
return ret;
}
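// Example (illustrative sketch only; the file names below are hypothetical): undistorting
// a raw HQ image before running detection on it.
//   cv::Mat raw = cv::imread("hq_raw.png");
//   cv::Mat rectified;
//   if (Cal2D::LibapiUndistort(raw, rectified) == CAL_OK)
//       cv::imwrite("hq_rectified.png", rectified);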
// Continuously capture wide-angle images (PYTHON)
// AI detection of embedded parts in the wide-angle image, returning their ROI coordinates
// TODO: optimize
static int idxx = 0;
int Cal2D::LibapiDetectObj(CMeasureInfo* measureInfo)
{
int ret = CAL_OK;
if (G(npu_fake) == "true")
{
// TODO: modify; these hard-coded fake detections stand in for the NPU results.
// Three identical fake ROIs; the coordinates appear to be in a down-scaled preview and
// are scaled up to the HQ resolution (factors 9344/934 and 7000/700, then divided by 1.5).
for (int n = 0; n < 3; ++n)
{
std::shared_ptr<CSubRoi> subRoiPtr = std::make_shared<CSubRoi>();
subRoiPtr->mRoiPoints.push_back({ 465, 436 });
subRoiPtr->mRoiPoints.push_back({ 615, 260 });
subRoiPtr->mRoiPoints.push_back({ 783, 405 });
subRoiPtr->mRoiPoints.push_back({ 630, 581 });
for (int i = 0; i < subRoiPtr->mRoiPoints.size(); ++i)
subRoiPtr->mRoiPoints[i] = {
float(subRoiPtr->mRoiPoints[i].x * 9344 / 934 / 1.5),
float(subRoiPtr->mRoiPoints[i].y * 7000 / 700 / 1.5) };
measureInfo->GetSubRois().push_back(subRoiPtr);
}
}
else
{
//////////////////////////////////////////////////////////////////////////////////////////
if (idxx == 0)
{
// TODO: optimize
// init sig
ret = CYolov5For928::Get()->pfucLibapiSvpNpuHandleSig();
if (ret != RST_OK)
{
assert(0);
return ret;
}
// init acl
ret = CYolov5For928::Get()->pfucLibapiSvpNpuAclPrepareInit();
if (ret != RST_OK)
{
assert(0);
return ret;
}
// init model
const char* om_model_path = G(yolo_modol_path).c_str();
ret = CYolov5For928::Get()->pfucLibApiSvpNpuLoadModel(om_model_path, 0, false);
if (ret != RST_OK)
{
assert(0);
return ret;
}
idxx = 1;
}
//////////////////////////////////////////////////////////////////////////////////////////
cv::Mat* hq_im = measureInfo->GetIm().get();
// run yolo model to detect
ret = CYolov5For928::Get()->pfucLibApiSvpNpuLoadDataset(0, (void*)hq_im->data, hq_im->total() * hq_im->channels(),
hq_im->size().width , hq_im->size().height , CV_8UC3);
if (ret != CAL_OK)
{
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuLoadDataset() has error %d" , ret);
Loger(LEVEL_INFOR, " w: %d h : %d ch : %d", hq_im->size().width, hq_im->size().height, hq_im->channels());
return CAL_NPU_LOAD_DATASET_FAILED;
}
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuLoadDataset() success");
ret = CYolov5For928::Get()->pfucLibApiSvpNpuModelExecute(0);
if (ret != CAL_OK)
{
CYolov5For928::Get()->pfucLibApiSvpNpuUnloadDataset();
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuModelExecute() has error %d", ret);
return CAL_NPU_MODEL_EXE_FAILED;
}
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuModelExecute() success");
RetrunObject objs;
ret = CYolov5For928::Get()->pfucLibApiSvpNpuGetModelExecuteResult(0, &objs);
if (ret != CAL_OK)
{
CYolov5For928::Get()->pfucLibApiSvpNpuUnloadDataset();
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuGetModelExecuteResult() has error %d", ret);
return CAL_NPU_GET_RESULT_FAILED;
}
Loger(LEVEL_INFOR, "pfucLibApiSvpNpuGetModelExecuteResult objs.count = %d", objs.count);
if (objs.count == 0)
{
Loger(LEVEL_INFOR, " pfucLibApiSvpNpuGetModelExecuteResult() objs.count == 0");
}
else if (objs.count > 128)
{
Loger(LEVEL_INFOR, " pfucLibApiSvpNpuGetModelExecuteResult() objs.count > 128");
}
else
{
for (int i = 0; i < objs.count; ++i)
{
std::cout << " label: " << G(yolo_label) << " - " << objs.objects[i].label
<< " prob: " << G(yolo_prob) << " - " << objs.objects[i].prob << std::endl;
if (objs.objects[i].label == (int)G(yolo_label) && objs.objects[i].prob > (float)G(yolo_prob))
{
std::shared_ptr<CSubRoi> subRoi1Ptr = std::make_shared<CSubRoi>();
//subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x - 20), (float)(objs.objects[i].y + G(roi_y_offset) - 20) });
//subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x + 20 + (float)objs.objects[i].width), (float)(objs.objects[i].y + G(roi_y_offset) - 20) });
//subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x + 20 + (float)objs.objects[i].width), (float)(objs.objects[i].y + objs.objects[i].height + G(roi_y_offset) + 20) });
//subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x - 20), (float)(objs.objects[i].y + objs.objects[i].height + G(roi_y_offset) + 20) });
subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x - 2), (float)(objs.objects[i].y + G(roi_y_offset) - 2) });
subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x + 2 + (float)objs.objects[i].width), (float)(objs.objects[i].y + G(roi_y_offset) - 2) });
subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x + 2 + (float)objs.objects[i].width), (float)(objs.objects[i].y + objs.objects[i].height + G(roi_y_offset) + 2) });
subRoi1Ptr->mRoiPoints.push_back({ (float)(objs.objects[i].x - 2), (float)(objs.objects[i].y + objs.objects[i].height + G(roi_y_offset) + 2) });
for (int j = 0; j < 4; ++j)
{
if (subRoi1Ptr->mRoiPoints[j].x < 0)
subRoi1Ptr->mRoiPoints[j].x = 0;
if (subRoi1Ptr->mRoiPoints[j].x > hq_im->cols)
subRoi1Ptr->mRoiPoints[j].x = hq_im->cols;
if (subRoi1Ptr->mRoiPoints[j].y < 0)
subRoi1Ptr->mRoiPoints[j].y = 0;
if (subRoi1Ptr->mRoiPoints[j].y > hq_im->rows)
subRoi1Ptr->mRoiPoints[j].y = hq_im->rows;
}
measureInfo->GetSubRois().push_back(subRoi1Ptr);
if (measureInfo->GetSubRois().size() == 16)
{
Loger(LEVEL_INFOR, "[ERROR] measureInfo->GetSubRois().size() has reached 16");
}
else
{
Loger(LEVEL_INFOR, "measureInfo->GetSubRois().size() is %d", measureInfo->GetSubRois().size());
}
}
}
}
ret = CYolov5For928::Get()->pfucLibApiSvpNpuUnloadDataset();
if (ret != CAL_OK)
{
Loger(LEVEL_INFOR, " CYolov5For928::Get()->pfucLibApiSvpNpuUnloadDataset() has error %d", ret);
return CAL_NPU_UNLOAD_DATASET_FAILED;
}
// TODO: optimize
//////////////////////////////////////////////////////////////////////////////////////////////////////////
//ret = CYolov5For928::Get()->pfucLibApiSvpNpuUnLoadModel(0);
//ret = CYolov5For928::Get()->pfucLibApiSvpNpuAclPrepareExit();
//////////////////////////////////////////////////////////////////////////////////////////////////////////
Loger(LEVEL_INFOR, "LibApiSvpNpuModelExecute success");
}
return ret;
}
// Get ROI sub-images from a set of ROI coordinates (deprecated)
//id: parentId
int Cal2D::LibapiGetSubImFromRois(cv::Mat& im, const int& id, const std::string& info,
std::vector<cv::Rect2f> rois, std::vector<CSubRoi>& subMat, bool fake)
{
if (im.empty() || rois.size() == 0)
{
Loger(LEVEL_INFOR, "im.empty() || rois.size() == 0");
return CAL_2D_ROI_DETECTIION_IS_0;
//assert(0);
}
int ret = CAL_OK;
for (int i = 0; i < rois.size(); i++)
{
CSubRoi mat;
//mat
}
return ret;
}
// Compute the edges and corners of the embedded part within a single embedded-part image
int Cal2D::LibapiGetEdegsAndConners(cv::Mat& src,
std::vector<std::vector<float>>& sort_lines4,
std::vector<std::vector<float>>& sort_conners4)
{
return GetSortConnersAndEdgess(
src,
sort_lines4,
sort_conners4,
80,
G(edge_min_line_len));
}
// Select the inner region (sub-pixel detection algorithm)
int Cal2D::LibapiSelectInnerRegion(CSubRoi& subMat)
{
int ret = CAL_OK;
if (subMat.mRoiPoints.size() != 4)
assert(0);
return ret;
}
// Select 2D-3D point pairs used to compute the homography matrix H
int Cal2D::LibapiSelect2DTo3DPoints(CMeasureInfo& measureInfo, CSubRoi& subRoi,
std::vector<cv::Point2f>& vec2D, std::vector<cv::Point3f>& vec3D)
{
// This function is deprecated
assert(0);
if (measureInfo.GetIm()->empty() || subRoi.mInnerConners.size() != 4)
assert(0);
int ret = CAL_OK;
std::chrono::high_resolution_clock::time_point m_start = std::chrono::high_resolution_clock::now();
// TODO: iterate only within the ROI instead of the whole image
float* hptr;
cv::Mat *im_2D3D = measureInfo.GetIm().get();
int ch = im_2D3D->channels();
for (int h = 0; h < im_2D3D->rows; ++h)
{
hptr = (float*)im_2D3D->ptr<float>(h);
for (int w = 0; w < im_2D3D->cols; ++w)
{
// TODO: optimize
if (hptr[w * ch + 2] >= 3.5)
{
cv::Point2f p2 = { (float)w, (float)h };
// pointPolygonTest returns +1 inside, 0 on the edge and -1 outside when measureDist == false
if (cv::pointPolygonTest(subRoi.mInnerConners, p2, false) >= 0)
{
cv::Point3f v3 = { hptr[w * ch], hptr[w * ch + 1], hptr[w * ch + 2] };
vec2D.push_back(p2);
vec3D.push_back(v3);
}
}
}
}
if (vec2D.size() < 2000)
assert(0);
std::cout << vec2D.size() << std::endl;
std::cout << vec3D.size() << std::endl;
std::chrono::high_resolution_clock::time_point m_end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsex = m_end - m_start;
std::cout << "Cal2D::LibapiSelect2DTo3DPoints time is : " << elapsex.count() << std::endl;
return ret;
}
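// Estimate the homography H that maps the selected 3D points (assumed to lie on a plane)
// onto the corresponding 2D image points lifted to homogeneous form (x, y, 1), using
// cv::findHomography with RANSAC and a 5-pixel reprojection threshold.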
// Compute the homography matrix
int Cal2D::LibapiCalH(const std::vector<cv::Point2f>& vec2D, const std::vector<cv::Point3f>& vec3D, cv::Mat& H)
{
if (vec2D.size() == 0 || vec3D.size() == 0)
{
Loger(LEVEL_INFOR, "vec2D.size() == 0 || vec3D.size() == 0");
return CAL_PRAMA_EMPUTY;
//assert(0);
}
int ret = CAL_OK;
std::chrono::high_resolution_clock::time_point m_start = std::chrono::high_resolution_clock::now();
float ransacReprojThreshold = 5.0;
std::vector<cv::Point3f> vp3;
for (int i = 0; i < vec2D.size(); ++i)
{
cv::Point3f p3 = { vec2D[i].x, vec2D[i].y, 1 };
vp3.push_back(p3);
}
H = cv::findHomography(vec3D, vp3, cv::RANSAC, ransacReprojThreshold);
std::cout << H << std::endl;
std::chrono::high_resolution_clock::time_point m_end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> elapsex = m_end - m_start;
std::cout << "Cal2D::LibapiCalH time is : " << elapsex.count() << std::endl;
return ret;
}
// Compute the corner points and center position of the embedded part
int Cal2D::LibapiCalScale(const CSubRoi& subConner, int H)
{
int ret = CAL_OK;
return ret;
}
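// Fake end-to-end 2D/3D setup used for testing: capture a point cloud, rotate/translate
// it, filter it by distance, correct it, and project the 3D points into a CV_32FC3 map
// in which each pixel stores the 3D coordinates of the projected point (the format later
// consumed by LibapiSelect2DTo3DPoints), then wrap the result in a new CMeasureInfo.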
static CMeasureInfo* fake_create2D3D()
{
int ret = CAL_OK;
utility::SetVerbosityLevel(utility::VerbosityLevel::Debug);
auto cloud_ptr = std::make_shared<geometry::PointCloud>();
auto cloud_ptr_by_dist = std::make_shared<geometry::PointCloud>();
auto cloud_ptr_by_correct = std::make_shared<geometry::PointCloud>();
ret = Cal3D::LibapiCapCloudPoints(cloud_ptr);
if (ret != CAL_OK) assert(0);
ret = Cal3D::LibapiRotAndTrans(cloud_ptr);
ret = Cal3D::LibapiFilterCloudPointsByDist(cloud_ptr, cloud_ptr_by_dist,
G(dminx), G(dmaxx), G(dminy), G(dmaxy), G(dminz), G(dmaxz));
if (ret != CAL_OK) assert(0);
if (cloud_ptr_by_dist->IsEmpty()) assert(0);
ret = Cal3D::LibapiCorrectCloudPoint(cloud_ptr_by_dist, cloud_ptr_by_correct, 0);
std::vector<cv::Point3d> vec_point_3d;
ret = Cal3D::LibapiCloudPointToVec(cloud_ptr_by_correct, vec_point_3d);
cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << G(fx), 0, G(cx), 0, G(fy), G(cy), 0, 0, 1);
std::vector<cv::Point2d> vec_point_2d;
//cv::Mat test_src_im = cv::imread("output.png");
cv::Mat test_src_im;
//cv::Mat test_src_im;
std::shared_ptr<cv::Mat> im_2D3D = std::make_shared<cv::Mat>(cv::Size(G(w), G(h)), CV_32FC3);
im_2D3D->setTo(cv::Scalar(0, 0, 0)); // initialize to an all-black background
ret = Cal3D::LibapiCloudPointsPorject(vec_point_3d, cameraMatrix, vec_point_2d, im_2D3D.get(), false, test_src_im);
// for test
ret = Cal3D::LibapiCloudPointProjectTest(*im_2D3D);
//cv::FileStorage file("d:\\im_2D3D.yml", cv::FileStorage::WRITE);
//std::string tag = "im_2D3D";
//file << tag << *im_2D3D;
//file.release();
// Create the CMeasureInfo data structure
long id;
ret = CalUtils::LibapiGenRandomId(id);
std::string info = "abc";
//std::shared_ptr<CMeasureInfo> measureInfoPtr = std::make_shared<CMeasureInfo>(id, info, im_2D3D);
CMeasureInfo* measureInfoPtr = CMeasureInfo::Init(id, info);
measureInfoPtr->SetIm2D3D(im_2D3D);
return measureInfoPtr;
}
//int main()
//{
// G(fx);
// int ret = CAL_OK;
//
// std::shared_ptr<cv::Mat> hqImPtr = std::make_shared<cv::Mat>();
//
// *hqImPtr = cv::imread("./caowei/fe57022fa2c58888.jpg");
//
// std::vector<std::vector<float>> sort_lines4;
// std::vector<std::vector<float>> sort_conners4;
//
// cv::Mat hq_im_cp = hqImPtr->clone();
// GetSortConnersAndEdgess(
// hq_im_cp,
// sort_lines4,
// sort_conners4,
// 80,
// 100);
//
// float x1, y1, x2, y2;
// float x_p, y_p;
//
// for (int i = 0; i < 4; ++i)
// {
// std::vector<float> line = sort_lines4[i];
// x1 = line[0], y1 = line[1], x2 = line[2], y2 = line[3];
// std::vector<float> conners = sort_conners4[i];
// x_p = conners[0], y_p = conners[1];
//
// cv::circle(*hqImPtr, cv::Point(int(x_p), int(y_p)), 2, cv::Scalar(0, 0, 255), -1);
// cv::line(*hqImPtr, cv::Point(int(x1), int(y1)), cv::Point(int(x2), int(y2)), cv::Scalar(0,255, 0), 1);
// }
//
// cv::imshow("im_edge", *hqImPtr);
// cv::waitKey(0);
//
// // capture a photo
// CMeasureInfo* measureInfoPtr = CMeasureInfo::Init(1, "");
// ret = Cal2D::LibapiCapHQImage(measureInfoPtr, hqImPtr);
// if (ret != CAL_OK || hqImPtr->empty())
// assert(0);
//
//
// // AI detection to partition ROIs
// //std::vector<cv::Rect2f> rois;
// //ret = Cal2D::LibapiDetectObj(hq_im, rois, true);
// //if (ret != CAL_OK || rois.size()==0)
// // assert(0);
// measureInfoPtr = fake_create2D3D();
// std::shared_ptr<CSubRoi> subRoi = std::make_shared<CSubRoi>();
// measureInfoPtr->GetSubRois().push_back(subRoi);
//
// subRoi->mInnerConners.push_back({ 465,436 });
// subRoi->mInnerConners.push_back( { 615, 260 } );
// subRoi->mInnerConners.push_back({ 783, 405 } );
// subRoi->mInnerConners.push_back( { 630, 581 } );
// for (int i = 0; i < subRoi->mInnerConners.size(); ++i)
// subRoi->mInnerConners[i] = {
// float(subRoi->mInnerConners[i].x * 9344 / 934 / 1.5),
// float(subRoi->mInnerConners[i].y * 7000 / 700 / 1.5)};
// std::vector<cv::Point2f> vec2D;
// std::vector<cv::Point3f> vec3D;
//
// //ret = Cal2D::LibapiSelect2DTo3DPoints(*measureInfoPtr, *subRoi, vec2D, vec3D);
//
// cv::Mat H;
// //ret = Cal2D::LibapiCalH(vec2D, vec3D, H);
//
// // dispatch ROIs to multiple worker threads for recognition
//
//
// return 0;
//}