# lidar_camera_cablition/flow_homo.py
# Last modified: 2024-12-16 12:30:52 +08:00
# -- coding: utf-8 --
import numpy as np
import cv2
import math
import open3d as o3d
class rect_roi:
    """Axis-aligned rectangular region of interest in pixel coordinates."""

    def __init__(self, x: int, y: int, w: int, h: int):
        # Top-left corner (x, y) plus width/height, matching OpenCV ROI slicing.
        self.x = x
        self.y = y
        self.w = w
        self.h = h
class obj_info:
    """Measurement result for one detected embedded part."""

    def __init__(self, name: str, id: str, points_2d_to_3d: cv2.Mat, bim=None):
        # Human-readable name and unique identifier of the part.
        self.name = name
        self.id = id
        # Per-pixel 2-D -> 3-D mapping restricted to this part's rectangle.
        self.rect_points_2d_to_3d = points_2d_to_3d
        # Optional BIM reference for the part (None when unavailable).
        self.bim = bim
        # Measured edge lengths, filled in by the pipeline later.
        self.edges = []
        # Origin and centre points in 2-D coordinates (defaults until computed).
        self.orgin_pt = [0.0, 0.0]
        self.centor_pt = [0.0, 0.0]
def _api_cal_homography(_3d_points, _2d_points):
    """Estimate the homography H mapping plane 3-D points to homogeneous pixels.

    _3d_points: sampled points on the part's plane.
    _2d_points: matching [x, y] pixel coordinates, one per 3-D point.

    Returns the 3x3 homography estimated with RANSAC.
    Raises ValueError when OpenCV cannot estimate a homography.
    """
    # RANSAC reprojection threshold (px): smaller -> higher precision,
    # with no measurable impact on runtime.
    ransacReprojThreshold = 5.0
    # Lift the pixel coordinates to homogeneous form [x, y, 1].
    pts_hom = [[float(pt[0]), float(pt[1]), 1.0] for pt in _2d_points]
    # RANSAC selects the best inlier set so back-projection error is minimal.
    H, _ = cv2.findHomography(np.array(_3d_points), np.array(pts_hom),
                              cv2.RANSAC, ransacReprojThreshold)
    if H is None:
        # findHomography returns None on failure; fail fast here instead of
        # letting a later np.linalg.inv(None) crash obscurely downstream.
        raise ValueError("_api_cal_homography: homography estimation failed")
    print(H)  # debug output, kept for parity with the original pipeline
    return H
# Currently unused placeholder.
def _api_get_fake_roi_data():
    """Return a fake ROI list (one empty entry) for offline testing."""
    return [[]]
def _api_get_obj_2d_roi(im, rois):
    """Crop each rect_roi out of *im* and return the crops in input order."""
    # NumPy-style slicing: rows are y..y+h, columns are x..x+w.
    return [im[r.y:r.y + r.h, r.x:r.x + r.w] for r in rois]
def __get_small_conners(conners, offset):
    """Shrink a 4-corner quad inward by *offset* px using a per-corner rule.

    The offsets are asymmetric on purpose: each corner is nudged toward the
    quad's interior along the axes that matter for that corner.
    """
    (x0, y0), (x1, y1), (x2, y2), (x3, y3) = conners
    return [[x0 + offset, y0],
            [x1 - offset, y1 + offset],
            [x2 - offset, y2],
            [x3, y3 - offset]]
def _api_get_sub_rois_with_conners(conners):
    """Build the interior sampling polygon(s) for a part's corner quad."""
    # One polygon, shrunk inward by 200 px; more sub-ROIs could be added later.
    return [__get_small_conners(conners, 200)]
def _api_get_obj_2d_to_3d_pts(im_2d_to_3d: cv2.Mat, sub_rois_in_conners):
    """Sample matched 2-D pixel / 3-D point pairs inside each sub-ROI polygon.

    im_2d_to_3d: image whose 3 channels hold the 3-D coordinates of the lidar
        point projected onto each pixel (zeros where nothing projects) —
        TODO confirm channel layout against the projection step.
    sub_rois_in_conners: list of 4-corner polygons in full-image coordinates.

    Returns (pairs, pts_2d, pts_3d). The first list is never populated and is
    kept only so existing callers keep unpacking three values.
    """
    in_rect_2d_to_3d_pts = []  # reserved, intentionally left empty
    in_rect_2d_pts = []
    in_rect_3d_pts = []
    for sub_roi in sub_rois_in_conners:
        # Rasterize the polygon into a mask and keep only the pixels inside it.
        mask = np.zeros_like(im_2d_to_3d)
        mask = cv2.fillPoly(mask, [np.array(sub_roi)], color=(255, 255, 255))
        roi = cv2.bitwise_and(im_2d_to_3d, mask)
        gray = cv2.cvtColor(np.uint8(roi), cv2.COLOR_BGR2GRAY)
        nz = cv2.findNonZero(gray)
        if nz is None:
            # Polygon contained no projected lidar points; the original code
            # crashed on len(None) here.
            continue
        # Subsample every 20th pixel to keep the correspondence set small;
        # j is deliberately distinct from the outer loop variable.
        for j, p in enumerate(nz):
            if j % 20 != 0:
                continue
            px, py = p[0].tolist()
            in_rect_2d_pts.append([px, py])
            v = im_2d_to_3d[py, px]  # fetch the pixel's 3-D value once
            in_rect_3d_pts.append([v[0], v[1], v[2]])
    return in_rect_2d_to_3d_pts, in_rect_2d_pts, in_rect_3d_pts
# Returns corner coordinates in the ORIGINAL image frame, not ROI-local ones.
def _api_get_edges_and_conners(roi_im, roi_rect, fake=True):
    """Detect an embedded part's edges and corner points inside a ROI image.

    Returns (status, edges, corners). status would flag a non-part region;
    only the hand-labelled ``fake`` path is implemented so far.
    """
    rst = "ok"
    _edges = []
    _conners = []
    if fake:
        # Four corners hand-picked on the downscaled preview (934x700 at 1.5x),
        # mapped back to the full-resolution 9344x7000 frame. The division
        # order matches the calibration that produced these constants.
        preview = [[465, 436], [615, 260], [783, 405], [630, 581]]
        _conners = [[int(px * 9344 / 934 / 1.5), int(py * 7000 / 700 / 1.5)]
                    for px, py in preview]
    return rst, _edges, _conners
# roi_im / roi_rect may be reintroduced for refinement in the future:
# def _api_cal_3d_edge_scale(roi_im, roi_rect, conners, H):
def _api_cal_3d_edge_scale(conners, H, _2pts, _3pts):
    """Back-project the four 2-D corners to 3-D and measure the edge lengths.

    conners: four [x, y] pixel corners in full-image coordinates.
    H: homography from the part's plane coordinates to homogeneous pixels.
    _2pts: unused here; kept for interface compatibility.
    _3pts: 3-D lidar points inside the part, used to fit the part's plane.

    Returns the four edge lengths in corner order (3 wraps back to 0).
    Raises ValueError when not exactly four corners are supplied.
    """
    if len(conners) != 4:
        # The original used assert("...") — asserting a truthy string, which
        # can never fire. Fail explicitly instead.
        raise ValueError("_api_cal_3d_edge_scale: expected exactly 4 corners, "
                         f"got {len(conners)}")
    # Fit the dominant plane A*x + B*y + C*z + D = 0 through the lidar points.
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(_3pts)
    [A, B, C, D], inliers = cloud.segment_plane(distance_threshold=0.01,
                                                ransac_n=3,
                                                num_iterations=1500)
    H_inv = np.linalg.inv(H)
    _conners_3d = []
    for conner in conners:
        x, y = conner[0], conner[1]
        # Back-project the pixel through H^-1 and normalize the homogeneous part.
        ray = np.dot(H_inv, np.array([x, y, 1]).T)
        ray = ray / ray[2]
        # Intersect the viewing ray t*ray with the fitted plane.
        t = -D / (A * ray[0] + B * ray[1] + C * ray[2])
        _conners_3d.append([t * ray[0], t * ray[1], t * ray[2]])
    # Euclidean length of each side of the quad.
    return [math.dist(_conners_3d[i], _conners_3d[(i + 1) % 4])
            for i in range(4)]
# Main processing pipeline.
# for test: roi_rects = [[0,0,0,0]]
def api_cal_info(im, im_2d_to_3d, roi_rects):
    """Measure every embedded part found in the given ROIs.

    im: full camera image.
    im_2d_to_3d: per-pixel 3-D coordinates (lidar points projected into im).
    roi_rects: list of rect_roi, one per candidate part.

    Returns a list of obj_info (currently always empty; population is TODO).
    """
    print("api_cal_info start!")
    _obj_info_list = []
    # Crop every candidate region out of the full frame.
    roi_images = _api_get_obj_2d_roi(im, roi_rects)
    # One part per ROI; each iteration is independent and could run in its
    # own thread for throughput later.
    for idx, roi_image in enumerate(roi_images):
        # Detect the part's edges/corners inside this ROI. The returned
        # corners are in full-image coordinates, not ROI-local ones.
        _rst, _, corners_2d = _api_get_edges_and_conners(roi_image, roi_rects[idx])
        print("api_cal_info _2d_conners ==> ")
        print(corners_2d)
        # Shrunken interior polygons (full-image frame) used to collect
        # 2-D/3-D correspondences; the sampling strategy is still evolving.
        sub_rois = _api_get_sub_rois_with_conners(corners_2d)
        # Sample matched pixel / lidar-point pairs inside the part.
        _, pts_2d, pts_3d = _api_get_obj_2d_to_3d_pts(im_2d_to_3d, sub_rois)
        # Homography from the part's plane to the image.
        H = _api_cal_homography(pts_3d, pts_2d)
        # Back-project the corners through H and measure the edge lengths.
        edge_scales = _api_cal_3d_edge_scale(corners_2d, H, pts_2d, pts_3d)
        print(edge_scales)
    print("api_cal_info end!")
    return _obj_info_list
if __name__ == '__main__':
    # Library module: no standalone behaviour yet.
    pass