# calibration_tools_v1.0/cloud_3d_detect.py
# -*- coding: utf-8 -*-
from ctypes import *
import cv2 # 4.9.0
print(cv2.__version__) # 4.9.0
import numpy as np
print(np.__version__) # 1.24.4
import open3d as o3d
print(o3d.__version__) # 0.18.0
import math
import os
import time
# from skimage import feature
import aurco_3d_detect
from os import abort
from utils import Log
from config import *
# Global point-cloud shift: kk
# Scattered edge points need to be re-projected onto the plane
# The corner-point algorithm needs further optimisation
# Pipeline
#   3D point-cloud preparation: correct the kk parameter (related to object reflectivity; looked up via a MAP table that is updated over time)
#   Spatial filtering of the 3D point cloud
#   Extract the calibration boards from the 3D cloud: seg_mark_cloud
#     build im_2d_to_3d (using the camera intrinsics)
#     build im_project (using the camera intrinsics)
#     detect the 2D board regions obj_roi in im_project
#     gather the 3D points of im_2d_to_3d that fall inside each obj_roi into a separate per-board 3D data file
#   Refine the per-board 3D data: correct_mark_cloud
#     smooth the 3D data, remove stray and noise points
#     fit a plane with RANSAC: plane model [A,B,C,D], inlier set inlier_cloud, outlier set outlier_cloud
#     fit the inlier_cloud plane by least squares
#     re-project inliers and outliers onto the plane [A,B,C,D] to obtain inlier_pcd_correct
#     merge inlier_pcd_correct and inlier_cloud into inlier_pcd_correct
#   Extract the 3D board corners: select_conners_from_mark_cloud_2d
#     automatically pick 3D corners from inlier_pcd_correct via select_conners_from_mark_cloud_3d (automatic 3D corner extraction)
#     project inlier_pcd_correct to 2D
#       build im_2d_to_3d_n (n: 1-4) (using the camera intrinsics)
#       build im_project_n (n: 1-4) (using the camera intrinsics)
#     manually mark the corners in the 2D image
#     recover the 3D corners c_lidar[16] from the 2D points, H and [A,B,C,D]
#     [check] error between the board edge length l computed from c_lidar[16] and the true value: absolute error, RMS of the 4 edges
#   Extract the 2D board corners
#     locate the ArUco markers and compute the extrinsics
#     compute the 3D positions of the 4 board corners: c_camera[16]
#     [check] error between the board edge length l computed from c_camera[16] and the true value: absolute error, RMS of the 4 edges
#   SVD (an illustrative sketch of this step follows these notes)
#     use SVD on c_lidar[16] and c_camera[16] to compute the camera-LiDAR transform RT
#     repeat the SVD over different data sets and refine RT by maximum likelihood
#     [check] compare c_lidar[16] mapped through RT against c_camera[16]: absolute error, RMS of the 4 edges
#     [check] error between corners re-projected from the 3D cloud onto the 2D image and the board corners in the image (manual estimate)
#   Inference validation
#     detect the board in 2D
#     extract the 2D board corners
#     recover the 3D corners c_lidar_camera[16] from the 2D points, H and [A,B,C,D]
#     [check] error between c_lidar_camera[16] and c_lidar[16]: absolute error, RMS of the 4 edges
#     [check] error between edge lengths computed from c_lidar_camera[16], c_camera[16], c_lidar[16] and the true value: absolute error, RMS of the 4 edges
#   Automated parameter optimisation
#     implement an automated inference-validation loop
#     record parameter/result comparisons
#     optimise the parameters automatically
#   Field test on embedded parts
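# A minimal, illustrative sketch of the SVD step referenced in the pipeline notes above
# (Kabsch algorithm). Assumption: the helper name and its use here are illustrative only;
# the actual RT estimation/refinement is not implemented in this module.
def _estimate_rt_svd(c_lidar, c_camera):
    # c_lidar, c_camera: matched N x 3 corner sets; returns R, t with c_camera ~= R @ c_lidar + t
    src = np.asarray(c_lidar, dtype=np.float64)
    dst = np.asarray(c_camera, dtype=np.float64)
    src_centroid = src.mean(axis=0)
    dst_centroid = dst.mean(axis=0)
    # Cross-covariance of the centred point sets
    H_cov = (src - src_centroid).T @ (dst - dst_centroid)
    U, _, Vt = np.linalg.svd(H_cov)
    S = np.eye(3)
    S[2, 2] = np.sign(np.linalg.det(Vt.T @ U.T))  # guard against a reflection solution
    R = Vt.T @ S @ U.T
    t = dst_centroid - R @ src_centroid
    return R, t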
def _eulerAnglesToRotationMatrix(theta) :
R_x = np.array([[1, 0, 0 ],
[0, math.cos(theta[0]), -math.sin(theta[0]) ],
[0, math.sin(theta[0]), math.cos(theta[0]) ]
])
R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ],
[0, 1, 0 ],
[-math.sin(theta[1]), 0, math.cos(theta[1]) ]
])
R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1]
])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
def cal_project_matrix():
    # src_size is the original image size
    src_size = (w, h)
    # Compute the new camera matrix (getOptimalNewCameraMatrix returns (matrix, valid-pixel ROI))
    new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coeffs, src_size, alpha=0)
    Log.info("2D projection matrix:")
    print(new_camera_matrix)
    return new_camera_matrix
# Get the minimum and maximum depth (x coordinate) of a point cloud
def get_pcd_depth_range(pts_3d):
    points = np.asarray(pts_3d.points)
    # Initialise min and max with the first point's x coordinate
    min_x = points[0][0]
    max_x = points[0][0]
    # Scan all points and update the min/max
    for point_3d in points:
        x = point_3d[0]
        if x < min_x:
            min_x = x
        elif x > max_x:
            max_x = x
    return max_x, min_x
def undistort(distorted_img):
    # Correct the lens distortion
undistorted_img = cv2.undistort(distorted_img, camera_matrix, dist_coeffs, None, camera_matrix)
# cv2.imwrite("distorted_img.png", undistorted_img)
# undistorted_img = cv2.imread("distorted_img.png")
# cv2.imshow("1", undistorted_img)
# cv2.waitKey(0)
return undistorted_img
def clear_folder(folder_path):
for file_name in os.listdir(folder_path):
file_path = os.path.join(folder_path, file_name)
if os.path.isfile(file_path):
os.remove(file_path)
else:
clear_folder(file_path)
os.rmdir(file_path)
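# Sort the four mark ROIs into top-left, top-right, bottom-left, bottom-right order:
# the ROI whose first corner has the smallest x + y becomes top-left, the one with the
# largest x + y becomes bottom-right, and of the remaining two the one with the larger x
# is taken as top-right.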
def sort_marks_pcd_3d(masks):
new_mask = []
left_pt_0, left_pt_1, left_pt_2, left_pt_3 = 0,0,0,0
delete_index = []
coordinate_temp = [0,1,2,3]
max_val = 0
min_val = 999999
for i in range(4):
left_pt = masks[i][0]
val = left_pt[0] + left_pt[1]
if val > max_val:
left_pt_3 = i
max_val = val
if val < min_val:
left_pt_0 = i
min_val = val
delete_index.append(left_pt_0)
delete_index.append(left_pt_3)
coordinate_temp = np.delete(coordinate_temp, delete_index, axis=0)
if masks[coordinate_temp[0]][0][0] > masks[coordinate_temp[1]][0][0]:
left_pt_1 = coordinate_temp[0]
left_pt_2 = coordinate_temp[1]
else:
left_pt_2 = coordinate_temp[0]
left_pt_1 = coordinate_temp[1]
new_mask.append(masks[left_pt_0])
new_mask.append(masks[left_pt_1])
new_mask.append(masks[left_pt_2])
new_mask.append(masks[left_pt_3])
return new_mask
def detect_marks_roi(im_project, show=False):
masks = []
offset = 40
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 30))
im_tmp = cv2.morphologyEx(im_project, cv2.MORPH_DILATE, kernel)
im_tmp = cv2.GaussianBlur(im_tmp, (29, 29), 0)
# im_tmp = cv2.resize(im_tmp, (900, 700))
# cv2.imshow(".",im_tmp)
# cv2.waitKey(0)
    # Find connected regions
gray = cv2.cvtColor(im_tmp, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
# num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(binary, connectivity=8, ltype=cv2.CV_32S)
contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
colors = [ (255, 0, 0) , (255, 255, 0) , (0, 0, 255) , (255, 0, 255) ]
color_idx = 0
Log.info("3D->2D 投影连通区域的像素点数量:")
for contour in contours:
rect_conner_points = []
size = cv2.contourArea(contour)
top_idx = 0
right_idx = 0
bottom_idx = 0
left_idx = 0
x, y, w, h = cv2.boundingRect(contour)
for idx in range(len(contour)):
if y == contour[idx][0][1]: top_idx = idx
if x == contour[idx][0][0]: left_idx = idx
if y + h - 1 == contour[idx][0][1]: bottom_idx = idx
if x + w - 1 == contour[idx][0][0]: right_idx = idx
contour[left_idx][0][0] -= offset
contour[top_idx][0][1] -= offset
contour[right_idx][0][0] += offset
contour[bottom_idx][0][1] += offset
if contour[left_idx][0][0] < 0 or contour[top_idx][0][1] < 0 or \
contour[right_idx][0][0] > global_w or contour[bottom_idx][0][1] > global_h:
continue
if size > rmin:
print(size)
if size > rmin and size < rmax:
contour[left_idx][0][0] -= offset
contour[top_idx][0][1] -= offset
contour[right_idx][0][0] += offset
contour[bottom_idx][0][1] += offset
cv2.circle(im_project, (contour[top_idx][0][0], contour[top_idx][0][1]), 25, colors[color_idx] , -1)
cv2.circle(im_project, (contour[right_idx][0][0], contour[right_idx][0][1]), 25, colors[color_idx], -1)
cv2.circle(im_project, (contour[bottom_idx][0][0], contour[bottom_idx][0][1]), 25, colors[color_idx], -1)
cv2.circle(im_project, (contour[left_idx][0][0], contour[left_idx][0][1]), 25, colors[color_idx], -1)
rect_conner_points.append(contour[left_idx][0])
rect_conner_points.append(contour[top_idx][0])
rect_conner_points.append(contour[right_idx][0])
rect_conner_points.append(contour[bottom_idx][0])
masks.append(rect_conner_points)
color_idx += 1
if len(masks) != MARK_COUNT:
Log.error("3D 重投影未找到4个mark")
show = True
if show:
im_project = cv2.resize(im_project, (900, 700))
cv2.imshow("im", im_project)
cv2.waitKey(0)
return []
    # Re-sort the marks into top-left (1), top-right (2), bottom-left (3), bottom-right (4) order
masks = sort_marks_pcd_3d(masks)
# for test
if show:
im_project = cv2.resize(im_project, (900, 700))
cv2.imshow("im", im_project)
cv2.waitKey(0)
return masks
def project_cloud(pcd_cloud):
    # im_project: black image with white pixels where cloud points project;
    # im_2d_to_3d: per-pixel lookup from image coordinates back to the original 3D point.
    # Identity extrinsics (rvec = tvec = 0): the cloud is assumed to already be in the camera
    # frame, so projectPoints only applies the intrinsics and the distortion model.
    im_project = np.zeros((global_h, global_w, 3), dtype=np.uint8)
    im_2d_to_3d = np.zeros((global_h, global_w, 3), dtype=np.float64)
    rvec = np.array([np.array([0.]), np.array([0.]), np.array([0.])])
    tvec = np.float64([0.0, 0.0, 0.0])
    pts_2d, _ = cv2.projectPoints(np.array(pcd_cloud.points), rvec, tvec, camera_matrix, dist_coeffs)
color = (255,255,255)
list_pts_2d = pts_2d.tolist()
list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
points_np = np.asarray(pcd_cloud.points)
for i, pt_2d in enumerate(list_pts_2d):
# x, y = pt_2d.ravel()
# if x == float('nan') or y == float('nan'): continue
# x, y = int(x), int(y)
# x, y = pt_2d[0][0], pt_2d[0][1]
if 0 <= pt_2d[0][0] < global_w and 0 <= pt_2d[0][1] < global_h:
im_project[pt_2d[0][1], pt_2d[0][0]] = color
# im_2d_to_3d[y, x][0] = pcd_cloud.points[i][0]
# im_2d_to_3d[y, x][1] = pcd_cloud.points[i][1]
# im_2d_to_3d[y, x][2] = pcd_cloud.points[i][2]
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]] = points_np[i]
return im_project, im_2d_to_3d, pts_2d, list_pts_2d
def seg_mark_cloud(ply_file_path, kk, show=False):
marks_pcd = []
src_cloud_load = o3d.io.read_point_cloud(ply_file_path)
if src_cloud_load is None or len(src_cloud_load.points) == 0 :
abort()
start_time = time.time()
    # Rotate the point cloud
Rx = src_cloud_load.get_rotation_matrix_from_xyz((roll, 0, 0))
Ry = src_cloud_load.get_rotation_matrix_from_xyz((0, pitch, 0))
Rz = src_cloud_load.get_rotation_matrix_from_xyz((0, 0, yaw))
# t_matrix = np.identity(4)
# t_matrix[:3, 3] = T_vector
src_cloud_load.rotate(Rx, (1,0,0))
src_cloud_load.rotate(Ry, (0,1,0))
src_cloud_load.rotate(Rz, (0,0,1))
src_cloud_load.translate(T_vector)
    # Filter the point cloud
    project_inlier_cloud = o3d.geometry.PointCloud()
    for point_3d in np.asarray(src_cloud_load.points):
        if -2.0 < point_3d[0] < 2.0 and -2.0 < point_3d[1] < 2.0 and point_3d[2] < 3.5:  # keep points inside the x, y, z range
            _r = math.sqrt(pow(point_3d[0], 2) + pow(point_3d[1], 2) + pow(point_3d[2], 2))
            if _r == 0: continue
            # Shift the point radially (away from the sensor origin) by kk metres
            point_3d[0] = point_3d[0] + point_3d[0] / _r * kk
            point_3d[1] = point_3d[1] + point_3d[1] / _r * kk
            point_3d[2] = point_3d[2] + point_3d[2] / _r * kk
            # point_3d[2] -= 0.0
            project_inlier_cloud.points.append((point_3d[0], point_3d[1], point_3d[2]))
    # Project the 3D cloud to 2D
    im_project, im_2d_to_3d, pts_2d, list_pts_2d = project_cloud(project_inlier_cloud)
    # Automatically detect the ROI corners
    mark_rois = detect_marks_roi(im_project, show)
    if len(mark_rois) != MARK_COUNT:
        return marks_pcd
    # Save the 4 per-board point clouds to files
    # clear_folder("./detect_cloud")
    clear_folder(conf_temp_cloud_path)
# list_pts_2d = pts_2d.tolist()
# list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
for i in range(len(mark_rois)):
if len(mark_rois[i]) < 4: continue
one_mark_pcd = o3d.geometry.PointCloud()
mark_rois_tmp = np.asarray(mark_rois[i])
for _, pt_2d in enumerate(list_pts_2d):
# x, y = pt_2d.ravel()
# x, y = int(x), int(y)
# x, y = pt_2d[0][0], pt_2d[0][1]
if cv2.pointPolygonTest(mark_rois_tmp, (pt_2d[0][0],pt_2d[0][1]), False) >= 0:
one_mark_pcd.points.append((im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][0],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][1],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][2]))
# o3d.io.write_point_cloud("./detect_cloud/" + str(i) + ".ply", tmp_cloud)
o3d.io.write_point_cloud(conf_temp_cloud_path + str(i) + ".ply", one_mark_pcd)
if show:
o3d.visualization.draw_geometries([one_mark_pcd], mesh_show_wireframe=True)
marks_pcd.append(one_mark_pcd)
if show:
im_project = cv2.resize(im_project, (int(934), int(700)), interpolation=cv2.INTER_LINEAR)
cv2.imshow("im_project", im_project)
cv2.waitKey(0)
end_time = time.time()
execution_time = end_time - start_time
Log.info("3D 分割点云数据耗时:")
print(execution_time * 1000)
return marks_pcd
def filter_cloud(pcd):
    # Get the point coordinates
    points = np.asarray(pcd.points)
    # Define the x, y, z ranges to keep
    x_min, x_max = -1.8, 1.8
    y_min, y_max = -1.5, 1.5
    z_min, z_max = 0.0, global_z_max
    # Build a boolean mask
    mask = (
        (points[:, 0] >= x_min) & (points[:, 0] <= x_max) &
        (points[:, 1] >= y_min) & (points[:, 1] <= y_max) &
        (points[:, 2] >= z_min) & (points[:, 2] <= z_max)
    )
    # Apply the mask to filter the points
    filtered_points = points[mask]
    # Update the point cloud object in place
    pcd.points = o3d.utility.Vector3dVector(filtered_points)
    return pcd
def seg_mark_cloud_simple(ply_file_path, kk, show=False):
marks_pcd = []
src_cloud_load = o3d.io.read_point_cloud(ply_file_path)
if src_cloud_load is None or len(src_cloud_load.points) == 0 :
abort()
start_time = time.time()
    # Rotate the point cloud
Rx = src_cloud_load.get_rotation_matrix_from_xyz((roll, 0, 0))
Ry = src_cloud_load.get_rotation_matrix_from_xyz((0, pitch, 0))
Rz = src_cloud_load.get_rotation_matrix_from_xyz((0, 0, yaw))
# t_matrix = np.identity(4)
# t_matrix[:3, 3] = T_vector
src_cloud_load.rotate(Rx, (1,0,0))
src_cloud_load.rotate(Ry, (0,1,0))
src_cloud_load.rotate(Rz, (0,0,1))
src_cloud_load.translate(T_vector)
    # Filter the point cloud
    project_inlier_cloud = o3d.geometry.PointCloud()
    if kk != 0.0:
        for point_3d in np.asarray(src_cloud_load.points):
            if -2.0 < point_3d[0] < 2.0 and -2.0 < point_3d[1] < 2.0 and point_3d[2] < global_z_max:  # keep points inside the x, y, z range
                _r = math.sqrt(pow(point_3d[0], 2) + pow(point_3d[1], 2) + pow(point_3d[2], 2))
                if _r == 0: continue
                # Shift the point radially (away from the sensor origin) by kk metres
                point_3d[0] = point_3d[0] + point_3d[0] / _r * kk
                point_3d[1] = point_3d[1] + point_3d[1] / _r * kk
                point_3d[2] = point_3d[2] + point_3d[2] / _r * kk
                # point_3d[2] -= 0.0
                project_inlier_cloud.points.append((point_3d[0], point_3d[1], point_3d[2]))
else:
project_inlier_cloud = filter_cloud(src_cloud_load)
    # Project the 3D cloud to 2D
    im_project, im_2d_to_3d, pts_2d, list_pts_2d = project_cloud(project_inlier_cloud)
    # Automatically detect the ROI corners
    mark_rois = detect_marks_roi(im_project, show)
    if len(mark_rois) != MARK_COUNT:
        return marks_pcd
    # Save the 4 per-board point clouds to files
    # clear_folder("./detect_cloud")
    # clear_folder(conf_temp_cloud_path)
    # list_pts_2d = pts_2d.tolist()
    # list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
    Log.info("3D->2D project points")
for i in range(len(mark_rois)):
if len(mark_rois[i]) < 4: continue
one_mark_pcd = o3d.geometry.PointCloud()
mark_rois_tmp = np.asarray(mark_rois[i])
select_itmes = 0
for _, pt_2d in enumerate(list_pts_2d):
# x, y = pt_2d.ravel()
# x, y = int(x), int(y)
# x, y = pt_2d[0][0], pt_2d[0][1]
if cv2.pointPolygonTest(mark_rois_tmp, (pt_2d[0][0],pt_2d[0][1]), False) >= 0:
# one_mark_pcd.points.append((im_2d_to_3d[y, x][0], im_2d_to_3d[y, x][1], im_2d_to_3d[y, x][2]))
one_mark_pcd.points.append((im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][0],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][1],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][2]))
select_itmes += 1
print(select_itmes)
# o3d.io.write_point_cloud("./detect_cloud/" + str(i) + ".ply", tmp_cloud)
        # Do not write the clouds to files, to save time
# o3d.io.write_point_cloud(conf_temp_cloud_path + str(i) + ".ply", one_mark_pcd)
# if show:
# o3d.visualization.draw_geometries([one_mark_pcd], mesh_show_wireframe=True)
marks_pcd.append(one_mark_pcd)
if show:
im_project = cv2.resize(im_project, (int(934), int(700)), interpolation=cv2.INTER_LINEAR)
cv2.imshow("im_project", im_project)
cv2.waitKey(0)
end_time = time.time()
execution_time = end_time - start_time
Log.info("3D 分割点云数据耗时:")
print(execution_time * 1000)
return marks_pcd
# distance_threshold (float):
#     Maximum distance from a point to the estimated plane for the point to count as an inlier.
# ransac_n (int):
#     Number of points sampled per iteration to estimate the model; 3 for plane segmentation,
#     since three non-collinear points uniquely determine a plane.
# num_iterations (int):
#     Number of RANSAC iterations; more iterations increase the chance of finding the best model
#     at the cost of extra computation time.
def correct_mark_cloud(mark_pcd):
plane_model, inliers = mark_pcd.segment_plane(distance_threshold=0.0155,
ransac_n=3,
num_iterations=10000)
    inlier_pcd = mark_pcd.select_by_index(inliers)
    inlier_pcd = reproject_cloud(plane_model, inlier_pcd)  # inliers re-projected onto the fitted plane
    inlier_pcd = inlier_pcd.paint_uniform_color([1.0, 0, 0])
    outlier_cloud = mark_pcd.select_by_index(inliers, invert=True)
    # Not used for now
    # radius = 0.05
    # inlier_pcd = smooth_point_cloud(inlier_pcd, radius)  # cloud smoothed by local plane fitting
    inlier_pcd_correct = reproject_cloud(plane_model, mark_pcd)  # the full mark cloud re-projected onto the plane
    inlier_pcd_correct = inlier_pcd_correct.paint_uniform_color([0.0, 1.0, 0])
    # o3d.visualization.draw_geometries([ inlier_pcd, inlier_pcd_correct ])
    # inlier_pcd_correct = inlier_pcd # ???
    return plane_model, inlier_pcd, inlier_pcd_correct
# Extract the 3D board corners: select_conners_from_mark_cloud_2d
#   automatically pick 3D corners from inlier_pcd_correct via select_conners_from_mark_cloud_3d (automatic 3D corner extraction)
#   project inlier_pcd_correct to 2D
#     build im_2d_to_3d_n (n: 1-4) (using the camera intrinsics)
#     build im_project_n (n: 1-4) (using the camera intrinsics)
#   manually mark the corners in the 2D image
#   recover the 3D corners c_lidar[16] from the 2D points, H and [A,B,C,D] (a compact sketch of this lift follows below)
#   [check] error between the board edge length l computed from c_lidar[16] and the true value: absolute error, RMS of the 4 edges
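# A compact, illustrative sketch of the "2D point + H + [A,B,C,D] -> 3D corner" lift described
# above. Assumption: the helper name is not part of the pipeline; the real code lives in
# select_conners_from_mark_cloud_2d and reproject_cloud. Invert the homography from cal_H to get
# a point on the viewing ray, then scale it onto the fitted plane.
def _lift_pixel_to_plane(pt_2d, H, plane_model):
    A, B, C, D = plane_model
    ray = np.linalg.inv(H).dot(np.array([pt_2d[0], pt_2d[1], 1.0]))
    ray = ray / ray[2]                               # 3D point on the viewing ray
    t = -D / (A * ray[0] + B * ray[1] + C * ray[2])  # ray/plane intersection parameter
    return t * ray                                   # 3D corner on the plane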
def cal_H(im_2d_to_3d, pts_2d, list_pts_2d):
pts_3d = []
# list_pts_2d = pts_2d.tolist()
# list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
for i, pt_2d in enumerate(list_pts_2d):
# x, y = pt_2d.ravel()
# x, y = int(x), int(y)
# x, y = pt_2d[0][0], pt_2d[0][1]
pts_3d.append([im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][0],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][1],
im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]][2]])
    ransacReprojThreshold = 3.0
    # Find the homography
    # (the reprojection threshold was historically changed from 1.0 to 5.0; currently 3.0)
    # A RANSAC-based robust estimator selects the best correspondences and computes the 3x3
    # transform H that minimises the back-projection error.
    # Note: for points in the camera frame, s*(u, v, 1)^T = K*(x, y, z)^T, so a single 3x3
    # matrix can map the 3D board points directly to homogeneous pixel coordinates.
    # H, mask = cv2.findHomography(np.array(src_points), np.array(dst_points), cv2.RANSAC, ransacReprojThreshold, maxIters=1500, confidence=0.998)
    pts_hom = []
    for i in range(len(pts_2d)):
        pt = pts_2d[i]
        x, y = pt[0][0], pt[0][1]
        pts_hom.append([x * 1.0, y * 1.0, 1.0])
    H, _ = cv2.findHomography(np.array(pts_3d), np.array(pts_hom), cv2.RANSAC, ransacReprojThreshold)
    Log.info("2D-3D homography result:")
    print(H)
    return H
def sort_conners_2d(conners):
sort_conner = []
left, top, right, bottom = 0, 0, 0, 0
# maxx,maxy = 0,0
maxx,maxy = -100000,-100000
minx,miny = 100000,100000
for i in range(len(conners)):
conner = conners[i]
if conner[0] < minx:
minx = conner[0]
left = i
if conner[0] > maxx:
maxx = conner[0]
right = i
if conner[1] < miny:
miny = conner[1]
top = i
if conner[1] > maxy:
maxy = conner[1]
bottom = i
sort_conner.append(conners[left])
sort_conner.append(conners[top])
sort_conner.append(conners[right])
sort_conner.append(conners[bottom])
return sort_conner
def remove_convex_hull_points(contour):
hull = cv2.convexHull(contour, returnPoints=True)
hull_indices = set(hull.flatten())
non_hull_points = [point for i, point in enumerate(contour) if i not in hull_indices]
return np.array(non_hull_points)
def smooth_convex_hull(contour, epsilon=0.1):
    # Compute the convex hull
    hull = cv2.convexHull(contour)
    # Reduce the number of vertices with approxPolyDP to smooth the hull
    epsilon_value = epsilon * cv2.arcLength(hull, True)
    smoothed_hull = cv2.approxPolyDP(hull, epsilon_value, True)
    return np.array(smoothed_hull)
from scipy.interpolate import splprep, splev
def spline_fit(points, s=0, k=2):
    # Make sure there are enough data points for a degree-k spline
    if len(points) <= k:
        return None
tck, u = splprep(points.T, s=s, k=k)
u_new = np.linspace(u.min(), u.max(), 1000)
x_new, y_new = splev(u_new, tck)
return np.vstack((x_new, y_new)).T
def smooth_convex_hull_with_spline(contour, s=0):
hull = cv2.convexHull(contour)
points = hull.reshape(-1, 2)
smoothed_points = spline_fit(points, s=s)
if smoothed_points is None:
return None
return smoothed_points.astype(np.int32)
def find_largest_contour(contours):
    if not contours:
        return None
    # Compute the area of every contour
    areas = [cv2.contourArea(cnt) for cnt in contours]
    # Index of the largest-area contour
    max_area_index = np.argmax(areas)
    # Return the largest contour
    return contours[max_area_index]
from shapely.geometry import Polygon, MultiPolygon  # shapely is required for the polygon intersection handling
def rect_to_polygon(rect):
box = cv2.boxPoints(rect)
return Polygon(box)
def polygon_intersection(poly1, poly2):
inter_poly = poly1.intersection(poly2)
if not inter_poly.is_empty:
return inter_poly
else:
return None
def fill_intersection_white(image, rect1, rect2):
    """
    Fill the intersection of two rotated rectangles with white in the given image.
    Args:
        image (numpy.ndarray): input image
        rect1 (tuple): first rotated rectangle as (center, size, angle)
        rect2 (tuple): second rotated rectangle as (center, size, angle)
    """
    # Convert the rotated rectangles to polygons
    poly1 = rect_to_polygon(rect1)
    poly2 = rect_to_polygon(rect2)
    # Compute the intersection
    intersection = polygon_intersection(poly1, poly2)
    if intersection is not None:
        # Create a mask and fill the intersection with white
        mask = np.zeros_like(image)
        # Intersection is a single polygon
        if isinstance(intersection, Polygon):
            inter_points = np.array(intersection.exterior.coords, dtype=np.int32)
            cv2.fillPoly(mask, [inter_points], (255, 255, 255))
        elif isinstance(intersection, MultiPolygon):
            for geom in intersection.geoms:
                inter_points = np.array(geom.exterior.coords, dtype=np.int32)
                cv2.fillPoly(mask, [inter_points], (255, 255, 255))
        # Apply the mask to the original image
        image[mask == 255] = 255
def select_conners_from_mark_cloud_2d_old(mark_pcd, plane_model):
    # Project inlier_pcd_correct to 2D
im_project, im_2d_to_3d, pts_2d, list_pts_2d = project_cloud(mark_pcd)
im_project_gray = cv2.cvtColor(im_project, cv2.COLOR_BGR2GRAY)
non_zero = cv2.findNonZero(im_project_gray)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
im_project = cv2.morphologyEx(im_project, cv2.MORPH_DILATE, kernel)
for point in non_zero:
im_project[point[0][1], point[0][0]] = (255,255,255)
rect = cv2.minAreaRect(non_zero)
box = cv2.boxPoints(rect)
box = np.int0(box)
# for test
# cv2.drawContours(im_project, [box], 0, (255), 10)
# im_project = cv2.resize(im_project, (1600, 1200))
# cv2.imshow("_im", im_project)
# cv2.waitKey(0)
    # Compute the homography of the mark cloud
    H = cal_H(im_2d_to_3d, pts_2d, list_pts_2d)
    # Get accurate 2D corner positions
    conners = sort_conners_2d(list(box))
    # Recover the 3D corner positions via the homography
H_inv = np.linalg.inv(H)
conners_pcd = o3d.geometry.PointCloud()
for i in range(4):
conner = conners[i]
x, y = conner[0], conner[1]
pts_hom = np.array([x, y, 1])
pts_3d = np.dot(H_inv, pts_hom.T)
pts_3d = pts_3d/pts_3d[2]
conners_pcd.points.append((pts_3d[0],pts_3d[1],pts_3d[2]))
conners_pcd = reproject_cloud(plane_model, conners_pcd)
Log.info("2D标记角点得到得边长为")
for i in range(4):
print(np.linalg.norm(conners_pcd.points[i] - conners_pcd.points[(i+1)%4]))
return conners_pcd
def select_conners_from_mark_cloud_2d(mark_pcd, plane_model):
    # Project inlier_pcd_correct to 2D
im_project, im_2d_to_3d, pts_2d, list_pts_2d = project_cloud(mark_pcd)
im_project_gray = cv2.cvtColor(im_project, cv2.COLOR_BGR2GRAY)
non_zero = cv2.findNonZero(im_project_gray)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
im_project = cv2.morphologyEx(im_project, cv2.MORPH_DILATE, kernel)
for point in non_zero:
im_project[point[0][1], point[0][0]] = (255,255,255)
rect = cv2.minAreaRect(non_zero)
box = cv2.boxPoints(rect)
box = np.int0(box)
#================================================
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (40, 40))
im_project_gray = cv2.morphologyEx(im_project_gray, cv2.MORPH_DILATE, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (40, 40))
im_project_gray = cv2.morphologyEx(im_project_gray, cv2.MORPH_CLOSE, kernel)
# im_project_gray = cv2.resize(im_project_gray, (1600, 1200))
# cv2.imshow("im_project_gray", im_project_gray)
# cv2.waitKey(0)
_, binary_img = cv2.threshold(im_project_gray, 0, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Process each contour
# processed_contours = [remove_convex_hull_points(cnt) for cnt in contours]
processed_contours = [smooth_convex_hull(cnt) for cnt in contours]
contour = find_largest_contour(processed_contours)
rect2 = cv2.minAreaRect(contour)
box2 = cv2.boxPoints(rect2)
box2 = np.int0(box2)
# box = box2
temp_img = np.zeros((im_project_gray.shape[0], im_project_gray.shape[1], 1), dtype=np.uint8)
fill_intersection_white(temp_img, rect, rect2)
non_zero = cv2.findNonZero(temp_img)
rect3 = cv2.minAreaRect(non_zero)
box3 = cv2.boxPoints(rect3)
box3 = np.int0(box3)
box = box2
# processed_contours = []
# for cnt in contours:
# smoothed_hull = smooth_convex_hull_with_spline(cnt, s=0.0)
# if smoothed_hull is None: continue
# # cv2.polylines(drawing, [smoothed_hull], isClosed=True, color=(0, 255, 0), thickness=2)
# processed_contours.append(smoothed_hull)
# drawing = np.zeros_like(im_project)
# cv2.drawContours(drawing, processed_contours, -1, color=255, thickness=1)
# cv2.imshow("Processed Contours", drawing)
# cv2.waitKey(0)
#
# for test
# cv2.drawContours(im_project, [box2], 0, (255), 15)
# # cv2.drawContours(im_project, processed_contours, -1, color=(0,0,255), thickness=10)
# cv2.drawContours(im_project, [box3], 0, (0,255,0), 20)
# for point in non_zero:
# im_project[point[0][1], point[0][0]] = (255,255,255)
# im_project = cv2.resize(im_project, (1400, 1000))
# cv2.imshow("_im", im_project)
# cv2.waitKey(0)
#================================================
    # Compute the homography of the mark cloud
    H = cal_H(im_2d_to_3d, pts_2d, list_pts_2d)
    # Get accurate 2D corner positions
    conners = sort_conners_2d(list(box))
    Log.error("3D->2D box edge lengths (px):")
    print(np.linalg.norm(conners[0] - conners[1]))
    print(np.linalg.norm(conners[1] - conners[2]))
    print(np.linalg.norm(conners[2] - conners[3]))
    print(np.linalg.norm(conners[3] - conners[0]))
    # Recover the 3D corner positions via the homography
H_inv = np.linalg.inv(H)
conners_pcd = o3d.geometry.PointCloud()
for i in range(4):
conner = conners[i]
x, y = conner[0], conner[1]
pts_hom = np.array([x, y, 1])
pts_3d = np.dot(H_inv, pts_hom.T)
pts_3d = pts_3d/pts_3d[2]
conners_pcd.points.append((pts_3d[0],pts_3d[1],pts_3d[2]))
conners_pcd = reproject_cloud(plane_model, conners_pcd)
Log.info("2D标记角点得到得边长为")
for i in range(4):
print(np.linalg.norm(conners_pcd.points[i] - conners_pcd.points[(i+1)%4]))
return conners_pcd
def extend_line_3d(pt1, pt2, distance):
    """
    Compute a point on the extension of the line through two 3D points.
    Args:
        pt1: coordinates of the first point (x1, y1, z1), list or array
        pt2: coordinates of the second point (x2, y2, z2), list or array
        distance: distance to extend beyond the second point
                  (positive extends forward, negative goes backwards)
    Returns:
        Coordinates (x, y, z) of the extended point, as an array.
    """
    # Convert to NumPy arrays
    point1 = np.array(pt1)
    point2 = np.array(pt2)
    # Direction vector
    direction = point2 - point1
    # Unit direction vector
    direction_unit = direction / np.linalg.norm(direction)
    # Point on the extension
    extended_point = point2 + distance * direction_unit
    return extended_point
def correct_mask_conner(inlier_pcd_correct, pcd_conners_2d):
    oriented_bounding_box = inlier_pcd_correct.get_axis_aligned_bounding_box()
    center_point = oriented_bounding_box.get_center()
    # Expected corner-to-centre distance, about 0.3536 (presumably 0.5 / sqrt(2) for a 0.5 m square board)
    real_len = 0.3535534
    pcd_correct_conners = o3d.geometry.PointCloud()
    for i in range(4):
        # Pull each corner along the centre-to-corner line so its distance to the centre equals real_len
        error_len = np.linalg.norm(pcd_conners_2d.points[i] - center_point) - real_len
        real_conner = extend_line_3d(center_point, pcd_conners_2d.points[i], -error_len)
        pcd_correct_conners.points.append(real_conner)
        # print("distance to centre: " + str(np.linalg.norm(pcd_conners_2d.points[i] - center_point)))
        # print(real_conner)
    return pcd_correct_conners, center_point
def fit_plane_least_squares(points):
    # Centroid of the points
    centroid = np.mean(points, axis=0)
    # Covariance (scatter) matrix
    cov_matrix = (points - centroid).T.dot(points - centroid)
    # Eigen-decomposition of the covariance matrix
    eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
    # The eigenvector of the smallest eigenvalue is the plane normal
    normal = eigenvectors[:, np.argmin(eigenvalues)]
    return centroid, normal
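# A hedged convenience sketch (assumption: this helper is not used elsewhere in the tool):
# converting the (centroid, normal) pair returned by fit_plane_least_squares into the
# [A, B, C, D] plane form used by plane_model throughout this module.
def _plane_abcd_from_centroid_normal(centroid, normal):
    n = np.asarray(normal, dtype=np.float64)
    n = n / np.linalg.norm(n)
    d = -float(n.dot(centroid))  # A*x + B*y + C*z + D = 0 must hold at the centroid
    return n[0], n[1], n[2], d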
def smooth_point_cloud(pcd, radius):
# Create a new PointCloud object
smoothed_pcd = o3d.geometry.PointCloud()
smoothed_points = []
# Build KDTree for the input point cloud
kdtree = o3d.geometry.KDTreeFlann(pcd)
# For each point, perform least squares plane fitting
for point in np.asarray(pcd.points):
# Find points within the radius
[k, idx, _] = kdtree.search_radius_vector_3d(point, radius)
if k < 3:
# If there are not enough points, skip
continue
# Extract neighboring points
neighbors = np.asarray(pcd.points)[idx, :]
# Use least squares to fit a plane
centroid, normal = fit_plane_least_squares(neighbors)
# Project the point onto the fitted plane
point_to_centroid = point - centroid
distance = point_to_centroid.dot(normal)
smoothed_point = point - distance * normal
smoothed_points.append(smoothed_point)
# Set the smoothed points
smoothed_pcd.points = o3d.utility.Vector3dVector(smoothed_points)
return smoothed_pcd
def reproject_cloud(plane_model, cloud_load):
    reproject_pcd = o3d.geometry.PointCloud()
    A, B, C, D = plane_model
    for pts_3d in cloud_load.points:
        # Intersect the ray from the origin through the point with the plane:
        # substituting t*p into A*x + B*y + C*z + D = 0 gives t = -D / (A*px + B*py + C*pz)
        t = -D / (A * pts_3d[0] + B * pts_3d[1] + C * pts_3d[2])
        # pts_3d = np.array([t * pts_3d[0], t * pts_3d[1], t * pts_3d[2]])
        reproject_pcd.points.append((t * pts_3d[0], t * pts_3d[1], t * pts_3d[2]))
    return reproject_pcd
def o2t(o_pcd):
t_pcd = o3d.t.geometry.PointCloud()
t_pcd.point.positions = o3d.core.Tensor(np.asarray(o_pcd.points))
t_pcd.point.colors = o3d.core.Tensor(np.asarray(o_pcd.colors))
o_pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
t_pcd.point.normals = o3d.core.Tensor(np.asarray(o_pcd.normals))
return t_pcd
def t2o(t_pcd):
o_pcd = o3d.geometry.PointCloud()
o_pcd.points = o3d.utility.Vector3dVector(t_pcd.point.positions.numpy())
# o_pcd.colors = o3d.utility.Vector3dVector(t_pcd.point.colors.numpy())
return o_pcd
# def calculate_curvature(pcd):
#     # Estimate normals
#     pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
#     # Compute curvature
#     curvatures = []
#     for i in range(len(pcd.points)):
#         normal = np.array(pcd.normals[i])
#         k_neighbors = pcd.select_by_index(pcd.nearest_k_neighbor(i, 10)[1])  # take the 10 nearest neighbours
#         neighbor_normals = np.array(k_neighbors.normals)
#         curvature = np.mean(np.abs(neighbor_normals - normal))  # mean absolute normal difference as a simple curvature estimate
#         curvatures.append(curvature)
#     return np.array(curvatures)
def calculate_curvature(pcd):
    # Estimate normals
    pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
    # Build a KDTree
    pcd_tree = o3d.geometry.KDTreeFlann(pcd)
    # Compute curvature
    curvatures = []
    for i in range(len(pcd.points)):
        [k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[i], 10)  # take the 10 nearest neighbours
        neighbor_normals = np.array([pcd.normals[j] for j in idx])
        normal = np.array(pcd.normals[i])
        curvature = np.mean(np.abs(neighbor_normals - normal))  # mean absolute normal difference as a simple curvature estimate
        curvatures.append(curvature)
    return np.array(curvatures)
def select_conners_from_mark_cloud_3d(inlier_cloud):
    # 2. Compute the convex hull to get the boundary points
    hull, _ = inlier_cloud.compute_convex_hull()
    hull_points = np.asarray(hull.vertices)
    # 3. Turn the hull vertices into a point cloud for further processing
    hull_pcd = o3d.geometry.PointCloud()
    hull_pcd.points = o3d.utility.Vector3dVector(hull_points)
    # 4. Assume the hull is roughly square and find the four quadrilateral corners.
    #    Simplified approach: take the two most distant points as one diagonal.
distances = np.linalg.norm(hull_points[:, None] - hull_points, axis=2)
max_distance_indices = np.unravel_index(np.argmax(distances), distances.shape)
diagonal_points = []
diagonal_points.append(hull_points[max_distance_indices[0]])
diagonal_points.append(hull_points[max_distance_indices[1]])
diagonal_points = np.asarray(diagonal_points)
    # Find the remaining two corners (the endpoints of the other diagonal)
remaining_points = np.delete(hull_points, max_distance_indices, axis=0)
idx = []
for i in range(len(remaining_points)):
pt = remaining_points[i]
if np.linalg.norm(pt - diagonal_points[0]) < 0.3 or np.linalg.norm(pt - diagonal_points[1]) < 0.3 :
idx.append(i)
remaining_points = np.delete(remaining_points, idx, axis=0)
distances = np.linalg.norm(remaining_points[:, None] - remaining_points, axis=2)
max_distance_indices = np.unravel_index(np.argmax(distances), distances.shape)
diagonal_points2 = []
diagonal_points2.append(remaining_points[max_distance_indices[0]])
diagonal_points2.append(remaining_points[max_distance_indices[1]])
diagonal_points2 = np.asarray(diagonal_points2)
corner_points = [diagonal_points[0], diagonal_points[1],
diagonal_points2[0], diagonal_points2[1]]
Log.info("3D标记角点得到得边长为")
print(np.linalg.norm(diagonal_points[0] - diagonal_points2[0]))
print(np.linalg.norm(diagonal_points[0] - diagonal_points2[1]))
print(np.linalg.norm(diagonal_points[1] - diagonal_points2[0]))
print(np.linalg.norm(diagonal_points[1] - diagonal_points2[1]))
    # 5. Build a new point cloud to represent the corners and visualise it
corner_pcd = o3d.geometry.PointCloud()
corner_pcd.points = o3d.utility.Vector3dVector(corner_points)
corner_pcd.paint_uniform_color([1, 0, 0])
return corner_pcd
from sklearn.cluster import DBSCAN
import copy
import scipy
def select_conners_from_mark_cloud_3d_v2(inlier_cloud):
    # Correction: perturb every other point slightly to avoid degenerate geometry
    downsampled_inlier_cloud = copy.deepcopy(inlier_cloud)
    for i in range(len(downsampled_inlier_cloud.points)):
        if i % 2 == 0:
            downsampled_inlier_cloud.points[i][0] += 0.0001
    # Estimate normals
    downsampled_inlier_cloud.estimate_normals()
    # Find boundary points (statistical outliers are taken as the boundary)
    cl, ind = downsampled_inlier_cloud.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
    boundary_points = downsampled_inlier_cloud.select_by_index(ind, invert=True)
    # Convert the boundary points to a numpy array
    points = np.asarray(boundary_points.points)
    # Assuming the target is a rectangle, use the convex hull to find a minimal bounding quadrilateral
    hull = scipy.spatial.ConvexHull(points)
    # Convex-hull vertex indices
    hull_vertices = hull.vertices
    # If the hull has exactly 4 vertices, these are likely the corners we want
    if len(hull_vertices) == 4:
        corner_points = points[hull_vertices]
    else:
        # Otherwise further processing would be needed;
        # simplified here: return the hull vertices directly as corners
        corner_points = points[hull_vertices]
    # for i in range(len(downsampled_inlier_cloud.points)):
    #     if i % 2 == 0:
    #         downsampled_inlier_cloud.points[i][0] += 0.0001
    # obb = downsampled_inlier_cloud.get_oriented_bounding_box()
    # Extract the corner points
    # corner_points = np.asarray(obb.get_box_points())
    # Build a new point cloud object for the corners
    corner_pcd = o3d.geometry.PointCloud()
    corner_pcd.points = o3d.utility.Vector3dVector(corner_points)
    # Visualise the result
    o3d.visualization.draw_geometries([inlier_cloud, corner_pcd])
    # corner_points = [diagonal_points[0], diagonal_points[1],
    #                  diagonal_points2[0], diagonal_points2[1]]
    # Log.info("Edge lengths from the 3D-marked corners:")
    # print(np.linalg.norm(diagonal_points[0] - diagonal_points2[0]))
    # print(np.linalg.norm(diagonal_points[0] - diagonal_points2[1]))
    # print(np.linalg.norm(diagonal_points[1] - diagonal_points2[0]))
    # print(np.linalg.norm(diagonal_points[1] - diagonal_points2[1]))
    # 5. Build a new point cloud to represent the corners and visualise it
    # corner_pcd = o3d.geometry.PointCloud()
    # corner_pcd.points = o3d.utility.Vector3dVector(corner_points)
    # corner_pcd.paint_uniform_color([1, 0, 0])
    return corner_pcd
def create_spheres(points, sizes=None, color=[0, 0.706, 1]):
    """
    Create a set of spheres to visualise the points of a cloud, allowing a different size per point.
    Args:
        points: N x 3 array of point positions
        sizes: N-element array/list of sphere radii; if None, all spheres get the same size
        color: RGB colour, default light blue
    Returns:
        spheres: list of Open3D geometry objects, one sphere per point
    """
    spheres = []
    if sizes is None:
        sizes = [1] * len(points)  # default: identical radius for every sphere
    for i, point in enumerate(points):
        sphere = o3d.geometry.TriangleMesh.create_sphere(radius=sizes[i])
        sphere.translate(point)  # move the sphere to the point position
        sphere.paint_uniform_color(color)
        spheres.append(sphere)
    return spheres
def cal_marks_3d_conners(show=False):
marks_3d_conners = []
for i in range(4):
# mark_ply_file_path = './detect_cloud/' + str(i) + '.ply'
mark_ply_file_path = conf_temp_cloud_path + str(i) + '.ply'
        # Compute the calibration-board dimensions
# mark_ply_file_path = './detect_cloud/1.ply'
mark_pcd = o3d.io.read_point_cloud(mark_ply_file_path)
plane_model, inlier_cloud, inlier_pcd_correct = correct_mark_cloud(mark_pcd)
# t_pcd = o2t(inlier_cloud)
# _dir = dir(t_pcd)
# for it in _dir:
# print(it)
        # t_boundaries, mask = t_pcd.compute_boundary_points(0.05, 60)
        # boundaries = t2o(t_boundaries)
        # # Compute curvature
        # curvatures = calculate_curvature(inlier_cloud)
        # threshold = np.percentile(curvatures, 95)  # use the 95th percentile as the threshold
        # corner_indices = np.where(curvatures > threshold)[0]
        # t_corner_pcd = inlier_cloud.select_by_index(corner_indices.tolist())
        # t_corner_pcd.paint_uniform_color([0, 0, 1])  # paint the corner points blue
used_pcd = inlier_cloud # inlier_cloud / inlier_pcd_correct
pcd_conners_2d = select_conners_from_mark_cloud_2d(used_pcd, plane_model)
pcd_conners_2d_correct, center_point = correct_mask_conner(used_pcd, pcd_conners_2d)
Log.info("修正后2D标记角点得到得边长为")
for i in range(4):
print(np.linalg.norm(pcd_conners_2d_correct.points[i] - pcd_conners_2d_correct.points[(i+1)%4]))
# pcd_conners_3d = select_conners_from_mark_cloud_3d(inlier_pcd_correct)
marks_3d_conners.append(pcd_conners_2d)
if show:
            # Radius of the spheres displayed at the corners, in metres
r = 0.006
sizes = [r,r,r,r]
spheres_2d = create_spheres(pcd_conners_2d.points, sizes, color=[0, 0.706, 1])
spheres_3d = create_spheres(pcd_conners_2d_correct.points, sizes, color=[0, 0, 0])
spheres_center_3d = create_spheres([center_point], [0.01], color=[0, 1, 1])
o3d.visualization.draw_geometries([
spheres_2d[0],spheres_2d[1],spheres_2d[2],spheres_2d[3],
spheres_3d[0],spheres_3d[1],spheres_3d[2],spheres_3d[3],
spheres_center_3d[0],
mark_pcd.paint_uniform_color([1, 0, 0]),
inlier_pcd_correct.paint_uniform_color([0, 1, 0]),])
    # Mark order: top-left, top-right, bottom-left, bottom-right
    # Each mark's corners are returned in left, top, right, bottom order
return marks_3d_conners
def cal_marks_3d_conners_simple(marks_pcd_4, show=False):
marks_3d_conners = []
for i in range(4):
# mark_ply_file_path = './detect_cloud/' + str(i) + '.ply'
mark_ply_file_path = conf_temp_cloud_path + str(i) + '.ply'
        # Compute the calibration-board dimensions
        # mark_ply_file_path = './detect_cloud/1.ply'
        # Do not read from file
        # mark_pcd = o3d.io.read_point_cloud(mark_ply_file_path)
mark_pcd = marks_pcd_4[i]
plane_model, inlier_cloud, inlier_pcd_correct = correct_mark_cloud(mark_pcd)
# t_pcd = o2t(inlier_cloud)
# _dir = dir(t_pcd)
# for it in _dir:
# print(it)
        # t_boundaries, mask = t_pcd.compute_boundary_points(0.05, 60)
        # boundaries = t2o(t_boundaries)
        # # Compute curvature
        # curvatures = calculate_curvature(inlier_cloud)
        # threshold = np.percentile(curvatures, 95)  # use the 95th percentile as the threshold
        # corner_indices = np.where(curvatures > threshold)[0]
        # t_corner_pcd = inlier_cloud.select_by_index(corner_indices.tolist())
        # t_corner_pcd.paint_uniform_color([0, 0, 1])  # paint the corner points blue
used_pcd = inlier_cloud # inlier_cloud / inlier_pcd_correct
# inlier_cloud -> box
# inlier_pcd_correct -> box2\box3
pcd_conners_2d = select_conners_from_mark_cloud_2d(used_pcd, plane_model)
pcd_conners_2d_correct, center_point = correct_mask_conner(used_pcd, pcd_conners_2d)
Log.info("修正后2D标记角点得到得边长为")
for i in range(4):
print(np.linalg.norm(pcd_conners_2d_correct.points[i] - pcd_conners_2d_correct.points[(i+1)%4]))
# pcd_conners_3d = select_conners_from_mark_cloud_3d(inlier_pcd_correct)
marks_3d_conners.append(pcd_conners_2d_correct) # pcd_conners_2d / pcd_conners_2d_correct
# print(marks_3d_conners[0].points)
# print(marks_3d_conners[0].points[0])
# print(np.linalg.norm(marks_3d_conners[0].points[0] - marks_3d_conners[0].points[1]))
# print(np.linalg.norm(marks_3d_conners[0].points[1] - marks_3d_conners[0].points[2]))
# print(np.linalg.norm(marks_3d_conners[0].points[2] - marks_3d_conners[0].points[3]))
# print(np.linalg.norm(marks_3d_conners[0].points[3] - marks_3d_conners[0].points[0]))
# Log.info("3D->3D 通过相邻角点计算得到的边长进行对比")
if show:
            # Radius of the spheres displayed at the corners, in metres
r = 0.006
sizes = [r,r,r,r]
spheres_2d = create_spheres(pcd_conners_2d.points, sizes, color=[0, 0.706, 1])
spheres_3d = create_spheres(pcd_conners_2d_correct.points, sizes, color=[0, 0, 0])
spheres_center_3d = create_spheres([center_point], [0.01], color=[0, 1, 1])
o3d.visualization.draw_geometries([
spheres_2d[0],spheres_2d[1],spheres_2d[2],spheres_2d[3],
spheres_3d[0],spheres_3d[1],spheres_3d[2],spheres_3d[3],
spheres_center_3d[0],
mark_pcd.paint_uniform_color([1, 0, 0]),
inlier_pcd_correct.paint_uniform_color([0, 1, 0]),])
    # Mark order: top-left, top-right, bottom-left, bottom-right
    # Each mark's corners are returned in left, top, right, bottom order
return marks_3d_conners
def print_diff(aurco_pt1, aurco_pt2, lidar_pt1, lidar_pt2):
aurco_distance = np.linalg.norm(aurco_pt1 - aurco_pt2)
lidar_distance = np.linalg.norm(lidar_pt1 - lidar_pt2)
print(aurco_distance)
print(lidar_distance)
print(abs(aurco_distance - lidar_distance))
# Test the 2D corner segmentation algorithm
def _test_select_conners_from_mark_cloud_2d_2():
pass
if __name__ == '__main__':
marks_3d_conners = []
    ply_file_path = os.path.join(os.path.dirname(__file__), "samples", "calibration", "11", "output.ply")
marks_pcd = seg_mark_cloud_simple(ply_file_path, kk)
for i in range(4):
mark_ply_file_path = conf_temp_cloud_path + str(i) + '.ply'
mark_pcd = o3d.io.read_point_cloud(mark_ply_file_path)
plane_model, inlier_cloud, inlier_pcd_correct = correct_mark_cloud(mark_pcd)
# pcd_conners_2d = select_conners_from_mark_cloud_2d(mark_pcd, plane_model)
pcd_conners_2d = select_conners_from_mark_cloud_3d_v2(inlier_pcd_correct)
# image_project = cv2.imread("../caowei/new/output.png")
# # if image_project is None:
# # print("image_project is None!")
# # exit()
    # # # Segment the calibration boards
# ply_file_path = '../caowei/new/20241227175529496.ply'
# marks_pcd = seg_mark_cloud(ply_file_path, kk)
# if len(marks_pcd) != MARK_COUNT:
# exit()
# LEFT = 0
# TOP = 1
# RIGHT = 2
# BOTTOM = 3
# frame, marks_aurco = aurco_3d_detect.cal_marks_3d_conners("../caowei/new/output.png")
# marks_pcd = cal_marks_3d_conners()
# marks_pcd_new = []
# # marks_pcd_new.append(marks_pcd[2].points)
# # marks_pcd_new.append(marks_pcd[3].points)
# # marks_pcd_new.append(marks_pcd[1].points)
# # marks_pcd_new.append(marks_pcd[0].points)
# tmmp = []
# for i in range(4):
# tmmp.append(marks_pcd[2].points[i])
# marks_pcd_new.append(tmmp)
# tmmp = []
# for i in range(4):
# tmmp.append(marks_pcd[3].points[i])
# marks_pcd_new.append(tmmp)
# tmmp = []
# for i in range(4):
# tmmp.append(marks_pcd[1].points[i])
# marks_pcd_new.append(tmmp)
# tmmp = []
# for i in range(4):
# tmmp.append(marks_pcd[0].points[i])
# marks_pcd_new.append(tmmp)
# print("======== 0 left - 1 right =========")
# print_diff(marks_aurco[0][LEFT] , marks_aurco[1][RIGHT], marks_pcd_new[0][LEFT], marks_pcd_new[1][RIGHT])
# print("======== 0 left - 2 right =========")
# print_diff(marks_aurco[0][LEFT] , marks_aurco[2][RIGHT], marks_pcd_new[0][LEFT], marks_pcd_new[2][RIGHT])
# print("======== 0 left - 3 right =========")
# print_diff(marks_aurco[0][LEFT] , marks_aurco[3][RIGHT], marks_pcd_new[0][LEFT], marks_pcd_new[3][RIGHT])
# print("======== 0 right - 1 left =========")
# print_diff(marks_aurco[0][RIGHT] , marks_aurco[1][LEFT], marks_pcd_new[0][RIGHT] , marks_pcd_new[1][LEFT])
# print("======== 0 top - 3 bottom =========")
# print_diff(marks_aurco[0][TOP] , marks_aurco[3][BOTTOM], marks_pcd_new[0][TOP] , marks_pcd_new[3][BOTTOM])
# print("======== 2 left - 1 right =========")
# print_diff(marks_aurco[2][LEFT] , marks_aurco[1][RIGHT], marks_pcd_new[2][LEFT] , marks_pcd_new[1][RIGHT])
# print("======== 2 left - 3 right =========")
# print_diff(marks_aurco[2][LEFT] , marks_aurco[3][RIGHT], marks_pcd_new[2][LEFT] , marks_pcd_new[3][RIGHT])
# exit()