import cv2

print(cv2.__version__)  # 4.9.0

import open3d as o3d

print(o3d.__version__)  # 0.18.0

import numpy as np
import os  # os.path is used below when building sample file paths
from os import abort
import sys

sys.path.append(r"./")

from utils import Log
from config import *
import utils
import pandas as pd


def extend_line_3d(pt1, pt2, distance):
    """
    Compute a point on the extension of the line through two 3D points.

    Parameters:
    - pt1: coordinates of the first point (x1, y1, z1), as a list or array
    - pt2: coordinates of the second point (x2, y2, z2), as a list or array
    - distance: distance to extend beyond the second point
                (positive extends forward, negative goes back toward pt1)

    Returns:
    - the coordinates (x, y, z) of the extended point, as an array
    """
    # Convert to NumPy arrays
    point1 = np.array(pt1)
    point2 = np.array(pt2)
    # Direction vector from pt1 to pt2
    direction = point2 - point1
    # Unit direction vector
    direction_unit = direction / np.linalg.norm(direction)
    # Point on the extended line
    extended_point = point2 + distance * direction_unit
    return extended_point

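
# Quick sanity check for extend_line_3d (illustrative only, made-up points): extending
# the segment (0,0,0)->(1,0,0) by 1.0 beyond the second point should land at (2, 0, 0).
def _demo_extend_line_3d():
    p1 = [0.0, 0.0, 0.0]
    p2 = [1.0, 0.0, 0.0]
    print(extend_line_3d(p1, p2, 1.0))   # expected: [2. 0. 0.]
    print(extend_line_3d(p1, p2, -0.5))  # negative distance moves back toward p1: [0.5 0. 0.]
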
def filter_cloud(pcd):
    # Get the point coordinates from the point cloud
    points = np.asarray(pcd.points)

    # Define the x, y, z ranges
    x_min, x_max = -2.0, 2.0
    y_min, y_max = -2.0, 2.0
    z_min, z_max = 0, global_z_max

    # Build a boolean mask
    mask = (
        (points[:, 0] >= x_min) & (points[:, 0] <= x_max) &
        (points[:, 1] >= y_min) & (points[:, 1] <= y_max) &
        (points[:, 2] >= z_min) & (points[:, 2] <= z_max)
    )

    # Apply the mask to filter the point cloud
    filtered_points = points[mask]

    # Update the point cloud object
    pcd.points = o3d.utility.Vector3dVector(filtered_points)
    return pcd

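
# Illustrative usage sketch for filter_cloud: "scene.ply" is a hypothetical stand-in
# path, and global_z_max is assumed to come from config. It crops a loaded cloud to
# the +-2 m box used by the rest of the pipeline.
def _demo_filter_cloud():
    cloud = o3d.io.read_point_cloud("scene.ply")  # hypothetical input file
    cropped = filter_cloud(cloud)
    print(len(cropped.points), "points remain after cropping")
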
def sort_marks_aruco_3d(masks):
    new_mask = []
    left_pt_0, left_pt_1, left_pt_2, left_pt_3 = 0, 0, 0, 0
    delete_index = []
    coordinate_temp = [0, 1, 2, 3]
    # max_val = 0
    max_val = -999999.0
    min_val = 999999.0
    for i in range(len(masks)):
        left_pt = masks[i][0]
        val = left_pt[0] + left_pt[1]
        if val > max_val:
            left_pt_3 = i
            max_val = val
        if val < min_val:
            left_pt_0 = i
            min_val = val
    delete_index.append(left_pt_0)
    delete_index.append(left_pt_3)
    coordinate_temp = np.delete(coordinate_temp, delete_index, axis=0)
    if masks[coordinate_temp[0]][0][0] > masks[coordinate_temp[1]][0][0]:
        left_pt_1 = coordinate_temp[0]
        left_pt_2 = coordinate_temp[1]
    else:
        left_pt_2 = coordinate_temp[0]
        left_pt_1 = coordinate_temp[1]
    new_mask.append(masks[left_pt_0])
    new_mask.append(masks[left_pt_1])
    new_mask.append(masks[left_pt_2])
    new_mask.append(masks[left_pt_3])
    return new_mask

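
# Illustrative sketch (made-up 2D corners, not from the pipeline): sort_marks_aruco_3d
# orders four boards by the x+y value of each board's first corner -- smallest goes to
# index 0, largest to index 3, and the remaining two are split by x. In image
# coordinates (y pointing down) that gives top-left, top-right, bottom-left, bottom-right.
def _demo_sort_marks_aruco_3d():
    boards = [
        [[5.0, 5.0]],  # bottom-right
        [[0.0, 0.0]],  # top-left
        [[4.0, 0.0]],  # top-right
        [[0.0, 4.0]],  # bottom-left
    ]
    ordered = sort_marks_aruco_3d(boards)
    print([b[0] for b in ordered])  # expected: [[0,0], [4,0], [0,4], [5,5]]
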
def sort_marks_aruco_3d_v2(masks):
    new_mask = []
    left_pt_0, left_pt_1, left_pt_2, left_pt_3 = 0, 0, 0, 0
    delete_index = []
    coordinate_temp = [0, 1, 2, 3]
    max_val = 0
    min_val = 999999
    for i in range(len(masks)):
        left_pt = masks[i][0][0]
        val = left_pt[0] + left_pt[1]
        if val > max_val:
            left_pt_3 = i
            max_val = val
        if val < min_val:
            left_pt_0 = i
            min_val = val
    delete_index.append(left_pt_0)
    delete_index.append(left_pt_3)
    coordinate_temp = np.delete(coordinate_temp, delete_index, axis=0)
    if masks[coordinate_temp[0]][0][0][0] > masks[coordinate_temp[1]][0][0][0]:
        left_pt_1 = coordinate_temp[0]
        left_pt_2 = coordinate_temp[1]
    else:
        left_pt_2 = coordinate_temp[0]
        left_pt_1 = coordinate_temp[1]
    new_mask.append(masks[left_pt_0])
    new_mask.append(masks[left_pt_1])
    new_mask.append(masks[left_pt_2])
    new_mask.append(masks[left_pt_3])
    return new_mask

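
# Note (added for clarity): sort_marks_aruco_3d_v2 appears to apply the same ordering
# logic as sort_marks_aruco_3d, but expects one extra nesting level per entry, e.g. the
# (1, 4, 2)-shaped corner arrays returned by OpenCV's ArUco detector, hence the
# masks[i][0][0] indexing.
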
def cal_camera_marks_3d_conners(file_path):
    marks_3d_conners = []
    frame = cv2.imread(file_path)
    if frame is None:
        abort()

    # Define the ArUco dictionary to use
    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect the ArUco markers
    # corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    aruco_detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)
    corners, ids, rejectedImgPoints = aruco_detector.detectMarkers(gray)

    rotation_matrices = []
    if ids is not None:
        # Refine the corners to sub-pixel accuracy
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 0.0003)
        for corner in corners:
            cv2.cornerSubPix(gray, corner, winSize=(3, 3), zeroZone=(-1, -1), criteria=criteria)

        # Marker corners in world coordinates (the marker is assumed to lie in the z=0 plane)
        obj_points = np.array([[-marker_size/2, -marker_size/2, 0],   # top-left
                               [ marker_size/2, -marker_size/2, 0],   # top-right
                               [ marker_size/2,  marker_size/2, 0],   # bottom-right
                               [-marker_size/2,  marker_size/2, 0]], dtype=np.float32)  # bottom-left

        # obj_points = np.array([[-marker_size/2,  marker_size/2, 0],   # bottom-left
        #                        [ marker_size/2,  marker_size/2, 0],   # bottom-right
        #                        [ marker_size/2, -marker_size/2, 0],   # top-right
        #                        [-marker_size/2, -marker_size/2, 0]], dtype=np.float32)  # top-left

        # Iterate over all detected markers
        Log.info("2D->3D: compare edge lengths computed from adjacent corners")
        for i, corner in enumerate(corners):
            Log.error("2D-2D edge lengths")
            print(np.linalg.norm(corner[0][0] - corner[0][1]) / 0.7)
            print(np.linalg.norm(corner[0][1] - corner[0][2]) / 0.7)
            print(np.linalg.norm(corner[0][2] - corner[0][3]) / 0.7)
            print(np.linalg.norm(corner[0][3] - corner[0][0]) / 0.7)

            conners_pcd = o3d.geometry.PointCloud()
            # for j in range(4):
            #     conners_pcd.points.append(obj_points[j])

            # Estimate the pose of each marker
            # _, rvec, tvec = cv2.solvePnP(obj_points, corner, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_DLS)

            _, rvec, tvec = cv2.solvePnP(obj_points, corner, camera_matrix, dist_coeffs)

            # Draw the axes of each marker
            cv2.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, 0.1)

            # Convert the rotation vector to a rotation matrix (if needed)
            rotation_matrix, _ = cv2.Rodrigues(rvec)
            rotation_matrices.append(rotation_matrix)

            # Invert the matrix with cv2.invert()
            retval, rotation_matrix_inv = cv2.invert(rotation_matrix)
            # if retval != 0: print("Matrix inverse:\n", rotation_matrix_inv)
            # else: print("Matrix is not invertible")

            H = np.eye(4)
            H[:3, :3] = rotation_matrix
            H[:3, 3] = tvec.flatten()
            for j in range(4):
                P_world = np.array([obj_points[j][0], obj_points[j][1], obj_points[j][2], 1])
                P_camera_homogeneous = H @ P_world  # multiply in homogeneous coordinates
                conners_pcd.points.append(P_camera_homogeneous[:-1])

            # conners_pcd.rotate(rotation_matrix, (0,0,0))
            # conners_pcd.translate(tvec)

            # Draw the corner labels on the image (optional)
            for j, point in enumerate(corner[0]):
                text = str(conners_pcd.points[j])
                if j == 3: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 255), 10, cv2.LINE_AA)
                elif j == 2: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 0), 10, cv2.LINE_AA)
                elif j == 1: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 255, 255), 10, cv2.LINE_AA)
                else: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 0, 255), 10, cv2.LINE_AA)

            # Convert the point cloud's points to a NumPy array
            points = np.asarray(conners_pcd.points)
            reversed_points = points[::-1]
            conners_pcd.points = o3d.utility.Vector3dVector(reversed_points)

            # Compute the board corners and edge lengths
            e1p1 = extend_line_3d(conners_pcd.points[2], conners_pcd.points[1], b2)
            e1p2 = extend_line_3d(conners_pcd.points[3], conners_pcd.points[0], b2)
            left2 = extend_line_3d(e1p2, e1p1, b1)
            top = extend_line_3d(e1p1, e1p2, board_size - b1 - marker_size)

            e2p1 = extend_line_3d(conners_pcd.points[1], conners_pcd.points[0], board_size - b1 - marker_size)
            e2p2 = extend_line_3d(conners_pcd.points[2], conners_pcd.points[3], board_size - b1 - marker_size)
            top2 = extend_line_3d(e2p2, e2p1, b2)
            right = extend_line_3d(e2p1, e2p2, board_size - b2 - marker_size)

            e3p1 = extend_line_3d(conners_pcd.points[0], conners_pcd.points[3], board_size - b2 - marker_size)
            e3p2 = extend_line_3d(conners_pcd.points[1], conners_pcd.points[2], board_size - b2 - marker_size)
            right2 = extend_line_3d(e3p2, e3p1, board_size - b1 - marker_size)
            bottom = extend_line_3d(e3p1, e3p2, b1)

            e4p1 = extend_line_3d(conners_pcd.points[3], conners_pcd.points[2], b2)
            e4p2 = extend_line_3d(conners_pcd.points[0], conners_pcd.points[1], b2)
            bottom2 = extend_line_3d(e4p2, e4p1, board_size - b2 - marker_size)
            left = extend_line_3d(e4p1, e4p2, b2)

            print(
                np.linalg.norm(left - left2),
                np.linalg.norm(top - top2),
                np.linalg.norm(right - right2),
                np.linalg.norm(bottom - bottom2)
            )
            print(
                np.linalg.norm(top - left),
                np.linalg.norm(right - top),
                np.linalg.norm(bottom - right),
                np.linalg.norm(left - bottom),
            )
            marks_3d_conners.append([left, top, right, bottom])

    marks_3d_conners = sort_marks_aruco_3d(marks_3d_conners)
    # Return the corners in left, top, right, bottom order
    return frame, marks_3d_conners

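
# Illustrative sketch (made-up pose, not from the pipeline): the rvec/tvec returned by
# cv2.solvePnP can be packed into a 4x4 homogeneous transform, which is what the
# H @ P_world step above does for each marker corner.
def _demo_pose_to_homogeneous():
    rvec = np.array([[0.0], [0.0], [0.0]])   # no rotation
    tvec = np.array([[0.1], [0.2], [1.5]])   # 1.5 m in front of the camera
    R, _ = cv2.Rodrigues(rvec)
    H = np.eye(4)
    H[:3, :3] = R
    H[:3, 3] = tvec.flatten()
    corner_marker_frame = np.array([0.05, -0.05, 0.0, 1.0])  # marker-frame corner (homogeneous)
    corner_camera_frame = H @ corner_marker_frame
    print(corner_camera_frame[:3])  # expected: [0.15 0.15 1.5]
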
def cal_camera_marks_3d_conners_v2(file_path):
    marks_3d_conners = []
    frame = cv2.imread(file_path)
    if frame is None:
        abort()

    # Define the ArUco dictionary to use
    aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect the ArUco markers
    # corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    aruco_detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)
    corners, ids, rejectedImgPoints = aruco_detector.detectMarkers(gray)

    rotation_matrices = []
    if ids is not None:
        # Refine the corners to sub-pixel accuracy
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 50, 0.0003)
        for corner in corners:
            cv2.cornerSubPix(gray, corner, winSize=(3, 3), zeroZone=(-1, -1), criteria=criteria)

        # Marker corners in world coordinates (the marker is assumed to lie in the z=0 plane)
        obj_points = np.array([[-marker_size/2, -marker_size/2, 0],   # top-left
                               [ marker_size/2, -marker_size/2, 0],   # top-right
                               [ marker_size/2,  marker_size/2, 0],   # bottom-right
                               [-marker_size/2,  marker_size/2, 0]], dtype=np.float32)  # bottom-left

        # obj_points = np.array([[-marker_size/2,  marker_size/2, 0],   # bottom-left
        #                        [ marker_size/2,  marker_size/2, 0],   # bottom-right
        #                        [ marker_size/2, -marker_size/2, 0],   # top-right
        #                        [-marker_size/2, -marker_size/2, 0]], dtype=np.float32)  # top-left

        # Iterate over all detected markers
        Log.error("2D->2D: compare edge lengths computed from adjacent corners")
        # for i, cornner in enumerate(corners):
        print(np.linalg.norm(corners[0][0][0] - corners[0][0][1]))
        print(np.linalg.norm(corners[0][0][1] - corners[0][0][2]))
        print(np.linalg.norm(corners[0][0][2] - corners[0][0][3]))
        print(np.linalg.norm(corners[0][0][3] - corners[0][0][0]))
        for i, corner in enumerate(corners):
            conners_pcd = o3d.geometry.PointCloud()
            # for j in range(4):
            #     conners_pcd.points.append(obj_points[j])

            # Estimate the pose of each marker
            # _, rvec, tvec = cv2.solvePnP(obj_points, corner, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_DLS)

            _, rvec, tvec = cv2.solvePnP(obj_points, corner, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_EPNP)

            # Draw the axes of each marker
            cv2.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, 0.1)

            # Convert the rotation vector to a rotation matrix (if needed)
            rotation_matrix, _ = cv2.Rodrigues(rvec)
            rotation_matrices.append(rotation_matrix)

            # Invert the matrix with cv2.invert()
            retval, rotation_matrix_inv = cv2.invert(rotation_matrix)
            # if retval != 0: print("Matrix inverse:\n", rotation_matrix_inv)
            # else: print("Matrix is not invertible")

            H = np.eye(4)
            H[:3, :3] = rotation_matrix
            H[:3, 3] = tvec.flatten()
            for j in range(4):
                P_world = np.array([obj_points[j][0], obj_points[j][1], obj_points[j][2], 1])
                P_camera_homogeneous = H @ P_world  # multiply in homogeneous coordinates
                conners_pcd.points.append(P_camera_homogeneous[:-1])

            # conners_pcd.rotate(rotation_matrix, (0,0,0))
            # conners_pcd.translate(tvec)

            # Draw the corner labels on the image (optional)
            for j, point in enumerate(corner[0]):
                text = str(conners_pcd.points[j])
                if j == 3: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 255), 10, cv2.LINE_AA)
                elif j == 2: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 0), 10, cv2.LINE_AA)
                elif j == 1: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 255, 255), 10, cv2.LINE_AA)
                else: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 0, 255), 10, cv2.LINE_AA)

            # Convert the point cloud's points to a NumPy array
            points = np.asarray(conners_pcd.points)
            reversed_points = points[::-1]
            conners_pcd.points = o3d.utility.Vector3dVector(reversed_points)

            left = conners_pcd.points[1]
            top = conners_pcd.points[0]
            right = conners_pcd.points[3]
            bottom = conners_pcd.points[2]
            # left = conners_pcd.points[3]
            # top = conners_pcd.points[0]
            # right = conners_pcd.points[1]
            # bottom = conners_pcd.points[2]

            print(
                np.linalg.norm(top - left),
                np.linalg.norm(right - top),
                np.linalg.norm(bottom - right),
                np.linalg.norm(left - bottom),
            )
            marks_3d_conners.append([left, top, right, bottom])

    marks_3d_conners = sort_marks_aruco_3d(marks_3d_conners)
    # Return the corners in left, top, right, bottom order
    return frame, marks_3d_conners

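
# Illustrative usage sketch: "samples/board.png" is a hypothetical stand-in path, and
# camera_matrix / dist_coeffs / marker_size are assumed to come from config. It detects
# the marker boards in a camera image and prints their estimated 3D corners in the
# camera frame.
def _demo_camera_marks():
    image_path = os.path.join(os.path.dirname(__file__), "samples", "board.png")  # hypothetical file
    frame, marks = cal_camera_marks_3d_conners_v2(image_path)
    for board in marks:
        print(board)  # [left, top, right, bottom] corners, one list per board
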
def cal_lidar_marks_3d_conners(frame, r_file_path):
    # file_path = os.path.dirname(__file__) + "\\samples\\1.png"

    frame, marks_3d_conners = [], []
    file_path = os.path.dirname(__file__) + "\\samples\\output.ply"
    src_cloud_load = o3d.io.read_point_cloud(file_path)

    reflectivitys = utils.parse_custom_attribute(file_path, "reflectivity")
    # file_path = os.path.dirname(__file__) + "\\samples\\output_data.ply"
    # src_cloud_data = o3d.io.read_point_cloud(file_path)

    # Rotate the point cloud
    Rx = src_cloud_load.get_rotation_matrix_from_xyz((roll, 0, 0))
    Ry = src_cloud_load.get_rotation_matrix_from_xyz((0, pitch, 0))
    Rz = src_cloud_load.get_rotation_matrix_from_xyz((0, 0, yaw))
    src_cloud_load.rotate(Rx, (1, 0, 0))
    src_cloud_load.rotate(Ry, (0, 1, 0))
    src_cloud_load.rotate(Rz, (0, 0, 1))
    src_cloud_load.translate(T_vector)
    # o3d.visualization.draw_geometries([src_cloud_load], mesh_show_wireframe=True)

    # Filter the point cloud
    ## Points that remain after filtering by distance and reflectivity
    filte_src_cloud_load = []
    ## Reflectivity values of the kept points
    filte_sreflectivitys = []
    src_cloud_load_array = np.asarray(src_cloud_load.points)
    index = 0
    for point_3d in src_cloud_load_array:
        if -2.0 < point_3d[0] < 2.0 and -2.0 < point_3d[1] < 2.0 and point_3d[2] < global_z_max:
            filte_src_cloud_load.append((point_3d[0], point_3d[1], point_3d[2]))
            filte_sreflectivitys.append([reflectivitys[index]])
        index += 1

    # Project the point cloud
    ## 2D image of the projected point cloud
    im_project = np.zeros((global_h, global_w, 3), dtype=np.uint8)
    ## 2D-to-3D correspondence image
    im_2d_to_3d = np.zeros((global_h, global_w, 3), dtype=np.float64)
    rvec = np.array([np.array([0.]), np.array([0.]), np.array([0.])])
    tvec = np.float64([0.0, 0.0, 0.0])
    pts_2d, _ = cv2.projectPoints(np.asarray(filte_src_cloud_load), rvec, tvec, camera_matrix, dist_coeffs)

    list_pts_2d = pts_2d.tolist()
    list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
    list_pts_2d_all_mask = []
    for i, pt_2d in enumerate(list_pts_2d):
        if 0 <= pt_2d[0][0] < global_w and 0 <= pt_2d[0][1] < global_h:
            # Keep only points with reflectivity below 30
            if filte_sreflectivitys[i][0] < 30:
                # Set the pixel in the 2D projection image
                im_project[pt_2d[0][1], pt_2d[0][0]] = (255, 255, 255)
                # Store the 3D point in the 2D-to-3D correspondence image
                im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]] = filte_src_cloud_load[i]
                # Keep the 2D coordinate in the same order as the source data, i.e. (pt_2d[0][0], pt_2d[0][1])
                list_pts_2d_all_mask.append([pt_2d[0][0], pt_2d[0][1]])

    # Minimum and maximum pixel values of the projection image
    min_val = np.min(im_project)
    max_val = np.max(im_project)
    print(min_val)
    print(max_val)

    # Dilation kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))

    # Dilate, then compute connected regions
    im_project_delate = cv2.morphologyEx(im_project, cv2.MORPH_DILATE, kernel)
    im_project_delate = cv2.cvtColor(im_project_delate, cv2.COLOR_RGB2GRAY)
    _, im_project_delate_binary = cv2.threshold(im_project_delate, 128, 255, cv2.THRESH_BINARY)

    # Find contours
    # Connected-component analysis
    num_labels, labels, stats, centroids = \
        cv2.connectedComponentsWithStats(im_project_delate_binary, connectivity=8)

    # Ignore the background component (label 0)
    sizes = stats[1:, -1]        # areas of all connected components (background excluded)
    rectangles = stats[1:, :4]   # bounding boxes of all connected components (background excluded)

    # Sort the indices by area and keep the four largest components
    sorted_indexes_by_size = sorted(range(sizes.shape[0]), key=lambda k: sizes[k], reverse=True)[:4]

    print("Indices of the four largest connected components:")
    print(sorted_indexes_by_size)

    # New array of the same size as labels, holding only the four largest components
    filtered_labels = np.zeros_like(labels)

    for i in sorted_indexes_by_size:
        filtered_labels[labels == i + 1] = i + 1  # +1 because the background component was skipped

    # Assign a color to each connected region
    colors = np.random.randint(0, 255, size=(len(sorted_indexes_by_size), 3), dtype=np.uint8)

    mask4_conners = []
    for i, idx in enumerate(sorted_indexes_by_size):
        colored_filtered_labels = np.zeros((filtered_labels.shape[0], filtered_labels.shape[1], 1), dtype=np.uint8)
        color = colors[i]
        mask = (filtered_labels == idx + 1)
        colored_filtered_labels[mask] = 255
        non_zero = cv2.findNonZero(colored_filtered_labels)
        rect = cv2.minAreaRect(non_zero)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        conners = utils.sort_conners_2d(list(box))
        # The 4 corners are sorted as left, top, right, bottom
        mask4_conners.append(conners)

    # Sort the mask boards: top-left, top-right, bottom-left, bottom-right
    mask4_conners = sort_marks_aruco_3d(mask4_conners)
    ## For testing the board order: top-left, top-right, bottom-left, bottom-right
    # for i in range(4):
    #     conners = mask4_conners[i]
    #     cv2.drawContours(im_project, [np.asarray(conners)], 0, (255), 10)
    #     im_tmp = cv2.resize(im_project, (1000, 750))
    #     cv2.imshow("1", im_tmp)
    #     cv2.waitKey(0)

    # Initialize the per-board 2D coordinate lists and 3D point cloud arrays
    pts_2d_mask_list = []
    im_2d_to_3d_mask_list = []
    for i in range(4):
        pts_2d_mask = []
        im_2d_to_3d_mask = np.zeros((global_h, global_w, 3), dtype=np.float64)
        pts_2d_mask_list.append(pts_2d_mask)
        im_2d_to_3d_mask_list.append(im_2d_to_3d_mask)

    # Group the 2D coordinates and 3D point cloud data by marker board
    ## Iterate over the filtered 2D point coordinates
    for pt_index in list_pts_2d_all_mask:
        for i in range(4):
            conners = mask4_conners[i]
            if cv2.pointPolygonTest(np.asarray(conners), (pt_index[0], pt_index[1]), False) >= 0:
                # Keep the 2D coordinate order, i.e. (pt_index[0], pt_index[1])
                pts_2d_mask_list[i].append([pt_index[0], pt_index[1]])
                # im_2d_to_3d_mask_list[i].append(im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]])
                im_2d_to_3d_mask_list[i][pt_index[1], pt_index[0]] = im_2d_to_3d[pt_index[1], pt_index[0]]
                break

    # For each of the 4 marker boards, compute the 3D coordinates of its 4 corners
    for m in range(4):
        pts_2d_mask = pts_2d_mask_list[m]
        im_2d_to_3d_mask = im_2d_to_3d_mask_list[m]
        H = utils.cal_H(im_2d_to_3d_mask, pts_2d_mask, pts_2d_mask)

        # Use the homography to compute the corners in normalized 3D coordinates
        H_inv = np.linalg.inv(H)
        conners_pcd = o3d.geometry.PointCloud()
        for i in range(4):
            conner = conners[i]
            x, y = conner[0], conner[1]
            pts_hom = np.array([x, y, 1])
            pts_3d = np.dot(H_inv, pts_hom.T)
            pts_3d = pts_3d / pts_3d[2]
            conners_pcd.points.append((pts_3d[0], pts_3d[1], pts_3d[2]))

        # Build the point cloud of the current calibration board
        mark_pcd = o3d.geometry.PointCloud()
        for i, pt_2d in enumerate(pts_2d_mask):
            mark_pcd.points.append([im_2d_to_3d[pt_2d[1], pt_2d[0]][0],
                                    im_2d_to_3d[pt_2d[1], pt_2d[0]][1],
                                    im_2d_to_3d[pt_2d[1], pt_2d[0]][2]])
        # Fit a plane to the board point cloud; reflectivity, noise, etc. affect the result
        plane_model, inlier_cloud, inlier_pcd_correct = utils.correct_mark_cloud(mark_pcd)
        # Recompute the spatial positions of the 3D corners
        conners_pcd = utils.reproject_cloud(plane_model, conners_pcd)

        Log.info("Edge lengths obtained from the 2D marker corners:")
        for i in range(4):
            print(np.linalg.norm(conners_pcd.points[i] - conners_pcd.points[(i + 1) % 4]))

    return frame, marks_3d_conners

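
# Illustrative sketch: utils.correct_mark_cloud is defined elsewhere and this is NOT its
# implementation. Open3D's built-in RANSAC plane segmentation is one plausible way to
# fit a plane to a board's point cloud, which is the role that step plays above.
def _demo_fit_board_plane(board_pcd):
    plane_model, inlier_idx = board_pcd.segment_plane(distance_threshold=0.01,
                                                      ransac_n=3,
                                                      num_iterations=1000)
    a, b, c, d = plane_model  # plane: a*x + b*y + c*z + d = 0
    inlier_cloud = board_pcd.select_by_index(inlier_idx)
    return plane_model, inlier_cloud
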
def cal_lidar_marks_3d_conner_v2(r_file_path):
    marks_3d_conners = []
    file_path = r_file_path
    src_cloud_load = o3d.io.read_point_cloud(file_path)

    reflectivitys = utils.parse_custom_attribute(file_path, "reflectivity")

    # Rotate the point cloud
    Rx = src_cloud_load.get_rotation_matrix_from_xyz((roll, 0, 0))
    Ry = src_cloud_load.get_rotation_matrix_from_xyz((0, pitch, 0))
    Rz = src_cloud_load.get_rotation_matrix_from_xyz((0, 0, yaw))
    src_cloud_load.rotate(Rx, (1, 0, 0))
    src_cloud_load.rotate(Ry, (0, 1, 0))
    src_cloud_load.rotate(Rz, (0, 0, 1))
    src_cloud_load.translate(T_vector)
    # o3d.visualization.draw_geometries([src_cloud_load], mesh_show_wireframe=True)

    # Filter the point cloud
    ## Points that remain after filtering by distance and reflectivity
    filte_src_cloud_load = []
    ## Reflectivity values of the kept points
    filte_sreflectivitys = []
    src_cloud_load_array = np.asarray(src_cloud_load.points)
    index = 0
    for point_3d in src_cloud_load_array:
        if -2.0 < point_3d[0] < 2.0 and -2.0 < point_3d[1] < 2.0 and point_3d[2] < global_z_max:
            filte_src_cloud_load.append((point_3d[0], point_3d[1], point_3d[2]))
            filte_sreflectivitys.append([reflectivitys[index]])
        index += 1

    # Project the point cloud
    ## 2D image of the projected point cloud
    im_project = np.zeros((global_h, global_w, 3), dtype=np.uint8)
    ## 2D-to-3D correspondence image
    im_2d_to_3d = np.zeros((global_h, global_w, 3), dtype=np.float64)
    rvec = np.array([np.array([0.]), np.array([0.]), np.array([0.])])
    tvec = np.float64([0.0, 0.0, 0.0])
    pts_2d, _ = cv2.projectPoints(np.asarray(filte_src_cloud_load), rvec, tvec, camera_matrix, dist_coeffs)

    list_pts_2d = pts_2d.tolist()
    list_pts_2d = [[[int(item) for item in subsublist] for subsublist in sublist] for sublist in list_pts_2d]
    list_pts_2d_all_mask = []
    for i, pt_2d in enumerate(list_pts_2d):
        if 0 <= pt_2d[0][0] < global_w and 0 <= pt_2d[0][1] < global_h:
            # Keep only points with reflectivity below 20
            if filte_sreflectivitys[i][0] < 20:
                # Set the pixel in the 2D projection image
                im_project[pt_2d[0][1], pt_2d[0][0]] = (255, 255, 255)
                # Store the 3D point in the 2D-to-3D correspondence image
                im_2d_to_3d[pt_2d[0][1], pt_2d[0][0]] = filte_src_cloud_load[i]
                # Keep the 2D coordinate in the same order as the source data, i.e. (pt_2d[0][0], pt_2d[0][1])
                list_pts_2d_all_mask.append([pt_2d[0][0], pt_2d[0][1]])

    # Minimum and maximum pixel values of the projection image
    min_val = np.min(im_project)
    max_val = np.max(im_project)
    print(min_val)
    print(max_val)

    # Dilation kernel
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))

    # Dilate, then compute connected regions
    im_project_delate = cv2.morphologyEx(im_project, cv2.MORPH_DILATE, kernel)
    im_project_delate_color = im_project_delate.copy()
    im_project_delate = cv2.cvtColor(im_project_delate, cv2.COLOR_RGB2GRAY)
    _, im_project_delate_binary = cv2.threshold(im_project_delate, 1, 255, cv2.THRESH_BINARY)

    # Find contours
    # Connected-component analysis
    num_labels, labels, stats, centroids = \
        cv2.connectedComponentsWithStats(im_project_delate_binary, connectivity=8)

    # Ignore the background component (label 0)
    sizes = stats[1:, -1]        # areas of all connected components (background excluded)
    rectangles = stats[1:, :4]   # bounding boxes of all connected components (background excluded)

    # Sort the indices by area and keep the four largest components
    sorted_indexes_by_size = sorted(range(sizes.shape[0]), key=lambda k: sizes[k], reverse=True)[:4]

    print("Indices of the four largest connected components:")
    print(sorted_indexes_by_size)

    # New array of the same size as labels, holding only the four largest components
    filtered_labels = np.zeros_like(labels)

    for i in sorted_indexes_by_size:
        filtered_labels[labels == i + 1] = i + 1  # +1 because the background component was skipped

    # Assign a color to each connected region
    colors = np.random.randint(0, 255, size=(len(sorted_indexes_by_size), 3), dtype=np.uint8)

    mask4_conners = []
    for i, idx in enumerate(sorted_indexes_by_size):
        colored_filtered_labels = np.zeros((filtered_labels.shape[0], filtered_labels.shape[1], 1), dtype=np.uint8)
        color = colors[i]
        mask = (filtered_labels == idx + 1)
        colored_filtered_labels[mask] = 255
        # AND this region mask with the original projection image
        im_project_gray = cv2.cvtColor(im_project, cv2.COLOR_BGR2GRAY)
        colored_filtered_labels = cv2.bitwise_and(colored_filtered_labels, im_project_gray)
        non_zero = cv2.findNonZero(colored_filtered_labels)

        # conners = utils.select_and_sort_conners_2d(non_zero)

        rect = cv2.minAreaRect(non_zero)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        conners = utils.sort_conners_2d(list(box))
        # The 4 corners are sorted as left, top, right, bottom
        mask4_conners.append(conners)

    # Sort the mask boards: top-left, top-right, bottom-left, bottom-right
    mask4_conners = sort_marks_aruco_3d(mask4_conners)
    ## For testing the board order: top-left, top-right, bottom-left, bottom-right
    for i in range(4):
        conners = mask4_conners[i]
        cv2.drawContours(im_project_delate_color, [np.asarray(conners)], 0, (0, 255, 0), 20)
        im_tmp = cv2.resize(im_project_delate_color, (1000, 750))
        cv2.imshow("1", im_tmp)
        cv2.waitKey(0)

    Log.error("3D-2D")
    print(np.linalg.norm(mask4_conners[0][0] - mask4_conners[0][1]))
    print(np.linalg.norm(mask4_conners[0][1] - mask4_conners[0][2]))
    print(np.linalg.norm(mask4_conners[0][2] - mask4_conners[0][3]))
    print(np.linalg.norm(mask4_conners[0][3] - mask4_conners[0][0]))

    obj_points = np.array([[-marker_size/2, -marker_size/2, 0],   # top-left
                           [ marker_size/2, -marker_size/2, 0],   # top-right
                           [ marker_size/2,  marker_size/2, 0],   # bottom-right
                           [-marker_size/2,  marker_size/2, 0]], dtype=np.float32)  # bottom-left

    # obj_points = np.array([[-marker_size/2,  marker_size/2, 0],   # bottom-left
    #                        [ marker_size/2,  marker_size/2, 0],   # bottom-right
    #                        [ marker_size/2, -marker_size/2, 0],   # top-right
    #                        [-marker_size/2, -marker_size/2, 0]], dtype=np.float32)  # top-left

    # Iterate over all detected marker boards
    rotation_matrices = []
    Log.info("2D->3D: compare edge lengths computed from adjacent corners")
    for i, corner in enumerate(mask4_conners):
        conners_pcd = o3d.geometry.PointCloud()
        # for j in range(4):
        #     conners_pcd.points.append(obj_points[j])
        # image_points = np.array([
        #     [corner[0][0], corner[0][1]],
        #     [corner[1][0], corner[1][1]],
        #     [corner[2][0], corner[2][1]],
        #     [corner[3][0], corner[3][1]]
        # ], dtype=np.float32)

        # 0123 -> 2301
        # Align with the obj_points 3D coordinate order
        image_points = np.array([
            [corner[2][0], corner[2][1]],
            [corner[3][0], corner[3][1]],
            [corner[0][0], corner[0][1]],
            [corner[1][0], corner[1][1]],
        ], dtype=np.float32)
        # Estimate the pose of each marker board
        # _, rvec, tvec = cv2.solvePnP(obj_points, corner, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_DLS)
        _, rvec, tvec = cv2.solvePnP(obj_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_EPNP)

        frame = im_project
        # Draw the axes of each marker
        # cv2.drawFrameAxes(frame, camera_matrix, dist_coeffs, rvec, tvec, 0.1)

        # Convert the rotation vector to a rotation matrix (if needed)
        rotation_matrix, _ = cv2.Rodrigues(rvec)
        rotation_matrices.append(rotation_matrix)

        # Invert the matrix with cv2.invert()
        retval, rotation_matrix_inv = cv2.invert(rotation_matrix)
        # if retval != 0: print("Matrix inverse:\n", rotation_matrix_inv)
        # else: print("Matrix is not invertible")

        H = np.eye(4)
        H[:3, :3] = rotation_matrix
        H[:3, 3] = tvec.flatten()
        for j in range(4):
            P_world = np.array([obj_points[j][0], obj_points[j][1], obj_points[j][2], 1])
            P_camera_homogeneous = H @ P_world  # multiply in homogeneous coordinates
            conners_pcd.points.append(P_camera_homogeneous[:-1])

        # conners_pcd.rotate(rotation_matrix, (0,0,0))
        # conners_pcd.translate(tvec)

        # Draw the corner labels on the image (optional)
        for j, point in enumerate(image_points):
            text = str(conners_pcd.points[j])
            if j == 3: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 255), 10, cv2.LINE_AA)
            elif j == 2: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 255, 0), 10, cv2.LINE_AA)
            elif j == 1: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255, 255, 255), 10, cv2.LINE_AA)
            else: cv2.putText(frame, text, tuple(point.astype(int)), cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0, 0, 255), 10, cv2.LINE_AA)

        # Convert the point cloud's points to a NumPy array
        points = np.asarray(conners_pcd.points)
        reversed_points = points[::-1]
        conners_pcd.points = o3d.utility.Vector3dVector(reversed_points)

        left = conners_pcd.points[3]
        top = conners_pcd.points[0]
        right = conners_pcd.points[1]
        bottom = conners_pcd.points[2]
        conners = [left, top, right, bottom]
        conners = utils.sort_conners_2d(conners)

        print(
            np.linalg.norm(top - left),
            np.linalg.norm(right - top),
            np.linalg.norm(bottom - right),
            np.linalg.norm(left - bottom),
        )
        # marks_3d_conners.append([left, top, right, bottom])
        marks_3d_conners.append(conners)

    marks_3d_conners = sort_marks_aruco_3d(marks_3d_conners)
    # Return the corners in left, top, right, bottom order
    return frame, marks_3d_conners

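
# Illustrative sketch (made-up intrinsics, not from the pipeline): a minimal check that
# cv2.solvePnP with SOLVEPNP_EPNP, as used above, recovers a known translation when the
# image points are generated by projecting the object points with that same pose.
def _demo_solvepnp_epnp():
    K = np.array([[800.0, 0.0, 320.0],
                  [0.0, 800.0, 240.0],
                  [0.0, 0.0, 1.0]])
    dist = np.zeros(5)
    objp = np.array([[-0.5, -0.5, 0], [0.5, -0.5, 0],
                     [0.5, 0.5, 0], [-0.5, 0.5, 0]], dtype=np.float32)
    rvec_true = np.zeros((3, 1))
    tvec_true = np.array([[0.0], [0.0], [4.0]])
    imgp, _ = cv2.projectPoints(objp, rvec_true, tvec_true, K, dist)
    ok, rvec, tvec = cv2.solvePnP(objp, imgp, K, dist, flags=cv2.SOLVEPNP_EPNP)
    print(ok, tvec.flatten())  # expected to be close to [0, 0, 4]
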
import struct


def get_low_8bits(float_num):
    # Pack the float into its 8-byte big-endian (IEEE 754 double) binary representation
    bytes_rep = struct.pack('>d', float_num)
    # Interpret that binary representation as an integer
    int_rep = int.from_bytes(bytes_rep, 'big')
    # Mask off the low 8 bits
    low_8bits = int_rep & 0xFF
    return low_8bits

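
# Quick sanity check for get_low_8bits (illustrative only): it returns the
# least-significant byte of the big-endian double encoding.
def _demo_get_low_8bits():
    print(get_low_8bits(1.0))  # 1.0 encodes as 0x3FF0000000000000, low byte 0x00 -> 0
    print(get_low_8bits(0.1))  # 0.1 encodes as 0x3FB999999999999A, low byte 0x9A -> 154
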
def check_ply_file(filename):
    with open(filename, 'r') as file:
        lines = file.readlines()

    # Print the file header
    print("File header:")
    for line in lines[:lines.index('end_header\n') + 1]:
        print(line.strip())

    # Print the first few data rows
    print("\nSample data rows:")
    data_start_index = lines.index('end_header\n') + 1
    for i, line in enumerate(lines[data_start_index:data_start_index + 5], start=data_start_index):
        print(f"Line {i}: {line.strip()}")

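
# Illustrative usage sketch: check_ply_file reads the file in text mode, so it is only
# meaningful for ASCII PLY files with an 'end_header' line. The path below mirrors the
# one used in __main__ and is an assumption.
def _demo_check_ply_file():
    check_ply_file(os.path.dirname(__file__) + "\\samples\\output.ply")
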
if __name__ == '__main__':
    file_path = os.path.dirname(__file__) + "\\samples\\output.ply"
    cal_lidar_marks_3d_conner_v2(file_path)

# o3d.visualization.draw_geometries([src_cloud_load], mesh_show_wireframe=True)

#################################
# FILE_PATH = "./samples/calibration/1/output.png"
# frame = cv2.imread(FILE_PATH)
# if frame is None:
#     exit()
# frame, marks = cal_camera_marks_3d_conners_v2(frame)

# # frame, marks = cal_camera_marks_3d_conners(FILE_PATH)
# Log.info("2D->3D marks conners value")
# print(marks)
# frame = cv2.resize(frame, (1000, 750))
# cv2.imshow('frame', frame)
# cv2.waitKey(0)

# cv2.destroyAllWindows()