# -*- coding: utf-8 -*-
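"""
Project a LiDAR point cloud onto a camera image (overview inferred from the
code below):

1. Load a PLY point cloud with Open3D.
2. Rotate/translate it into the camera frame using the LiDAR-to-camera
   calibration (R_mat_avg / T_vector, or the equivalent yaw/pitch/roll y, p, r).
3. Project the points into the image plane with cv2.projectPoints using the
   camera intrinsics (fx, fy, cx, cy) and distortion coefficients (k1, k2, p1, p2, k3).
4. Overlay the projected points on the undistorted camera photo and hand the
   2D-to-3D lookup image to flow_homo.api_cal_info for further processing.
"""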
from ctypes import *

import cv2  # 4.9.0
print(cv2.__version__)  # 4.9.0
import numpy as np
print(np.__version__)  # 1.24.4
# import pyc # https://github.com/PointCloudLibrary/pcl/releases/tag/pcl-1.8.1
import open3d as o3d
print(o3d.__version__)  # 0.18.0
import convert
import copy
import math
import flow_homo
import time

WIDTH = 9344
HEIGHT = 7000
step = 40

# Camera intrinsic parameters; a simple pinhole camera model is assumed here
w = WIDTH
h = HEIGHT

fx = 11241.983
fy = 11244.0599
cx = 4553.03821
cy = 3516.9118

# Distortion coefficients in OpenCV order: radial k1, k2, tangential p1, p2, radial k3
k1 = -0.04052072
k2 = 0.22211572
p1 = 0.00042405
p2 = -0.00367346
k3 = -0.15639485

################################################### 1: calibration without distortion, without LiDAR compensation
# R_mat_avg = np.asarray([
#     [0.999835, -0.0162967, 0.00799612],
#     [0.0164626, 0.999641, -0.0211427],
#     [-0.00764869, 0.0212709, 0.999744]])

# T_vector = np.array([0.115324, 0.0797254, 0.00324282])

# R_T_mat_avg = np.asarray([
#     [0.999835, -0.0162967, 0.00799612, 0.115324],
#     [0.0164626, 0.999641, -0.0211427, 0.0797254],
#     [-0.00764869, 0.0212709, 0.999744, 0.00324282],
#     [0.0, 0.0, 0.0, 1.0]])

# R_mat_fin = np.asarray([
#     [0.0079925, -0.999835, 0.0163003],
#     [-0.0211428, -0.0164661, -0.999641],
#     [0.999745, 0.007645, -0.0212709]])

# y,p,r = 1.93222, -1.5934, -0.345034

################################################### 2: calibration with distortion, with LiDAR compensation
R_mat_avg = np.asarray([
    [0.999812, -0.0178348, 0.00765956],
    [0.0179972, 0.999603, -0.021687],
    [-0.00726974, 0.0218207, 0.999735]])

T_vector = np.array([0.109253, 0.0710213, 0.0175978])

R_T_mat_avg = np.asarray([
    [0.999812, -0.0178348, 0.00765956, 0.109253],
    [0.0179972, 0.999603, -0.021687, 0.0710213],
    [-0.00726974, 0.0218207, 0.999735, 0.0175978],
    [0.0, 0.0, 0.0, 1.0]])

R_mat_fin = np.asarray([
    [0.00765987, -0.999812, 0.0178345],
    [-0.021687, -0.0179969, -0.999603],
    [0.999735, 0.00727006, -0.0218207]])

y, p, r = 1.91032, -1.5938, -0.321605

# y, p, r = 1.57086, -1.57086, -0.0

################################################### 3: calibration without distortion, without LiDAR compensation
# R_mat_avg = np.asarray([
#     [0.999849, -0.048776, 0.00900869],
#     [0.014993, 0.999805, -0.0128836],
#     [-0.00881525, 0.0130167, 0.999876]])

# T_vector = np.array([0.120222, 0.0442982, 0.00136219])

# R_T_mat_avg = np.asarray([
#     [0.999849, -0.048776, 0.00900869, 0.120222],
#     [0.014993, 0.999805, -0.0128836, 0.0442982],
#     [-0.00881525, 0.0130167, 0.999876, 0.00136219],
#     [0.0, 0.0, 0.0, 1.0]])

# R_mat_fin = np.asarray([
#     [0.009009, -0.999849, 0.0148772],
#     [0.0128836, -0.0149927, -0.999805],
#     [0.999876, 0.00881577, -0.0130167]])

# y,p,r = 2.18103, -1.58652, -0.595295

# Build a rotation matrix from Euler angles theta = [x, y, z] (radians); R = Rz @ Ry @ Rx
def _eulerAnglesToRotationMatrix(theta):
    R_x = np.array([[1, 0, 0],
                    [0, math.cos(theta[0]), -math.sin(theta[0])],
                    [0, math.sin(theta[0]), math.cos(theta[0])]])
    R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],
                    [0, 1, 0],
                    [-math.sin(theta[1]), 0, math.cos(theta[1])]])
    R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],
                    [math.sin(theta[2]), math.cos(theta[2]), 0],
                    [0, 0, 1]])
    R = np.dot(R_z, np.dot(R_y, R_x))
    return R

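# Optional sanity check (added; not called by the original pipeline): the active
# calibration block stores both R_mat_fin and the yaw/pitch/roll triple (y, p, r).
# Composing the per-axis rotations in the order the main block applies them
# (x, then y, then z) should reproduce R_mat_fin up to numerical noise.
def _check_extrinsic_consistency(atol=1e-3):
    R_check = _eulerAnglesToRotationMatrix([r, p, y])  # Rz(y) @ Ry(p) @ Rx(r)
    ok = np.allclose(R_check, R_mat_fin, atol=atol)
    print("y/p/r consistent with R_mat_fin:", ok)
    return ok
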
def cal_project_matrix():
    # camera_matrix and dist_coeffs are the known camera intrinsics and distortion parameters
    camera_matrix = np.array([[fx, 0, cx],
                              [0, fy, cy],
                              [0, 0, 1]])
    # Overridden by a second calibration result
    camera_matrix = np.array([[11550.4192, 0.00000000, 4672.28001],
                              [0.00000000, 11546.8773, 3.49929103],
                              [0.00000000, 0.00000000, 1.00000000]])

    dist_coeffs = np.array([k1, k2, p1, p2, k3])  # k1, k2, p1, p2, k3 are the distortion coefficients
    dist_coeffs = np.array([-0.0198130402, 0.0446498902, 0.000332909792, -0.000424586371, 0.371045970])
    # src_size is the size of the original image
    src_size = (w, h)

    # Compute the new camera matrix; getOptimalNewCameraMatrix also returns the valid-pixel ROI
    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(camera_matrix, dist_coeffs, src_size, alpha=0)
    print(new_camera_matrix)
    return new_camera_matrix


def cal_rectification_matrix():
    pass

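# Usage note (added): __main__ calls cal_project_matrix() only to print the
# optimal new camera matrix. If it were applied, it would typically be passed
# back into OpenCV's undistortion routines, e.g.
#   undistorted = cv2.undistort(img, camera_matrix, dist_coeffs, None, new_camera_matrix)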
# Get the depth range (min/max of the x coordinate) of the point cloud
def get_pcd_depth_range(pts_3d):
    # Initialize min and max with the first point's x coordinate
    min_x = pts_3d[0][0] if len(pts_3d) > 0 else 0
    max_x = pts_3d[0][0] if len(pts_3d) > 0 else 0

    # Walk over all points, updating min and max
    for point_3d in pts_3d:
        x = point_3d[0]
        if x < min_x:
            min_x = x
        elif x > max_x:
            max_x = x

    return max_x, min_x

# Map a depth value to a color on a fixed gradient
def get_color(cur_depth, max_depth, min_depth):
    scale = (max_depth - min_depth) / 10
    if cur_depth < min_depth:
        return (0, 0, 255)  # blue
    elif cur_depth < min_depth + scale:
        green = int((cur_depth - min_depth) / scale * 255)
        return (0, green, 255)  # blue-to-yellow gradient
    elif cur_depth < min_depth + scale * 2:
        red = int((cur_depth - min_depth - scale) / scale * 255)
        return (0, 255, 255 - red)  # yellow-to-red gradient
    elif cur_depth < min_depth + scale * 4:
        blue = int((cur_depth - min_depth - scale * 2) / scale * 255)
        return (blue, 255, 0)  # red-to-green gradient
    elif cur_depth < min_depth + scale * 7:
        green = int((cur_depth - min_depth - scale * 4) / scale * 255)
        return (255, 255 - green, 0)  # green-to-yellow gradient
    elif cur_depth < min_depth + scale * 10:
        blue = int((cur_depth - min_depth - scale * 7) / scale * 255)
        return (255, 0, blue)  # yellow-to-blue gradient
    else:
        return (255, 0, 255)  # purple

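# Example (added): with min_depth = 0.0 and max_depth = 10.0 the band width is
# scale = 1.0, so get_color(2.5, 10.0, 0.0) hits the `cur_depth < min_depth + scale * 4`
# branch and returns (int((2.5 - 2.0) / 1.0 * 255), 255, 0) == (127, 255, 0).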
def undistort(distorted_img):
    camera_matrix = np.array([[fx, 0, cx],
                              [0, fy, cy],
                              [0, 0, 1]])
    dist_coeffs = np.array([k1, k2, p1, p2, k3])  # distortion coefficients

    # Correct the lens distortion
    undistorted_img = cv2.undistort(distorted_img, camera_matrix, dist_coeffs, None, camera_matrix)
    # cv2.imwrite("distorted_img.png", undistorted_img)
    # undistorted_img = cv2.imread("distorted_img.png")
    # cv2.imshow("1", undistorted_img)
    # cv2.waitKey(0)
    return undistorted_img

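# Usage (added): __main__ loads the camera photo into the global image_project,
# and _lidar2camera_project() passes it through undistort() before overlaying
# the projected LiDAR points.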
def _origin_project(pcd):
    # Filter points: keep only those inside the x/y/z region of interest
    pts_3d = []
    for point_3d in np.asarray(pcd.points):
        if -2.5 < point_3d[0] < 2.5 and -2.5 < point_3d[1] < 2.5 and point_3d[2] < 3.7:
            pts_3d.append((point_3d[0], point_3d[1], point_3d[2]))
    # pts_3d = np.asarray(pcd.points).tolist()

    # Project the 3D points to 2D
    R_mat = pcd.get_rotation_matrix_from_xyz((0.0, 0.0, 0.0))
    rvec, _ = cv2.Rodrigues(R_mat)
    tvec = np.float64([0.0, 0.0, 0.0])
    camera_matrix = np.float64([[fx, 0, cx],
                                [0, fy, cy],
                                [0, 0, 1]])
    distCoeffs = np.float64([k1, k2, p1, p2, k3])
    pts_2d, _ = cv2.projectPoints(np.array(pts_3d), rvec, tvec, camera_matrix, distCoeffs)

    # Draw the projected points on a 2D image
    max_depth, min_depth = get_pcd_depth_range(pts_3d)
    image_project = np.zeros((h, w, 3), dtype=np.uint8)
    for i, point_2d in enumerate(pts_2d):
        x, y = point_2d.ravel()
        x, y = int(x), int(y)
        if 0 <= x < image_project.shape[1] and 0 <= y < image_project.shape[0]:
            cur_depth = pts_3d[i][2]
            color = get_color(cur_depth, max_depth, min_depth)  # color from depth
            image_project[y, x] = color

    image_project = cv2.resize(image_project, (int(934), int(700)), interpolation=cv2.INTER_LINEAR)
    # cv2.imshow("project image", image_project)
    # cv2.waitKey(0)


# Camera photo shared with _lidar2camera_project(); set in __main__
image_project = None

def _lidar2camera_project(pcd):
    global image_project
    # Extract 3D points within a certain range
    kk = 0.02
    pts_3d_tmp = []
    for point_3d in np.asarray(pcd.points):
        # r = math.sqrt(pow(point_3d[0], 2) + pow(point_3d[1], 2) + pow(point_3d[2], 2))
        # point_3d[0] = point_3d[0] + point_3d[0] / r * kk
        # point_3d[1] = point_3d[1] + point_3d[1] / r * kk
        # point_3d[2] = point_3d[2] + point_3d[2] / r * kk
        point_3d[2] -= 0.02  # small fixed offset along z
        pts_3d_tmp.append((point_3d[0], point_3d[1], point_3d[2]))

    pts_3d = []
    # pts_3d = np.asarray(pcd.points).tolist()
    for point_3d in np.asarray(pts_3d_tmp):
        if -2.5 < point_3d[0] < 2.5 and -2.5 < point_3d[1] < 2.5 and point_3d[2] < 2.8:  # filter by x, y, z
            pts_3d.append((point_3d[0], point_3d[1], point_3d[2]))

    # Visualize the filtered cloud (blocks until the window is closed)
    pcd1 = o3d.geometry.PointCloud()
    for pts in pts_3d:
        pcd1.points.append(pts)
    o3d.visualization.draw_geometries([pcd1], mesh_show_wireframe=True)

    # R_mat = cloud_load.get_rotation_matrix_from_xyz((0.0, 0.0, 0.0))
    # rvec, _ = cv2.Rodrigues(R_mat)
    rvec = np.zeros((3, 1), dtype=np.float64)  # no additional rotation
    tvec = np.float64([0.0, 0.0, 0.0])
    camera_matrix = np.float64([[fx, 0, cx],
                                [0, fy, cy],
                                [0, 0, 1]])
    # distCoeffs = np.float64([k1, k2, p1, p2, k3])
    distCoeffs = np.float64([0.0, 0.0, 0.0, 0.0, 0.0])
    pts_2d, _ = cv2.projectPoints(np.array(pts_3d), rvec, tvec, camera_matrix, distCoeffs)

    image_project_zero = np.zeros((h, w, 3), dtype=np.uint8)
    image_project_red = np.zeros((h, w, 3), dtype=np.uint8)
    image_project_red[:] = (0, 0, 255)
    # image_project = cv2.imread("./caowei/output.png")
    image_project = undistort(image_project)
    image_project_2d_to_3d = np.zeros((h, w, 3), dtype=np.float64)
    # image_project[:] = (255, 255, 255)

    max_depth, min_depth = get_pcd_depth_range(pts_3d)
    # sub_depth = max_depth - min_depth
    for i, point_2d in enumerate(pts_2d):
        x, y = point_2d.ravel()
        x, y = int(x), int(y)
        if 0 <= x < image_project.shape[1] and 0 <= y < image_project.shape[0]:
            cur_depth = pts_3d[i][2]  # depth of the current point
            color = get_color(cur_depth, max_depth, min_depth)  # color from depth
            # gray = int(cur_depth/sub_depth)
            color = (255, 255, 255)
            image_project_zero[y, x] = color  # mark the projected LiDAR hit
            image_project_2d_to_3d[y, x][0] = pts_3d[i][0]
            image_project_2d_to_3d[y, x][1] = pts_3d[i][1]
            image_project_2d_to_3d[y, x][2] = pts_3d[i][2]

    # for test
    rect_roi = flow_homo.rect_roi(0, 0, 0, 0)
    flow_homo.api_cal_info(image_project, image_project_2d_to_3d, [rect_roi])

    # Build a dilated coverage mask from the projected hits and overlay it on the photo
    image_project_zero = cv2.cvtColor(image_project_zero, cv2.COLOR_RGB2GRAY)
    _, mask = cv2.threshold(image_project_zero, 1, 255, cv2.THRESH_BINARY)
    kernel = np.ones((11, 11), np.uint8)
    mask = cv2.dilate(mask, kernel, iterations=1)
    # cv2.imshow("project image", mask)
    image_project = cv2.bitwise_and(image_project_red, image_project, image_project, mask)

    image_project = cv2.resize(image_project, (int(934 * 1.5), int(700 * 1.5)), interpolation=cv2.INTER_LINEAR)
    # cv2.imwrite("./project.jpg", image_project)
    # cv2.imshow("project image", image_project)
    # cv2.waitKey(0)

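# Overlay recap (added): the projected LiDAR hits are rasterized into
# image_project_zero, thresholded and dilated with an 11x11 kernel into a
# coverage mask, and cv2.bitwise_and(image_project_red, image_project,
# image_project, mask) keeps only the red channel of the undistorted photo
# inside that mask, so LiDAR coverage shows up as a red-tinted region.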
if __name__ == '__main__':
    cal_project_matrix()

    image_project = cv2.imread("./caowei/output.png")
    if image_project is None:
        print("image_project is None!")
        exit()
    # cal_project_matrix()

    # Read the PLY file
    ply_file_path = './output.ply'
    cloud_load = o3d.io.read_point_cloud(ply_file_path)

    start_time = time.time()
    pts_3d = np.asarray(cloud_load.points).tolist()
    # Print the points
    # xyz_load = np.asarray(cloud_load.points)
    # print(len(xyz_load))
    # print(xyz_load)

    # Specify the extrinsics: rotation matrix and translation vector  -0.271002 1.81304 -1.75592
    Rz = cloud_load.get_rotation_matrix_from_xyz((0, 0, y))
    Ry = cloud_load.get_rotation_matrix_from_xyz((0, p, 0))
    Rx = cloud_load.get_rotation_matrix_from_xyz((r, 0, 0))
    RR = _eulerAnglesToRotationMatrix([1.5707963267948966, -1.5707963267948966, 0.0])
    print(np.pi / 2)
    print(RR)

    # Yaw/pitch/roll angles are not the same as x/y/z rotation angles
    euler = convert.rot_to_ypr_xyz(R_mat_avg)
    print(euler)

    t_matrix = np.identity(4)
    t_matrix[:3, 3] = T_vector
    # cloud_load.rotate(R_mat_avg)
    # cloud_load.transform(t_matrix)
    # NOTE: the second argument of Open3D's rotate() is the rotation center
    cloud_load_2 = copy.deepcopy(cloud_load).rotate(Rx, (1, 0, 0))
    cloud_load_2.rotate(Ry, (0, 1, 0))
    cloud_load_2.rotate(Rz, (0, 0, 1))
    cloud_load_2.translate(T_vector)
    # print(np.asarray(cloud_load.points))
    # print(np.asarray(cloud_load_2.points))

    # This is the projection that is actually used
    _lidar2camera_project(cloud_load_2)

    # Rx = cloud_load.get_rotation_matrix_from_xyz((1.5708, 0, 0))
    # Ry = cloud_load.get_rotation_matrix_from_xyz((0, -1.5708, 0))
    # Rz = cloud_load.get_rotation_matrix_from_xyz((0, 0, 1.5708))
    # Rxyz = cloud_load.get_rotation_matrix_from_xyz((1.5708, -1.5708, 0.0))
    # cloud_load_1 = copy.deepcopy(cloud_load).rotate(Rxyz)
    # cloud_load_2 = copy.deepcopy(cloud_load).rotate(Rx, (1, 0, 0))
    # cloud_load_2.rotate(Ry, (0, 1, 0))
    # # cloud_load_2 = copy.deepcopy(cloud_load).rotate(Ry, (0, 1, 0))
    # # cloud_load_2.rotate(Rz, (0, 0, 1))
    # _origin_project(cloud_load_2)
    end_time = time.time()
    execution_time = end_time - start_time
    print(execution_time * 1000)

    # Optionally visualize the loaded point cloud
    vis = o3d.visualization.Visualizer()
    # Create a coordinate-frame geometry (three axes)
    axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
    # Add the axes to the visualization window
    vis.add_geometry(axes)

    o3d.visualization.draw_geometries([cloud_load, cloud_load_2], mesh_show_wireframe=True)

    exit()