# lidar_camera_cablition/api_alg.py
# Import modules
import cv2
import numpy as np
import math
import time
import sys
import json
# Notes:
# 1. Decide whether src holds the reference image or the image under test.
# 2. Issue 1: the two images must have the same size.
# 3. Issue 2: the camera's z axis is the arm's y axis.
# 4. All parameters are the offset from the second argument to the first; the reference image must be the target image.
# 5. In `if m.distance < 0.72*n.distance:` the threshold is a key parameter; the smaller it is, the more precise the localization.
# 6. assert(isRotationMatrix(R)) fails when cropping or stretching makes the rotation unsolvable.
# 7. The crop ratio affects the rotation matrix and can even make it unsolvable; it must be 4:3.
# 8. The closer the pixel count of the target region in the original image is to that of the target image (the scale ratio), the more accurate M is.
# 9. The scale ratio should be printed.
# 10. Parameters that change M include:
#     img1 = cv2.resize
#     interpolation=cv2.INTER_AREA
#     cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 1.0)  # the 1.0
#     if m.distance < 0.85 * n.distance:  # the 0.85
# 11. Write an automated parameter-selection script to find the best settings (a sketch follows below this list).
# 12. Tunable parameters:
#     cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
#     cv2.resize
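# A minimal sketch of the parameter search suggested in note 11: score each
# (ratio, RANSAC threshold) pair by its inlier count and keep the best. The
# helper name sweep_homography_params and the candidate grids below are
# illustrative assumptions, not part of the original pipeline; call it on the
# two resized grayscale images, e.g. sweep_homography_params(img1, img2).
def sweep_homography_params(img1, img2,
                            ratios=(0.70, 0.75, 0.80, 0.85),
                            thresholds=(1.0, 3.0, 5.0)):
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    flann = cv2.FlannBasedMatcher(dict(algorithm=0, trees=5), dict(checks=50))
    matches = flann.knnMatch(des1, des2, k=2)
    best = None  # (inlier_count, ratio, threshold, M)
    for ratio in ratios:
        good = [m for m, n in matches if m.distance < ratio * n.distance]
        if len(good) < 4:  # findHomography needs at least 4 correspondences
            continue
        src = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        for thr in thresholds:
            M, mask = cv2.findHomography(src, dst, cv2.RANSAC, thr)
            if M is None:
                continue
            inliers = int(mask.sum())
            if best is None or inliers > best[0]:
                best = (inliers, ratio, thr, M)
    return best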
########################################################################
# Import pictures (module-level defaults; unused when paths are passed on the command line)
# homo = "./image/2_origin_centor.jpg"
# homo_train = "./image/10_bigorigin_rz_x.jpg"
homo = "./scp_file//o.jpg"
homo_train = "./scp_file/6_-0.59803_0.264184_0.588617_-1.556_0.343_1.072.jpg"
def cal_homo(homo_train, homo):
    start = time.time()
    # interpolation=cv2.INTER_AREA is required
    img1 = cv2.imread(homo_train, cv2.IMREAD_GRAYSCALE)
    img1 = cv2.resize(img1, (int(img1.shape[1] / 4), int(img1.shape[0] / 4)), interpolation=cv2.INTER_AREA)  # required
    img2 = cv2.imread(homo, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]), interpolation=cv2.INTER_AREA)  # required: both images must have the same size
    # Feature extraction
    MIN_MATCH_COUNT = 20  # tunable
    sift = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = sift.detectAndCompute(img1, None)
    kp2, des2 = sift.detectAndCompute(img2, None)
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Store all the good matches as per Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.85 * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # Find the homography with RANSAC
        # M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 1.0)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 3, maxIters=2000, confidence=0.99)  # reprojection threshold tuned up from 1.0
        print("api_alg homography matrix M:\n", M)
        matchesMask = mask.ravel().tolist()
        h, w = img1.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
    else:
        # print("api_alg : Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None  # NOTE: M is undefined in this branch, so the pose code below will fail
    # Optional: draw the matches for debugging
    # draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green
    #                    singlePointColor=None,
    #                    matchesMask=matchesMask,  # draw only inliers
    #                    flags=2)
    # img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
    # plt.imshow(img3, 'gray'), plt.show()
    # Camera pose from H without K
    def cameraPoseFromHomography(H):
        norm1 = np.linalg.norm(H[:, 0])
        norm2 = np.linalg.norm(H[:, 1])
        tnorm = (norm1 + norm2) / 2.0
        H1 = H[:, 0] / norm1
        H2 = H[:, 1] / norm2
        H3 = np.cross(H1, H2)
        T = H[:, 2] / tnorm
        return np.array([H1, H2, H3, T]).transpose()
    RT = cameraPoseFromHomography(M)
    # print("api_alg RT without K", RT)
    focal_length = img1.shape[1]  # approximate the focal length with the image width in pixels
    center = (img1.shape[1] / 2, img1.shape[0] / 2)  # approximate the principal point with the image center
    K = np.array([[focal_length, 0, center[0]],
                  [0, focal_length, center[1]],
                  [0, 0, 1]])
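    # With focal_length set to the image width, the implied horizontal field of
    # view is 2*atan(w / (2*f)) = 2*atan(0.5) ≈ 53 degrees; this K is only a rough
    # stand-in for a properly calibrated intrinsic matrix.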
    # Camera pose from H with K
    num, Rs, Ts, Ns = cv2.decomposeHomographyMat(M, K)
    # print("api_alg Rs", Rs)
    # print("api_alg Ts", Ts)
    # Checks whether a matrix is a valid rotation matrix.
    def isRotationMatrix(R):
        Rt = np.transpose(R)
        shouldBeIdentity = np.dot(Rt, R)
        I = np.identity(3, dtype=R.dtype)
        n = np.linalg.norm(I - shouldBeIdentity)
        return n < 1e-6
    # Converts a rotation matrix to Euler angles.
    # The result is the same as in MATLAB except for the order of the angles
    # (x and z are swapped).
    def rotationMatrixToEulerAngles(R):
        # assert(isRotationMatrix(R))
        if not isRotationMatrix(R):
            # print("api_alg : assert(isRotationMatrix(R)) failed")
            return np.array([0, 0, 0])
        sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
        singular = sy < 1e-6
        if not singular:
            x = math.atan2(R[2, 1], R[2, 2])
            y = math.atan2(-R[2, 0], sy)
            z = math.atan2(R[1, 0], R[0, 0])
        else:
            x = math.atan2(-R[1, 2], R[1, 1])
            y = math.atan2(-R[2, 0], sy)
            z = 0
        return np.array([x, y, z])
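    # The formulas above assume the ZYX convention R = Rz(z) @ Ry(y) @ Rx(x);
    # sy = sqrt(R[0,0]^2 + R[1,0]^2) equals |cos(y)|, and the singular branch
    # handles the gimbal-lock case where |cos(y)| is close to zero.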
    # Convert the rotation matrix solutions into XYZ Euler angles
    # (the loop leaves the last solution's angles in x, y, z)
    for i in range(0, len(Rs)):
        angles = rotationMatrixToEulerAngles(Rs[i])
        x = np.degrees(angles[0])
        y = np.degrees(angles[1])
        z = np.degrees(angles[2])
        anglesDeg = np.array([x, y, z])
        # print("api_alg angles: ", anglesDeg)
    # Take the last decomposition solution
    R = Rs[num - 1]
    T = Ts[num - 1]
    # print("api_alg : Rs matrices")
    # for i in range(len(Rs)): print(Rs[i])
    # print("api_alg : Ts matrices\n", Ts)
    rand = [x / 180 * math.pi, y / 180 * math.pi, z / 180 * math.pi]
    print("api_alg image-derived rotation matrix R:\n", R)
    print("api_alg image-derived angle offset (deg) a: ", anglesDeg)
    print("api_alg image-derived angle offset (rad) r: ", rand)
    print("api_alg image-derived translation offset t: ", [T[0][0], T[1][0], T[2][0]])
    end = time.time()
    print("api_alg runtime: {}".format(end - start))
    print(rand[0], rand[1], rand[2])
    return rand[0], rand[1], rand[2], M[0][0], M[1][1], M[0][2], M[1][2], R, T
if __name__ == '__main__':
    resp = {'code': 0, 'message': 'success'}
    if len(sys.argv) != 3:
        resp['code'] = -1
        resp['message'] = 'invalid arguments!'
    else:
        img1 = sys.argv[1]
        img2 = sys.argv[2]
        result = cal_homo(img1, img2)
        resp['result'] = {
            'drx': result[0],
            'dry': result[1],
            'drz': result[2],
            'hs': result[3],
            'ws': result[4],
            'xf': result[5],
            'yf': result[6],
            'dx': result[8][0][0],
            'dy': result[8][1][0],
            'dz': result[8][2][0],
        }
    print(json.dumps(resp, indent=0, ensure_ascii=False))
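# Example invocation (the file names below are illustrative):
#   python api_alg.py test_capture.jpg reference.jpg
# The first argument is homo_train (the image under test) and the second is homo
# (the reference image); the script prints a JSON object such as
#   {"code": 0, "message": "success", "result": {"drx": ..., "dx": ..., ...}}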