# lidar_camera_cablition/flow_edge.py
# -*- coding: utf-8 -*-
import math
import time

import cv2
import numpy as np

def _sobel(image):
    '''
    Combined Sobel edge magnitude (|dx| OR |dy|) of a grayscale image.
    '''
    if image.ndim > 2:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # TODO: expose more parameters, see http://blog.csdn.net/sunny2038/article/details/9170013
    _sobelx = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=1)
    _sobely = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=1)
    _sobelx = np.uint8(np.absolute(_sobelx))
    _sobely = np.uint8(np.absolute(_sobely))
    _sobelcombine = cv2.bitwise_or(_sobelx, _sobely)
    return _sobelcombine
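
# A minimal usage sketch for _sobel (assumes "./3_roi_image.png" exists, as in __main__ below):
#   im = cv2.imread("./3_roi_image.png")
#   edges = _sobel(im)                                  # uint8 combined edge-magnitude map
#   _, mask = cv2.threshold(edges, 5, 255, cv2.THRESH_BINARY)
#   cv2.imshow("sobel", mask); cv2.waitKey(0)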

# ##################################################################
# # Flatten the grayscale image into an N x 1 float array.
# img_flat = _im_gray.reshape((_im_gray.shape[0] * _im_gray.shape[1], 1))
# img_flat = np.float32(img_flat)
# # Iteration criteria for k-means.
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.0)
# flags = cv2.KMEANS_RANDOM_CENTERS
# # Run k-means clustering on the pixel intensities.
# compactness, labels, centers = cv2.kmeans(img_flat, 20, None, criteria, 10, flags)
# segmented_img = labels.reshape(_im_gray.shape)
# _im_gray = np.uint8(segmented_img)
# _im_gray = cv2.equalizeHist(_im_gray)
# cv2.imshow("_im2", im)
# cv2.waitKey(0)
# ##################################################################

def _findContours(image):
    '''
    Find external contours and return them sorted by area, largest first.
    http://blog.csdn.net/mokeding/article/details/20153325
    '''
    contours, _ = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    return sorted(contours, key=cv2.contourArea, reverse=True)

def _drawContours(image, contours, cidx=-1):
    '''
    Draw the selected contour (all contours when cidx is -1) in white.
    '''
    image_contour = cv2.drawContours(image, contours, cidx, (255, 255, 255), 1)
    return image_contour
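
# Hedged sketch of how _findContours and _drawContours fit together (the binary input is
# assumed to come from the thresholded Sobel map used further below):
#   _, mask = cv2.threshold(_sobel(im), 5, 255, cv2.THRESH_BINARY)
#   cnts = _findContours(mask)
#   vis = _drawContours(np.zeros_like(mask), cnts, 0)   # draw only the largest contour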

class CropLayer(object):
    def __init__(self, params, blobs):
        self.xstart = 0
        self.xend = 0
        self.ystart = 0
        self.yend = 0

    # Our layer receives two inputs. We need to crop the first input blob
    # to match the shape of the second one (keeping batch size and number of channels).
    def getMemoryShapes(self, inputs):
        inputShape, targetShape = inputs[0], inputs[1]
        batchSize, numChannels = inputShape[0], inputShape[1]
        height, width = targetShape[2], targetShape[3]
        self.ystart = int((inputShape[2] - targetShape[2]) / 2)
        self.xstart = int((inputShape[3] - targetShape[3]) / 2)
        self.yend = self.ystart + height
        self.xend = self.xstart + width
        return [[batchSize, numChannels, height, width]]

    def forward(self, inputs):
        return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]
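
# Note: the HED prototxt expects a centered Crop behaviour that OpenCV's dnn module does not
# provide out of the box, so this custom CropLayer has to be registered before the network is
# loaded and run; dnn() below does that with cv2.dnn_registerLayer('Crop', CropLayer).
# Registering the same layer type a second time can raise an error, so dnn() is best called
# only once per process.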

class Arguments:
    def __init__(self):
        self.description = ""
        self.input = "./images/x.png"
        self.prototxt = "./deploy.prototxt"
        self.caffemodel = "./hed_pretrained_bsds.caffemodel"
        self.width = 360
        self.height = 115
        self.savefile = "./1.jpg"
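
# The paths in Arguments are placeholders: deploy.prototxt and hed_pretrained_bsds.caffemodel
# are the usual file names for the pretrained HED (Holistically-Nested Edge Detection) model;
# they are assumed to sit next to this script and must be downloaded separately for dnn() to work.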

def dnn(im):
    args = Arguments()
    args.width = im.shape[1]
    args.height = im.shape[0]
    args.input = im
    # Load the HED model.
    net = cv2.dnn.readNetFromCaffe(args.prototxt, args.caffemodel)
    # Configure the inference backend and target.
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)  # or GPU
    # net.setPreferableTarget(cv2.dnn.DNN_TARGET_OPENCL)
    cv2.dnn_registerLayer('Crop', CropLayer)
    kWinName = 'Holistically-Nested_Edge_Detection'
    mean_value = cv2.mean(im)
    inp = cv2.dnn.blobFromImage(im, scalefactor=1.5, size=(args.width, args.height),
                                mean=mean_value,
                                swapRB=True, crop=True)
    net.setInput(inp)
    # edges = cv2.Canny(frame, args.width, args.height)
    # edges = cv2.Canny(frame, frame.shape[1], frame.shape[0])
    out = net.forward()
    out = out[0, 0]
    _im_gray = cv2.resize(out, (im.shape[1], im.shape[0]))
    _im_gray = 255 * _im_gray
    # print(out)
    _im_gray = _im_gray.astype(np.uint8)
    _im_gray = cv2.resize(_im_gray, (640, 480))
    cv2.imshow('Input', _im_gray)
    cv2.waitKey(0)
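
# Rough usage sketch for dnn() (assumes the Caffe files referenced in Arguments are available):
#   im = cv2.imread("./3_roi_image.png")
#   dnn(im)   # shows the resized HED edge response in a window
# dnn() currently only displays the result; returning _im_gray instead of (or in addition to)
# showing it would let the HED edge map stand in for _sobel()/Scharr() in cal_conners_edges_sort.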

def Scharr(image):
    '''
    Combined Scharr edge magnitude (|dx| OR |dy|) of a grayscale image.
    '''
    if image.ndim > 2:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # TODO: expose more parameters, see http://blog.csdn.net/sunny2038/article/details/9170013
    _scharrx = cv2.Scharr(image, cv2.CV_8U, dx=1, dy=0, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    _scharry = cv2.Scharr(image, cv2.CV_8U, dx=0, dy=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT)
    _scharrx = np.uint8(np.absolute(_scharrx))
    _scharry = np.uint8(np.absolute(_scharry))
    _scharrcombine = cv2.bitwise_or(_scharrx, _scharry)
    return _scharrcombine
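
# Scharr is a drop-in alternative to _sobel whose 3x3 kernel is more rotationally accurate than
# the Sobel kernel. cal_conners_edges_sort below keeps the _sobel call and leaves the Scharr
# variant commented out, so switching between the two is a one-line change there.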

def cal_conners_edges_sort(im, thresh_hold=80, minLineLength=400):
    _im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # for test
    _im_gray = cv2.GaussianBlur(_im_gray, (5, 5), 0)
    _im_edge_sobel = _sobel(_im_gray)
    # _im_edge_sobel = Scharr(_im_gray)
    _, _im_thresh = cv2.threshold(_im_edge_sobel, 5, 255, cv2.THRESH_BINARY)
    cnts = _findContours(_im_thresh)
    for contour in cnts:
        if cv2.contourArea(contour) > 500:
            # Bounding box of the contour.
            x, y, w, h = cv2.boundingRect(contour)
            # Draw the rectangle on the input image.
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("_im2", _im_thresh)
    cv2.waitKey(0)
    _lines = cv2.HoughLinesP(_im_thresh, 1, np.pi / 180, thresh_hold, minLineLength=minLineLength, maxLineGap=120)
    # for test
    for _line in _lines:
        x1, y1, x2, y2 = _line[0]
        cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 1)
    # for test
    cv2.imshow("_im", im)
    cv2.imshow("_im2", _im_edge_sobel)
    cv2.waitKey(0)
    # print(len(_lines))
    _line4 = []
    for _line in _lines:
        x1, y1, x2, y2 = _line[0]
        # Slope of the segment (nudge x2 to avoid a zero denominator when x2 == x1).
        if x2 == x1: x2 += 0.001
        theta = np.arctan((y2 - y1) / (x2 - x1)) * (180 / math.pi)
        r = math.sqrt(pow((y2 - y1), 2) + pow((x2 - x1), 2))
        m = (y2 - y1) / (x2 - x1)
        x2 = int(x2)
        # Segment as a line in general form A*x + B*y + C = 0.
        A, B, C = m, -1, y1 - m * x1
        brepeat = False
        for i in range(len(_line4)):
            if abs(abs(theta) - abs(_line4[i][4])) < 30:  # within 30 degrees
                cx, cy = (_line4[i][2] + _line4[i][0]) / 2, (_line4[i][3] + _line4[i][1]) / 2
                d = abs(A * cx + B * cy + C) / math.sqrt(pow(A, 2) + pow(B, 2))
                r_max = max(_line4[i][5], r)
                if d < r_max / 3:
                    brepeat = True
                    # Keep the longer of the two overlapping segments.
                    if r > _line4[i][5]:
                        _line4[i] = [x1, y1, x2, y2, theta, r]
        if not brepeat:
            _line4.append([x1, y1, x2, y2, theta, r])
        # print(x1, y1, x2, y2, theta, r)
    # # for test
    for x1, y1, x2, y2, theta, r in _line4:
        cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 10)
        print(x1, y1, x2, y2, theta, r)
    _im_samll = cv2.resize(im, (500, 500))
    # for test
    cv2.imshow("_im", _im_samll)
    cv2.waitKey(0)
    # Order the 4 edges clockwise (top, right, bottom, left) by midpoint distance to the image axes.
    # assert(len(_line4) == 4)
    _line4_sort = []
    Ax, Bx, Cx = 1, 0, 0  # the line x = 0
    Ay, By, Cy = 0, 1, 0  # the line y = 0
    min_dx, max_dx, min_dy, max_dy = 10000, 0, 10000, 0
    min_dx_idx, max_dx_idx, min_dy_idx, max_dy_idx = -1, -1, -1, -1
    for i in range(len(_line4)):
        cx, cy = (_line4[i][2] + _line4[i][0]) / 2, (_line4[i][3] + _line4[i][1]) / 2
        dx = abs(Ay * cx + By * cy + Cy) / math.sqrt(pow(Ay, 2) + pow(By, 2))
        dy = abs(Ax * cx + Bx * cy + Cx) / math.sqrt(pow(Ax, 2) + pow(Bx, 2))
        if dx < min_dx:
            min_dx = dx
            min_dx_idx = i
        if dx > max_dx:
            max_dx = dx
            max_dx_idx = i
        if dy < min_dy:
            min_dy = dy
            min_dy_idx = i
        if dy > max_dy:
            max_dy = dy
            max_dy_idx = i
    _line4_sort = _line4[min_dx_idx], _line4[max_dy_idx], _line4[max_dx_idx], _line4[min_dy_idx]
    print(_line4_sort)
    # # for test
    for x1, y1, x2, y2, theta, r in _line4_sort:
        cv2.line(im, (x1, y1), (x2, y2), (0, 0, 255), 10)
        print(x1, y1, x2, y2, theta, r)
    _im_samll = cv2.resize(im, (500, 500))
    # for test
    cv2.imshow("_im", _im_samll)
    cv2.waitKey(0)
    # Find the four intersection points of consecutive edges.
    _conners4_sort = [0, 0, 0, 0]
    for i in range(len(_line4_sort)):
        x1, y1, x2, y2, _, _ = _line4_sort[i]
        x1_next, y1_next, x2_next, y2_next, _, _ = _line4_sort[(i + 1) % 4]
        # Alternative: treat x2 == x1 as a vertical line (A, B, C = 1, 0, -x1);
        # here we nudge x2 instead to avoid a zero denominator.
        if x2 == x1: x2 += 0.00001
        m = (y2 - y1) / (x2 - x1)
        A, B, C = m, -1, y1 - m * x1
        # Next edge as A_next*x + B_next*y + C_next = 0.
        if x2_next == x1_next: x2_next += 0.001
        m_next = (y2_next - y1_next) / (x2_next - x1_next)
        A_next, B_next, C_next = m_next, -1, y1_next - m_next * x1_next
        # Skip parallel edges (no intersection).
        if A * B_next == A_next * B:
            continue
        # Intersection of the two lines.
        x_p = (B_next * C - B * C_next) / (A_next * B - A * B_next)
        y_p = (A * x_p + C) / -B
        # Keep the intersection only if it lies inside the image.
        if 0 <= int(x_p) < im.shape[1] and 0 <= int(y_p) < im.shape[0]:
            _conners4_sort[(i + 1) % 4] = [int(x_p), int(y_p)]
    assert(len(_conners4_sort) == 4)
    return _conners4_sort, _line4_sort
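
# Hedged usage sketch for cal_conners_edges_sort (the input image and Hough parameters are
# assumptions; the defaults match the ./3_roi_image.png case exercised in __main__):
#   im = cv2.imread("./3_roi_image.png")
#   corners, edges = cal_conners_edges_sort(im, thresh_hold=80, minLineLength=400)
#   # edges: 4 [x1, y1, x2, y2, theta, r] entries ordered top, right, bottom, left;
#   # corners: 4 [x, y] intersections of consecutive edges.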

def cal_sub_conners(im, conners4_sort, line4_sort, sub_pixels=100):
    assert(len(conners4_sort) == 4)
    for i in range(4):
        cx, cy = conners4_sort[i]
        x = int(cx - sub_pixels / 2)
        if x < 0: x = 0
        y = int(cy - sub_pixels / 2)
        if y < 0: y = 0
        w = sub_pixels
        h = sub_pixels
        roi = im[y:y + h, x:x + w].copy()
        roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)  # cv2.imread loads BGR
        roi_gray = cv2.Canny(roi_gray, threshold1=100, threshold2=200)
        _tmp_corners = cv2.goodFeaturesToTrack(roi_gray, maxCorners=100, qualityLevel=0.001, minDistance=2)
        roi_cx = cx - x
        roi_cy = cy - y
        min_d = 10000
        tar_conner = []
        # Pick the detected corner closest to the coarse corner estimate.
        for conner in _tmp_corners:
            conner = conner.tolist()[0]
            d = math.sqrt(pow((conner[0] - roi_cx), 2) + pow((conner[1] - roi_cy), 2))
            if d < min_d:
                min_d = d
                tar_conner = [conner[0], conner[1]]
            cv2.circle(im, (int(conner[0] + x), int(conner[1] + y)), 1, (255, 0, 0), -1)
        cv2.circle(roi, (int(roi_cx), int(roi_cy)), 2, (0, 0, 255), -1)
        cv2.circle(roi, (int(tar_conner[0]), int(tar_conner[1])), 2, (255, 0, 0), -1)
        cv2.circle(im, (int(roi_cx + x), int(roi_cy + y)), 2, (0, 0, 255), -1)
        cv2.circle(im, (int(tar_conner[0] + x), int(tar_conner[1] + y)), 2, (255, 0, 0), -1)
        # corners = np.int0(corners)
        # for test
        cv2.imshow("roi", roi)
        cv2.waitKey(0)
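
# cal_sub_conners refines each coarse corner by running Canny + goodFeaturesToTrack inside a
# sub_pixels x sub_pixels window around it and keeping the detection closest to the coarse
# estimate. A hedged usage sketch (this call is currently commented out in __main__):
#   corners, edges = cal_conners_edges_sort(_im)
#   cal_sub_conners(_im, corners, edges, sub_pixels=100)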

if __name__ == '__main__':
    _im = cv2.imread("./3_roi_image.png")
    # Test: compute a dummy 3-D distance and draw it on the image as a text label.
    edge = np.sqrt(pow((0.323 - 0.32222), 2)
                   + pow((1.32 - 14.2), 2)
                   + pow((41 - 32.1), 2))
    cv2.putText(_im, str(edge)[:6], (int((23 + 23) / 2), int((23 + 32) / 2)),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    _gray = cv2.cvtColor(_im, cv2.COLOR_BGR2GRAY)
    # Test adaptive / Otsu thresholding:
    # thresh = cv2.adaptiveThreshold(_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # t, result_img = cv2.threshold(_gray, 5, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # print(t)
    # cv2.imshow("thresh", result_img)
    # cv2.waitKey(0)
    _conners4_sort, _line4_sort = cal_conners_edges_sort(_im)
    # cal_sub_conners(_im, _conners4_sort, _line4_sort)
    for i in range(4):
        x1, y1, x2, y2, _, _ = _line4_sort[i]
        x_p, y_p = _conners4_sort[i]
        # # for test
        cv2.circle(_im, (int(x_p), int(y_p)), 2, (0, 0, 255), -1)
        cv2.line(_im, (x1, y1), (x2, y2), (0, 255, 0), 1)
    cv2.imwrite("./2_roi_imagerest.png", _im)
    cv2.imshow("im_edge", _im)
    cv2.waitKey(0)