Compare commits


No commits in common. "9dfa9f24f7554f9c70455920039e5f1befdcd541" and "d471a04c4cf08ed845d6b777baa30dd25b5ccc9e" have entirely different histories.

11 changed files with 355 additions and 557 deletions

Binary image file changed (preview not shown): 144 KiB → 115 KiB

Binary image file removed (preview not shown): 9.1 MiB


@@ -1,48 +0,0 @@
5257
903
6328
903
6328
1952
5257
1952
6761
218
7819
218
7819
1274
6761
1274
5819
2345
7892
2345
7892
4400
5819
4400
3538
3764
4866
3764
4866
5085
3538
5085
2165
2303
4853
2303
4853
3510
2165
3510
8936
5442
9341
5442
9341
6534
8936
6534
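For reference, the 48 deleted values above appear to follow the corner-list layout that `get_hd_roi_from_txt` in `utils.py` (removed later in this comparison) expects: eight lines per ROI, i.e. four corner points as x, y pairs in clockwise order from the top left. A minimal reading sketch under that assumption (the function name is illustrative):

```python
def read_roi_corners(path):
    # Eight lines per ROI: x1, y1, x2, y2, x3, y3, x4, y4 (clockwise from top-left).
    with open(path, "r") as f:
        vals = [float(line.strip()) for line in f if line.strip()]
    if len(vals) % 8 != 0:
        raise ValueError("expected a multiple of 8 values (4 corners per ROI)")
    return [[[vals[i + j], vals[i + j + 1]] for j in range(0, 8, 2)]
            for i in range(0, len(vals), 8)]
```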


@@ -1,98 +1,90 @@
[ [
{ {
"label": 0, "label": 0,
"prob": 0.9319185018539429, "prob": 0.917050302028656,
"x": 210, "x": 189,
"y": 111, "y": 415,
"width": 55, "width": 196,
"height": 52
},
{
"label": 0,
"prob": 0.9214611649513245,
"x": 337,
"y": 214,
"width": 66,
"height": 66
},
{
"label": 0,
"prob": 0.9198867082595825,
"x": 262,
"y": 262,
"width": 45,
"height": 42
},
{
"label": 0,
"prob": 0.9190443158149719,
"x": 288,
"y": 95,
"width": 42,
"height": 40 "height": 40
}, },
{ {
"label": 0, "label": 0,
"prob": 0.8733175992965698, "prob": 0.9155016541481018,
"x": 314, "x": 214,
"y": 168,
"width": 37,
"height": 34
},
{
"label": 0,
"prob": 0.8678057193756104,
"x": 359,
"y": 97,
"width": 41,
"height": 38
},
{
"label": 0,
"prob": 0.8506720662117004,
"x": 216,
"y": 212, "y": 212,
"width": 89, "width": 53,
"height": 38 "height": 48
}, },
{ {
"label": 0, "label": 0,
"prob": 0.8226487636566162, "prob": 0.9061460494995117,
"x": 362, "x": 338,
"y": 146, "y": 309,
"width": 36, "width": 68,
"height": 35 "height": 71
}, },
{ {
"label": 0, "label": 0,
"prob": 0.5786533355712891, "prob": 0.9045385122299194,
"x": 55, "x": 92,
"y": 98, "y": 309,
"width": 42,
"height": 39
},
{
"label": 0,
"prob": 0.5088553428649902,
"x": 93,
"y": 215,
"width": 70, "width": 70,
"height": 69 "height": 69
}, },
{ {
"label": 0, "label": 0,
"prob": 0.3739035725593567, "prob": 0.8958920240402222,
"x": 191, "x": 262,
"y": 314, "y": 358,
"width": 188, "width": 46,
"height": 45
},
{
"label": 0,
"prob": 0.8823522329330444,
"x": 219,
"y": 311,
"width": 85,
"height": 37
},
{
"label": 0,
"prob": 0.8750882744789124,
"x": 289,
"y": 197,
"width": 39,
"height": 36 "height": 36
}, },
{ {
"label": 0, "label": 0,
"prob": 0.3039762079715729, "prob": 0.7151791453361511,
"x": 0, "x": 0,
"y": 117, "y": 212,
"width": 37, "width": 48,
"height": 54 "height": 48
},
{
"label": 0,
"prob": 0.569022536277771,
"x": 354,
"y": 198,
"width": 42,
"height": 36
},
{
"label": 0,
"prob": 0.4601861834526062,
"x": 314,
"y": 265,
"width": 36,
"height": 36
},
{
"label": 0,
"prob": 0.3012613356113434,
"x": 360,
"y": 244,
"width": 35,
"height": 33
} }
] ]

Binary image file changed (preview not shown): 445 KiB → 414 KiB


@@ -0,0 +1,162 @@
[
{
"label": 0,
"prob": 0.895011305809021,
"x": 536,
"y": 127,
"width": 49,
"height": 53
},
{
"label": 0,
"prob": 0.8946242332458496,
"x": 547,
"y": 224,
"width": 70,
"height": 37
},
{
"label": 0,
"prob": 0.8866094946861267,
"x": 198,
"y": 216,
"width": 67,
"height": 63
},
{
"label": 0,
"prob": 0.8786224126815796,
"x": 435,
"y": 222,
"width": 63,
"height": 66
},
{
"label": 0,
"prob": 0.873380720615387,
"x": 83,
"y": 115,
"width": 54,
"height": 53
},
{
"label": 0,
"prob": 0.8712457418441772,
"x": 597,
"y": 117,
"width": 36,
"height": 39
},
{
"label": 0,
"prob": 0.8656917810440063,
"x": 583,
"y": 268,
"width": 37,
"height": 41
},
{
"label": 0,
"prob": 0.718810498714447,
"x": 526,
"y": 316,
"width": 112,
"height": 39
},
{
"label": 0,
"prob": 0.6975097060203552,
"x": 621,
"y": 185,
"width": 17,
"height": 32
},
{
"label": 0,
"prob": 0.6963123083114624,
"x": 314,
"y": 119,
"width": 53,
"height": 55
},
{
"label": 0,
"prob": 0.5993771553039551,
"x": 286,
"y": 319,
"width": 194,
"height": 40
},
{
"label": 0,
"prob": 0.2692039906978607,
"x": 386,
"y": 101,
"width": 46,
"height": 48
},
{
"label": 0,
"prob": 0.21311397850513458,
"x": 321,
"y": 221,
"width": 86,
"height": 40
},
{
"label": 0,
"prob": 0.1731298416852951,
"x": 531,
"y": 121,
"width": 98,
"height": 47
},
{
"label": 0,
"prob": 0.1717808097600937,
"x": 69,
"y": 310,
"width": 172,
"height": 37
},
{
"label": 0,
"prob": 0.14592395722866058,
"x": 87,
"y": 217,
"width": 81,
"height": 35
},
{
"label": 0,
"prob": 0.14323937892913818,
"x": 179,
"y": 171,
"width": 38,
"height": 38
},
{
"label": 0,
"prob": 0.10391232371330261,
"x": 156,
"y": 94,
"width": 46,
"height": 47
},
{
"label": 0,
"prob": 0.08634057641029358,
"x": 414,
"y": 176,
"width": 39,
"height": 37
},
{
"label": 0,
"prob": 0.0807800367474556,
"x": 361,
"y": 269,
"width": 51,
"height": 42
}
]
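The file above is per-image detector output: one record per candidate embedded part, with a class label, a confidence prob, and an axis-aligned box (x, y, width, height). A minimal, hypothetical loader that keeps only confident boxes (the function name and threshold are illustrative, not part of the repository):

```python
import json

def load_detections(path, min_prob=0.5):
    # Each record: {"label": int, "prob": float, "x": int, "y": int, "width": int, "height": int}
    with open(path, "r") as f:
        detections = json.load(f)
    return [d for d in detections if d["prob"] >= min_prob]
```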

Binary image file added (preview not shown): 412 KiB


@@ -1,11 +1,3 @@
```bash
conda env create -f environment.yaml
```
BIM positioning: lock onto the BIM region. xx
Draw the region where the HD camera should be; it is a fixed proportion, enlarged slightly at the same aspect ratio. xx
Get the ROI coordinates detected by the HD camera.
Convert the HD camera's ROI coordinates into BIM coordinates. xxx
Iterate over the complete rectangles within the range and check whether the center point of a rectangle in the BIM lies inside the HD-camera ROI rectangle.
If it does, the two refer to the embedded part with the same number.
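A minimal sketch of the ROI-to-BIM matching idea outlined above (not the repository's exact implementation; the names scale_w, scale_h, offset_x, offset_y and the dict layout of bim_rects are illustrative):

```python
def roi_to_bim(roi, scale_w, scale_h, offset_x, offset_y):
    # roi: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]], clockwise from top-left,
    # in HD-camera pixel coordinates; returns the same corners in BIM coordinates.
    return [[int(x * scale_w) + offset_x, int(y * scale_h) + offset_y] for x, y in roi]

def match_rois_to_bim(hd_rois, bim_rects):
    # A BIM rectangle matches an ROI when the rectangle's center point lies
    # inside the axis-aligned ROI; both are assumed to be in BIM coordinates.
    hits = []
    for roi in hd_rois:
        for rect in bim_rects:  # rect: {"code": ..., "cx": ..., "cy": ...}
            if roi[0][0] < rect["cx"] < roi[1][0] and roi[0][1] < rect["cy"] < roi[2][1]:
                hits.append({"bim_rect": rect, "hd_roi": roi})
    return hits
```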

search.py (392 changed lines)

@@ -1,8 +1,8 @@
import cv2 import cv2
from utils import filter_rectangle, get_hd_cam_rect, get_hd_roi_from_txt from utils import filter_rectangle
print(cv2.__version__) # 4.9.0 print(cv2.__version__) # 4.9.0
import json import json
import math import math
import copy import copy
@ -11,10 +11,8 @@ import time
from scipy.spatial import distance from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment from scipy.optimize import linear_sum_assignment
def get_params(num_param, start, end): def get_params(num_param, start, end):
return copy.deepcopy(num_param[start:end + 1]) return copy.deepcopy(num_param[start:end+1])
def parmas_to_num(text_param): def parmas_to_num(text_param):
for item in text_param: for item in text_param:
@ -25,7 +23,6 @@ def parmas_to_num(text_param):
item['h'] = int(item['h']) item['h'] = int(item['h'])
return text_param return text_param
def parmas_to_text(num_param): def parmas_to_text(num_param):
for item in num_param: for item in num_param:
item['center'] = str(item['center']) item['center'] = str(item['center'])
@ -44,33 +41,28 @@ def parmas_to_text(num_param):
item['y_center'] = str(item['y_center']) item['y_center'] = str(item['y_center'])
return num_param return num_param
def sort_params(params): def sort_params(params):
sorted_params = sorted(params, key=lambda item: (item['center'], item['x'])) sorted_params = sorted(params, key=lambda item: (item['center'], item['x']))
return sorted_params return sorted_params
def print_params(sort_params): def print_params(sort_params):
for param in sort_params: for param in sort_params:
print(param["center"], param["x"], param["w"], param["h"]) print(param["center"], param["x"], param["w"], param["h"])
def print_path(search_path): def print_path(search_path):
for path in search_path: for path in search_path:
print(path[0], path[1]) print(path[0], path[1])
def search_path(sort_params): def search_path(sort_params):
searchPath = [] searchPath = []
for i in range(len(sort_params) - 1): for i in range(len(sort_params) - 1):
(r, theta) = cartesian_to_polar(sort_params[i]["x"], sort_params[i]["center"], (r, theta) = cartesian_to_polar(sort_params[i]["x"], sort_params[i]["center"],
sort_params[i + 1]["x"], sort_params[i + 1]["center"]) sort_params[i + 1]["x"], sort_params[i + 1]["center"])
searchPath.append([r, theta]) searchPath.append([r, theta])
return searchPath return searchPath
def normalize_params_and_path(sort_params, search_path, index=0): def normalize_params_and_path(sort_params, search_path, index=0):
base = sort_params[index]["h"] base = sort_params[index]["h"]
for param in sort_params: for param in sort_params:
param['center'] /= base param['center'] /= base
param['x'] /= base param['x'] /= base
@ -83,29 +75,25 @@ def normalize_params_and_path(sort_params, search_path, index=0):
# path[1] /= base # path[1] /= base
return sort_params, search_path return sort_params, search_path
def read_from_json(file_path): def read_from_json(file_path):
with open(file_path, 'r') as f: with open(file_path, 'r') as f:
loaded_array = json.load(f) loaded_array = json.load(f)
return loaded_array return loaded_array
def cartesian_to_polar(x1, y1, x2, y2): def cartesian_to_polar(x1, y1, x2, y2):
dx = x2 - x1 dx = x2 - x1
dy = y2 - y1 dy = y2 - y1
r = math.sqrt(dx ** 2 + dy ** 2) r = math.sqrt(dx**2 + dy**2)
theta = math.atan2(dy, dx) theta = math.atan2(dy, dx)
return r, theta return r, theta
def calculate_second_point(x1, y1, r, theta_radians): def calculate_second_point(x1, y1, r, theta_radians):
# theta_radians = math.radians(theta_degrees) # theta_radians = math.radians(theta_degrees)
x2 = x1 + r * math.cos(theta_radians) x2 = x1 + r * math.cos(theta_radians)
y2 = y1 + r * math.sin(theta_radians) y2 = y1 + r * math.sin(theta_radians)
return x2, y2 return x2, y2
def cal_c1c2c3c4(param, heigt): def cal_c1c2c3c4(param, heigt):
''' '''
按照上左上右下右下左的顺时针顺序. 按照上左上右下右下左的顺时针顺序.
@ -127,15 +115,14 @@ def cal_c1c2c3c4(param, heigt):
param['y_center'] = int((param['y1'] + param['y3']) / 2) param['y_center'] = int((param['y1'] + param['y3']) / 2)
return param return param
def gen_im_from_params(params, type="lines"): def gen_im_from_params(params, type="lines"):
# 依据bim数据生成bim图 # 依据bim数据生成bim图
# type: # line points # type: # line points
## 确定整个bim图的长度和宽度边界 ## 确定整个bim图的长度和宽度边界
max_y = -999999 # y坐标最大值 max_y = -999999 # y坐标最大值
max_y_idx = -1 # y坐标最大值的索引 max_y_idx = -1 # y坐标最大值的索引
max_x = -999999 # x坐标最大值 max_x = -999999 # x坐标最大值
max_x_idx = -1 # x坐标最大值的索引 max_x_idx = -1 # x坐标最大值的索引
# 遍历,找到矩形x坐标最大值和矩形y坐标最大值 # 遍历,找到矩形x坐标最大值和矩形y坐标最大值
for i, param in enumerate(params): for i, param in enumerate(params):
@ -150,8 +137,11 @@ def gen_im_from_params(params, type="lines"):
max_y = param["center"] + param["h"] / 2 max_y = param["center"] + param["h"] / 2
max_y_idx = i max_y_idx = i
bim_width = int(max_x) padding_value = 0 # 内边距,避免整个bim图片贴着边缘展示
bim_height = int(max_y) bim_width = int(max_x) + padding_value
print(f"[bim_width] ====== [{bim_width}]")
bim_height = int(max_y) + padding_value
print(f"[bim_height] ====== [{bim_height}]")
bim_channels = 3 bim_channels = 3
im = np.zeros((bim_height, bim_width, bim_channels), dtype=np.uint8) im = np.zeros((bim_height, bim_width, bim_channels), dtype=np.uint8)
@ -159,16 +149,16 @@ def gen_im_from_params(params, type="lines"):
cal_c1c2c3c4(param, bim_height) cal_c1c2c3c4(param, bim_height)
if type == "lines": if type == "lines":
pts = np.asarray([[param['x1'], param['y1']], pts = np.asarray([[param['x1'], param['y1']],
[param['x2'], param['y2']], [param['x2'], param['y2']],
[param['x3'], param['y3']], [param['x3'], param['y3']],
[param['x4'], param['y4']]]) [param['x4'], param['y4']]])
cv2.polylines(im, [pts], True, (255, 255, 0), 8) cv2.polylines(im, [pts], True, (255,255,0), 8)
elif type == "points": elif type == "points":
cv2.circle(im, (param['x1'], param['y1']), 1, 255, 20) cv2.circle(im, (param['x1'], param['y1']), 1, 255, 20)
cv2.circle(im, (param['x2'], param['y2']), 1, 255, 20) cv2.circle(im, (param['x2'], param['y2']), 1, 255, 20)
cv2.circle(im, (param['x3'], param['y3']), 1, 255, 20) cv2.circle(im, (param['x3'], param['y3']), 1, 255, 20)
cv2.circle(im, (param['x4'], param['y4']), 1, 255, 20) cv2.circle(im, (param['x4'], param['y4']), 1, 255, 20)
cv2.circle(im, (param['x'], int((param['y3'] + param['y2']) / 2)), 1, (255, 0, 0), 8) cv2.circle(im, (param['x'], int((param['y3'] + param['y2']) / 2 )), 1, (255,0,0), 8)
return im return im
@ -179,13 +169,11 @@ def gen_im_from_params(params, type="lines"):
False 不在 False 不在
如果没有提供区域,不做判断,返回True 如果没有提供区域,不做判断,返回True
""" """
def is_inside_roi(point, roi_w, roi_h): def is_inside_roi(point, roi_w, roi_h):
if (roi_w != None and roi_h != None): if (roi_w != None and roi_h != None):
x = point[0] x = point[0]
y = point[1] y = point[1]
if (x <= 0 or y <= 0 or y >= roi_h or x >= roi_w): if (x <= 0 or y <= 0 or y >= roi_h or x >= roi_w ):
# 不在区域内部 # 不在区域内部
return False return False
else: else:
@ -196,7 +184,7 @@ def is_inside_roi(point, roi_w, roi_h):
return True return True
def gen_points_from_params(params, roi_w=None, roi_h=None): def gen_points_from_params(params,roi_w=None,roi_h=None):
# 依据bim数据生成bim图 # 依据bim数据生成bim图
# type line points # type line points
## 计算bim图长和宽 ## 计算bim图长和宽
@ -219,21 +207,20 @@ def gen_points_from_params(params, roi_w=None, roi_h=None):
if (roi_w == None and roi_h == None): if (roi_w == None and roi_h == None):
cal_c1c2c3c4(param, bim_height) cal_c1c2c3c4(param, bim_height)
# 过滤点,把在roi区域之外的点全部过滤掉. # 过滤点,把在roi区域之外的点全部过滤掉.
if (is_inside_roi([param['x1'], param['y1']], roi_w, roi_h)): if(is_inside_roi([param['x1'], param['y1']], roi_w, roi_h)):
points.append([param['x1'], param['y1'], i]) points.append([param['x1'], param['y1'], i])
if (is_inside_roi([param['x2'], param['y2']], roi_w, roi_h)): if(is_inside_roi([param['x2'], param['y2']], roi_w, roi_h)):
points.append([param['x2'], param['y2'], i]) points.append([param['x2'], param['y2'], i])
if (is_inside_roi([param['x3'], param['y3']], roi_w, roi_h)): if(is_inside_roi([param['x3'], param['y3']], roi_w, roi_h)):
points.append([param['x3'], param['y3'], i]) points.append([param['x3'], param['y3'], i])
if (is_inside_roi([param['x4'], param['y4']], roi_w, roi_h)): if(is_inside_roi([param['x4'], param['y4']], roi_w, roi_h)):
points.append([param['x4'], param['y4'], i]) points.append([param['x4'], param['y4'], i])
if (is_inside_roi([param['x_center'], param['y_center']], roi_w, roi_h)): if(is_inside_roi([param['x_center'], param['y_center']], roi_w, roi_h)):
points.append([param['x_center'], param['y_center'], i]) points.append([param['x_center'], param['y_center'], i])
if (roi_w != None and roi_h != None): if (roi_w != None and roi_h != None):
print(f"[经区域过滤之后的点数一共为] ====== [{len(points)}]") print(f"[经区域过滤之后的点数一共为] ====== [{len(points)}]")
return points return points
def topological_similarity(adj1, adj2): def topological_similarity(adj1, adj2):
""" """
计算两个拓扑结构的相似度 计算两个拓扑结构的相似度
@ -242,7 +229,6 @@ def topological_similarity(adj1, adj2):
similarity = np.sum(adj1 == adj2) / (adj1.shape[0] * adj2.shape[1]) similarity = np.sum(adj1 == adj2) / (adj1.shape[0] * adj2.shape[1])
return similarity return similarity
def find_topological_matches(points1, adj1, points2, adj2, threshold=0.8): def find_topological_matches(points1, adj1, points2, adj2, threshold=0.8):
""" """
基于拓扑结构寻找匹配点集 基于拓扑结构寻找匹配点集
@ -258,10 +244,8 @@ def find_topological_matches(points1, adj1, points2, adj2, threshold=0.8):
matches.sort(key=lambda x: x[2], reverse=True) matches.sort(key=lambda x: x[2], reverse=True)
return matches return matches
from sklearn.linear_model import RANSACRegressor from sklearn.linear_model import RANSACRegressor
def ransac_shape_matching(points, reference_points): def ransac_shape_matching(points, reference_points):
model_ransac = RANSACRegressor(random_state=42) model_ransac = RANSACRegressor(random_state=42)
try: try:
@ -269,13 +253,12 @@ def ransac_shape_matching(points, reference_points):
except ValueError as e: except ValueError as e:
print("Error fitting the model:", e) print("Error fitting the model:", e)
return [] return []
inlier_mask = model_ransac.inlier_mask_ inlier_mask = model_ransac.inlier_mask_
best_match_subset = points[inlier_mask] best_match_subset = points[inlier_mask]
return best_match_subset return best_match_subset
def polar_to_cartesian(polar_points): def polar_to_cartesian(polar_points):
r = polar_points[:, 0] r = polar_points[:, 0]
theta = polar_points[:, 1] theta = polar_points[:, 1]
@ -283,7 +266,6 @@ def polar_to_cartesian(polar_points):
y = r * np.sin(theta) y = r * np.sin(theta)
return np.vstack((x, y)).T return np.vstack((x, y)).T
def compute_shape_context(points, nbins_r=5, nbins_theta=12): def compute_shape_context(points, nbins_r=5, nbins_theta=12):
n = points.shape[0] n = points.shape[0]
shape_contexts = [] shape_contexts = []
@ -299,16 +281,14 @@ def compute_shape_context(points, nbins_r=5, nbins_theta=12):
H, _, _ = np.histogram2d(thetas, rs, bins=[theta_edges, r_edges]) H, _, _ = np.histogram2d(thetas, rs, bins=[theta_edges, r_edges])
H /= np.sum(H) + 1e-8 # 归一化 H /= np.sum(H) + 1e-8 # 归一化
shape_contexts.append(H.flatten()) shape_contexts.append(H.flatten())
return np.array(shape_contexts) return np.array(shape_contexts)
def match_shapes(sc1, sc2): def match_shapes(sc1, sc2):
cost_matrix = distance.cdist(sc1, sc2, metric='sqeuclidean') cost_matrix = distance.cdist(sc1, sc2, metric='sqeuclidean')
row_ind, col_ind = linear_sum_assignment(cost_matrix) row_ind, col_ind = linear_sum_assignment(cost_matrix)
return cost_matrix[row_ind, col_ind].sum() return cost_matrix[row_ind, col_ind].sum()
def _sobel(image): def _sobel(image):
''' '''
_sobel _sobel
@ -321,10 +301,9 @@ def _sobel(image):
_sobelx = np.uint8(np.absolute(_sobelx)) _sobelx = np.uint8(np.absolute(_sobelx))
_sobely = np.uint8(np.absolute(_sobely)) _sobely = np.uint8(np.absolute(_sobely))
_sobelcombine = cv2.bitwise_or(_sobelx, _sobely) _sobelcombine = cv2.bitwise_or(_sobelx,_sobely)
return _sobelcombine return _sobelcombine
def _findContours(image): def _findContours(image):
''' '''
_findContours _findContours
@ -333,114 +312,11 @@ def _findContours(image):
contours, _ = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) contours, _ = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
return sorted(contours, key=cv2.contourArea, reverse=True) return sorted(contours, key=cv2.contourArea, reverse=True)
if __name__ == "__main__":
def bim_compare_to_hd_roi(hd_roi_list, hd_img_width, hd_img_height, bim_im_height,
bim_sub_area, bim_rect_list):
"""
预埋件号码匹配
将高清摄像头照片的ROI和实际BIM图中的号码匹配上
参数:
hd_roi_list : 高清摄像头的ROI数据列表每个ROI表示一个可能的预埋件区域
1.每个第二层子数组中的坐标顺序为左上右上右下左下顺时针
2.是以左上角为原点的常规的图片坐标系
格式如下
[
[[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
[[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
]
hd_img_width (int): 高清相机拍摄图片原始宽度
hd_img_height (int): 高清相机拍摄图片原始高度
bim_im_height(int): bim图的高度
bim_sub_area : 包含高清广角等坐标矩形区域数据
bim_rect_list : bim的原始数据坐标系上以左下角为原点的数学坐标系
返回: 数组数组内包含多个字典每个字典有如下元素
[
{
"bim_rect": bim_rect_item,
"hd_roi": hd_roi
}
]
"""
# 高清相机坐标转换为bim图坐标,宽高肯定都是等比的直接按照bim上面高清矩形的宽度算下比例。
scale_w = bim_sub_area["hd_cam_w_bim"] / hd_img_width
scale_h = bim_sub_area["hd_cam_h_bim"] / hd_img_height
hd_roi_out_of_range_index = [] # 超出边界的ROI的索引
for hd_roi_index, hd_roi in enumerate(hd_roi_list):
if hd_roi_index == 5:
print("hd_roi_index === 5")
for i in range(4):
# 所有高清照片的ROI先按照这个比例转换 并且加上坐标偏移转换为bim上实际坐标
hd_roi[i][0] = int(hd_roi[i][0] * scale_w) + bim_sub_area["hd_cam_x_bim"]
hd_roi[i][1] = int(hd_roi[i][1] * scale_h) + bim_sub_area["hd_cam_y_bim"]
# 如果ROI坐标形成的矩形横跨bim图上高清矩形范围边界框就剔除这个矩形
is_x_cross_left_border_line = hd_roi[0][0] < bim_sub_area["hd_cam_x_bim"] <= hd_roi[1][0]
is_x_cross_right_border_line = hd_roi[0][0] < bim_sub_area["hd_cam_x_bim"] + bim_sub_area["hd_cam_w_bim"] <= \
hd_roi[1][0]
is_y_cross_top_border_line = hd_roi[0][1] < bim_sub_area["hd_cam_y_bim"] <= hd_roi[2][1]
is_y_cross_bottom_border_line = hd_roi[0][1] < bim_sub_area["hd_cam_y_bim"] + bim_sub_area["hd_cam_h_bim"] <= \
hd_roi[2][1]
if is_x_cross_left_border_line or is_x_cross_right_border_line or is_y_cross_top_border_line or is_y_cross_bottom_border_line:
hd_roi_out_of_range_index.append(hd_roi_index)
# 画出来试试
# bim_im = cv2.rectangle(bim_im, (hd_roi[0][0], hd_roi[0][1]), (hd_roi[2][0], hd_roi[2][1]), (0, 0, 255), 30)
# 写上i编号
# cv2.putText(bim_im, str(hd_roi_index), (hd_roi[0][0], hd_roi[0][1]), cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 255),20)
# print(f"超出边界的ROI的索引是:{hd_roi_out_of_range_index}")
# bim所有的矩形注意bim原数据是以左下角为原点的坐标系
# bim所有的矩形注意bim原数据是以左下角为原点的坐标系
# bim所有的矩形注意bim原数据是以左下角为原点的坐标系
# 遍历bim所有的矩形找到与高清相机ROI匹配的矩形
bim_rect_hit_list = []
for bim_rect_index, bim_rect_item in enumerate(bim_rect_list):
# x不需要转换
bim_rect_item_center_x = bim_rect_item["x"]
# 把y坐标系转换成左上角坐标系
bim_rect_item_center_y = bim_im_height - bim_rect_item["center"]
# 逐一和高清相机ROI坐标进行匹配
for hd_roi_index, hd_roi in enumerate(hd_roi_list):
if(hd_roi_index in hd_roi_out_of_range_index):
continue
# 如果中心点的坐标在某个高清相机ROI内就认为匹配上了。
bim_rect_item_center_x_inside = hd_roi[0][0] < bim_rect_item_center_x < hd_roi[1][0]
bim_rect_item_center_y_inside = hd_roi[0][1] < bim_rect_item_center_y < hd_roi[2][1]
if bim_rect_item_center_x_inside and bim_rect_item_center_y_inside:
bim_rect_hit_list.append({
"bim_rect": bim_rect_item,
"hd_roi": hd_roi
})
return bim_rect_hit_list
# 画出bim_rect_hit_list
# for bim_rect_hit_item in bim_rect_hit_list:
# bim_rect = bim_rect_hit_item["bim_rect"]
# hd_roi = bim_rect_hit_item["hd_roi"]
# bim_im = cv2.rectangle(bim_im, (hd_roi[0][0], hd_roi[0][1]), (hd_roi[2][0], hd_roi[2][1]), (0, 0, 255), 30)
# # 写上i编号
# cv2.putText(bim_im, str(bim_rect["code"]), (hd_roi[0][0]+100, hd_roi[0][1]+200), cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 255),
# 20)
def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height):
"""
根据广角相机的照片定位到bim图上面对应的位置
参数
data_bim_json bim的json数据左下角为原点的数学坐标系
data_sub_json 广角识别之后的ROI数据左上角为原点的图片坐标系
wide_cam_img_width:原始广角图片的宽度
wide_cam_img_height原始广角图片的高度
返回
找到了返回包含一堆坐标数据的字典
找不到返回None
"""
# 读取并处理数据 # 读取并处理数据
data_bim = {} data_bim = {}
data_bim["type"] = 0 data_bim["type"] = 0
data_bim["params"] = data_bim_json data_bim["params"] = read_from_json("data_bim.json")
data_bim["point"] = [] data_bim["point"] = []
data_bim["params"] = parmas_to_num(data_bim["params"]) data_bim["params"] = parmas_to_num(data_bim["params"])
@ -448,12 +324,6 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
data_sub["type"] = 0 data_sub["type"] = 0
data_sub["params"] = [] data_sub["params"] = []
# 广角相机拍照照片之后左右边界的裁剪比例。
wide_cam_left_cut_rate = 0.1
wide_cam_right_cut_rate = 0.1
# 高清相机的视场矩形在广角相机里面的坐标。cv2下的图片坐标系以左上角为坐标原点
hd_cam_x, hd_cam_y, hd_cam_w, hd_cam_h, = get_hd_cam_rect(wide_cam_left_cut_rate)
# 创建测试子集 # 创建测试子集
# sub_im = cv2.imread("wide_image.png") # sub_im = cv2.imread("wide_image.png")
# sub_zero = np.zeros_like(sub_im) # sub_zero = np.zeros_like(sub_im)
@ -468,13 +338,12 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
# _im_edge_sobel = _sobel(sub_zero) # _im_edge_sobel = _sobel(sub_zero)
# _, _im_thresh = cv2.threshold(_im_edge_sobel, 5, 255, cv2.THRESH_BINARY) # _, _im_thresh = cv2.threshold(_im_edge_sobel, 5, 255, cv2.THRESH_BINARY)
# cnts = _findContours(_im_thresh) # cnts = _findContours(_im_thresh)
original_rectangle = read_from_json("data_sub/test_1/data_sub.json")
# 过滤矩形 # 过滤矩形
# cnts 过滤之后的矩形 # cnts 过滤之后的矩形
# sub_im 裁剪之后的图像 # sub_im 裁剪之后的图像
cnts, wide_cam_img_width_after_cut = filter_rectangle(wide_cam_img_width, wide_cam_img_height, data_sub_json, cnts,sub_im = filter_rectangle("data_sub/test_1/wide_image.png", original_rectangle)
wide_cam_left_cut_rate, sub_zero = np.zeros_like(sub_im)
wide_cam_right_cut_rate)
# sub_zero = np.zeros_like(sub_im)
for contour in cnts: for contour in cnts:
# x, y, w, h = cv2.boundingRect(contour) # x, y, w, h = cv2.boundingRect(contour)
x = contour["x"] x = contour["x"]
@ -483,7 +352,7 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
h = contour["height"] h = contour["height"]
# 由于定位框大小大于预埋件大小,因此这里需要做缩放处理 # 由于定位框大小大于预埋件大小,因此这里需要做缩放处理
kh = int(h * 0.01) # roi1 kh = int(h * 0.01) # roi1
kw = int(w * 0.01) # roi1 kw = int(w * 0.01) # roi1
x += int(kw) x += int(kw)
y += int(kh) y += int(kh)
@ -503,15 +372,15 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
param["w"] = param["x2"] - param["x1"] param["w"] = param["x2"] - param["x1"]
param["h"] = param["y3"] - param["y1"] param["h"] = param["y3"] - param["y1"]
param["x"] = int((param["x1"] + param["x2"]) / 2) param["x"] = int((param["x1"] + param["x2"]) / 2)
param['center'] = wide_cam_img_height - int((param["y1"] + param["y3"]) / 2) param['center'] = sub_im.shape[0] - int((param["y1"] + param["y3"]) / 2)
data_sub["params"].append(param) data_sub["params"].append(param)
# cv2.rectangle(sub_zero, (x, y), (x + w, y + h), (0, 255, 0), 1) cv2.rectangle(sub_zero, (x, y), (x + w, y + h), (0, 255, 0), 1)
# bim_im = gen_im_from_params(data_bim["params"]) bim_im = gen_im_from_params(data_bim["params"])
# cv2.namedWindow("bim", cv2.WINDOW_NORMAL) # cv2.namedWindow("bim", cv2.WINDOW_NORMAL)
# cv2.imshow("bim", bim_im) # cv2.imshow("bim", bim_im)
# cv2.imwrite("bim_im.png", bim_im) cv2.imwrite("bim_im.png", bim_im)
# cv2.waitKey(0) # cv2.waitKey(0)
# cv2.imshow("sub_zero", sub_zero) cv2.imshow("sub_zero", sub_zero)
# cv2.waitKey(0) # cv2.waitKey(0)
# data_sub = {} # data_sub = {}
@ -531,37 +400,37 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
# sub_roi_height = int(max_y) #???? # sub_roi_height = int(max_y) #????
# sub_roi_width = int(max_x) #???? # sub_roi_width = int(max_x) #????
sub_roi_height = wide_cam_img_height # 广角图片的高度没有裁剪 sub_roi_height = sub_im.shape[0]
sub_roi_width = wide_cam_img_width_after_cut # 广角图片的宽度裁剪过后变化了 sub_roi_width = sub_im.shape[1]
sub_roi_params_select_id = -1 sub_roi_params_select_id = -1
sub_roi_w_base_len = 0 # 选中预埋件的宽度作为基础长度 sub_roi_w_base_len = 0 # 选中预埋件的宽度作为基础长度
sub_roi_divide_w_h = 1 sub_roi_divide_w_h = 1
polar_origin_x = 0 # 极坐标原点 x polar_origin_x = 0 # 极坐标原点 x
polar_origin_y = 0 # 极坐标原点 y polar_origin_y = 0 # 极坐标原点 y
start_x = 0 start_x = 0
start_y = 0 start_y = 0
## 1.2 选择一块完整的预埋件 ## 1.2 选择一块完整的预埋件
for i, param in enumerate(_sub_sort_params): for i, param in enumerate(_sub_sort_params):
if 0 < param['x1'] and param['x2'] < sub_roi_width \ if 0 < param['x1'] and param['x2'] < sub_roi_width \
and 0 < param['y1'] and param['y3'] < sub_roi_height: and 0 < param['y1'] and param['y3'] < sub_roi_height:
sub_roi_params_select_id = i sub_roi_params_select_id = i
sub_roi_w_base_len = param['x2'] - param['x1'] sub_roi_w_base_len = param['x2'] - param['x1']
sub_roi_divide_w_h = param['w'] / param['h'] sub_roi_divide_w_h = param['w'] / param['h']
polar_origin_x = int(param['x1']) # 当前选择的预埋件的左上角 x 坐标 polar_origin_x = int(param['x1']) # 当前选择的预埋件的左上角 x 坐标
polar_origin_y = int(param['y1']) # 当前选择的预埋件的左上角 y 坐标 polar_origin_y = int(param['y1']) # 当前选择的预埋件的左上角 y 坐标
break break
if sub_roi_params_select_id == -1 or sub_roi_w_base_len == 0: if sub_roi_params_select_id == -1 or sub_roi_w_base_len == 0 :
print("[ERROR]\t 拍摄的图像中没有完整的预埋件信息\n") print("[ERROR]\t 拍摄的图像中没有完整的预埋件信息\n")
assert (0) assert(0)
## 1.2.2 将其他预埋件相对于它的极坐标进行填写 ## 1.2.2 将其他预埋件相对于它的极坐标进行填写
for i, param in enumerate(_sub_sort_params): for i, param in enumerate(_sub_sort_params):
if i != sub_roi_params_select_id: if i != sub_roi_params_select_id:
param['r'], param['theta'] = cartesian_to_polar(_sub_sort_params[sub_roi_params_select_id]['x_center'], param['r'], param['theta'] = cartesian_to_polar(_sub_sort_params[sub_roi_params_select_id]['x_center'],
_sub_sort_params[sub_roi_params_select_id]['y_center'], _sub_sort_params[sub_roi_params_select_id]['y_center'],
param['x_center'], param['y_center']) param['x_center'],param['y_center'])
## 1.3计算所有点到该预埋件左上点的,点个数,平均极半径 和 平均极角度 ## 1.3计算所有点到该预埋件左上点的,点个数,平均极半径 和 平均极角度
sum_r, sum_theta = 0.0, 0 sum_r, sum_theta = 0.0,0
count = 0 count = 0
# 测试,画出所有的pts # 测试,画出所有的pts
# for i, p in enumerate(_sub_sort_params): # for i, p in enumerate(_sub_sort_params):
@ -569,15 +438,14 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
# cv2.circle(sub_im, (p["x_center"], p["y_center"]), 2, (0, 255, 255), -1) # cv2.circle(sub_im, (p["x_center"], p["y_center"]), 2, (0, 255, 255), -1)
# cv2.imshow("_sub_sort_params", sub_im) # cv2.imshow("_sub_sort_params", sub_im)
# cv2.waitKey(0) # cv2.waitKey(0)
pts = gen_points_from_params(_sub_sort_params, sub_roi_width, sub_roi_height) pts = gen_points_from_params(_sub_sort_params,sub_roi_width,sub_roi_height)
# # 测试,画出所有的pts # # 测试,画出所有的pts
# for i, p in enumerate(pts): for i, p in enumerate(pts):
# # 画点 # 画点
# cv2.circle(sub_im, (p[0], p[1]), 2, (0, 255, 255), -1) cv2.circle(sub_im, (p[0], p[1]), 2, (0, 255, 255), -1)
# # 写编号 # 写编号
# cv2.putText(sub_im, str(i), (p[0], p[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1) cv2.putText(sub_im, str(i), (p[0], p[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
# cv2.rectangle(sub_im, (hd_cam_x, hd_cam_y), (hd_cam_x + hd_cam_w, hd_cam_y + hd_cam_h), (0, 255, 0), 4) cv2.imshow("sub_im_points", sub_im)
# cv2.imshow("sub_im_points", sub_im)
# cv2.waitKey(0) # cv2.waitKey(0)
polar_list = [] polar_list = []
for i, pt in enumerate(pts): for i, pt in enumerate(pts):
@ -588,6 +456,9 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
polar_list.append([r, theta]) polar_list.append([r, theta])
sum_r /= count * sub_roi_w_base_len sum_r /= count * sub_roi_w_base_len
sum_theta /= count sum_theta /= count
print(f"[所有点到该预埋件左上点的个数] ====== [{count}]")
print(f"[所有点到该预埋件左上点的平均极半径] ====== [{sum_r}]")
print(f"[所有点到该预埋件左上点的平均极角] ====== [{sum_theta}]")
# 初始化候选预埋件 # 初始化候选预埋件
candi_params = [] candi_params = []
@ -599,38 +470,32 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
rst_params = [] rst_params = []
bim_all_pts = gen_points_from_params(data_bim["params"]) bim_all_pts = gen_points_from_params(data_bim["params"])
# bim_im = gen_im_from_params(data_bim["params"]) bim_im = gen_im_from_params(data_bim["params"])
# sub_im = gen_im_from_params(data_sub["params"])# 需要读取 # sub_im = gen_im_from_params(data_sub["params"])# 需要读取
min_match_score = 999999 min_match_score = 999999
for i, param in enumerate(candi_params): for i, param in enumerate(candi_params):
tmp_roi_w_base_len = param['x2'] - param['x1'] tmp_roi_w_base_len = param['x2'] - param['x1']
scale = tmp_roi_w_base_len / sub_roi_w_base_len scale = tmp_roi_w_base_len / sub_roi_w_base_len
tmp_roi_width = int(scale * sub_roi_width) tmp_roi_width = int(scale * sub_roi_width)
tmp_roi_height = int(scale * sub_roi_height) tmp_roi_height = int(scale * sub_roi_height)
# 计算广角矩形相对于bim图的坐标 # 相对于bim图的坐标
tmp_roi_start_x = int(param['x1'] - scale * polar_origin_x) tmp_roi_start_x = int(param['x1'] - scale * polar_origin_x)
tmp_roi_end_x = tmp_roi_start_x + tmp_roi_width tmp_roi_end_x = tmp_roi_start_x + tmp_roi_width
tmp_roi_start_y = int(param['y1'] - scale * polar_origin_y) tmp_roi_start_y = int(param['y1'] - scale * polar_origin_y)
tmp_roi_end_y = tmp_roi_start_y + tmp_roi_height tmp_roi_end_y = tmp_roi_start_y + tmp_roi_height
# 计算高清矩形在bim上面的坐标。相当于被scale一起放大了
hd_cam_x_bim = int(scale * hd_cam_x + tmp_roi_start_x) # 左上角x
hd_cam_y_bim = int(scale * hd_cam_y + tmp_roi_start_y) # 左上角y
hd_cam_w_bim = int(scale * hd_cam_w)
hd_cam_h_bim = int(scale * hd_cam_h)
tmp_roi_conners = [[tmp_roi_start_x, tmp_roi_start_y], tmp_roi_conners = [[tmp_roi_start_x, tmp_roi_start_y],
[tmp_roi_end_x, tmp_roi_start_y], [tmp_roi_end_x, tmp_roi_start_y],
[tmp_roi_end_x, tmp_roi_end_y], [tmp_roi_end_x, tmp_roi_end_y],
[tmp_roi_start_x, tmp_roi_end_y]] [tmp_roi_start_x, tmp_roi_end_y]]
tmp_sum_r, tmp_sum_theta = 0.0, 0 tmp_sum_r, tmp_sum_theta = 0.0, 0
tmp_count = 0 tmp_count = 0
tmp_polar_list = [] tmp_polar_list = []
# 这里需要把选中的预埋件也提取出来 # 这里需要把选中的预埋件也提取出来
param['effective_points'] = [] param['effective_points'] = []
for j, pt in enumerate(bim_all_pts): for j, pt in enumerate(bim_all_pts):
if cv2.pointPolygonTest(np.asarray(tmp_roi_conners), (pt[0], pt[1]), False) > 0: if cv2.pointPolygonTest(np.asarray(tmp_roi_conners), (pt[0],pt[1]), False) > 0:
r, theta = cartesian_to_polar(param['x1'], param['y1'], pt[0], pt[1]) r, theta = cartesian_to_polar(param['x1'], param['y1'], pt[0], pt[1])
tmp_sum_r += r tmp_sum_r += r
tmp_sum_theta += theta tmp_sum_theta += theta
@ -640,49 +505,45 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
param['effective_points'].append(pt) param['effective_points'].append(pt)
tmp_sum_r /= tmp_count * tmp_roi_w_base_len tmp_sum_r /= tmp_count * tmp_roi_w_base_len
tmp_sum_theta /= tmp_count tmp_sum_theta /= tmp_count
# 预埋件数量相差 30%,则不进行计算 # 预埋件数量相差 30%,则不进行计算
# if tmp_count / count > 1.3 or tmp_count / count < 0.77: continue # if tmp_count / count > 1.3 or tmp_count / count < 0.77: continue
if abs(tmp_count - count) == 0: if abs(tmp_count - count) == 0: score = 0.5
score = 0.5 elif abs(tmp_count - count) <= 10: score = 0.4
elif abs(tmp_count - count) <= 10: elif abs(tmp_count - count) <= 20: score = 0.3
score = 0.4 elif abs(tmp_count - count) <= 30: score = 0.2
elif abs(tmp_count - count) <= 20: else: score = 0.0
score = 0.3
elif abs(tmp_count - count) <= 30:
score = 0.2
else:
score = 0.0
# else: score = (1 / abs(tmp_count - count) ) * 0.7 # else: score = (1 / abs(tmp_count - count) ) * 0.7
score += (1 - abs(tmp_sum_r - sum_r) / sub_roi_width) * 0.25 score += (1 - abs(tmp_sum_r - sum_r) / sub_roi_width) * 0.25
score += (1 - abs(tmp_sum_theta - sum_theta) / 3.14) * 0.35 score += (1 - abs(tmp_sum_theta - sum_theta) / 3.14) * 0.35
print("score=======", str(score)) print("score=======", str(score))
if score > 0.6: # ???? if score > 0.6: #????
cartesian_points1 = polar_to_cartesian(np.asarray(tmp_polar_list)) # bim上的坐标 cartesian_points1 = polar_to_cartesian(np.asarray(tmp_polar_list)) # bim上的坐标
cartesian_points2 = polar_to_cartesian(np.asarray(polar_list)) # sub上的坐标 cartesian_points2 = polar_to_cartesian(np.asarray(polar_list)) # sub上的坐标
sc1 = compute_shape_context(cartesian_points1) sc1 = compute_shape_context(cartesian_points1)
sc2 = compute_shape_context(cartesian_points2) sc2 = compute_shape_context(cartesian_points2)
match_score = match_shapes(sc1, sc2) match_score = match_shapes(sc1, sc2)
print("score>0.6") print("score>0.6")
print(f"[score] ====== [{score}]") print(f"[score] ====== [{score}]")
if match_score < 5.0: # ???? print(f"[match_score] ====== [{match_score}]")
print(f"[tmp_count] ====== [{tmp_count}]")
print(f"[tmp_sum_r] ====== [{tmp_sum_r}]")
print(f"[tmp_sum_theta] ====== [{tmp_sum_theta}]")
if match_score < 5.0: #????
param["start_point"] = (tmp_roi_start_x, tmp_roi_start_y) param["start_point"] = (tmp_roi_start_x, tmp_roi_start_y)
param["end_point"] = (tmp_roi_end_x, tmp_roi_end_y) param["end_point"] = (tmp_roi_end_x, tmp_roi_end_y)
param["score"] = (score, tmp_count, tmp_sum_r * tmp_roi_w_base_len, tmp_sum_theta) param["score"] = (score, tmp_count, tmp_sum_r*tmp_roi_w_base_len, tmp_sum_theta)
param['match_score'] = match_score param['match_score'] = match_score
# 高清相机的矩形数据
param['hd_cam_x_bim'] = hd_cam_x_bim
param['hd_cam_y_bim'] = hd_cam_y_bim
param['hd_cam_w_bim'] = hd_cam_w_bim
param['hd_cam_h_bim'] = hd_cam_h_bim
if min_match_score > match_score: if min_match_score > match_score:
min_match_score = match_score min_match_score = match_score
print(f"[start_point] ====== [{param["start_point"]}]")
print(f"[end_point] ====== [{param["end_point"]}]")
rst_params.append(param) rst_params.append(param)
# 找到得分最大的
max_score = -99999 max_score = -99999
max_index = -1 max_index = -1
for i, param in enumerate(rst_params): for i, param in enumerate(rst_params):
@ -691,19 +552,7 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
if abs(score[0] / match_score) > max_score: if abs(score[0] / match_score) > max_score:
max_score = abs(score[0] / match_score) max_score = abs(score[0] / match_score)
max_index = i max_index = i
elapsed_time = time.time() - start_time bim_im = cv2.rectangle(bim_im,rst_params[max_index]["start_point"], rst_params[max_index]["end_point"], 100 * (i + 1), 50)
print(f"Execution time: {elapsed_time:.4f} seconds")
if max_index == -1:
return None
else:
return rst_params[max_index]
# 画出广角相机矩形框
# bim_im = cv2.rectangle(bim_im, rst_p["start_point"], rst_p["end_point"], 100 * (i + 1), 50)
# 画出高清相机矩形框
# bim_im = cv2.rectangle(bim_im, (rst_p["hd_cam_x_bim"], rst_p["hd_cam_y_bim"]), (
# rst_p["hd_cam_x_bim"] + rst_p["hd_cam_w_bim"], rst_p["hd_cam_y_bim"] + rst_p["hd_cam_h_bim"],), (0, 0, 255), 40)
# # 预埋件匹配 # # 预埋件匹配
# for i, param in enumerate(rst_params): # for i, param in enumerate(rst_params):
# score = param["score"] # score = param["score"]
@ -727,10 +576,12 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
# print(f"[end_point] ====== [{param["end_point"]}]") # print(f"[end_point] ====== [{param["end_point"]}]")
# #
# bim_im = cv2.rectangle(bim_im, param["start_point"], param["end_point"], 100 * (i + 1), 50) # bim_im = cv2.rectangle(bim_im, param["start_point"], param["end_point"], 100 * (i + 1), 50)
# img_matches = cv2.resize(bim_im, (int(bim_im.shape[1] / 6), int(bim_im.shape[0] / 6))) elapsed_time = time.time() - start_time
print(f"Execution time: {elapsed_time:.4f} seconds")
img_matches = cv2.resize(bim_im, (int(bim_im.shape[1]/6), int(bim_im.shape[0]/6)))
# sub_im = cv2.resize(sub_im, (int(sub_im.shape[1]/10), int(sub_im.shape[0]/10))) # sub_im = cv2.resize(sub_im, (int(sub_im.shape[1]/10), int(sub_im.shape[0]/10)))
# cv2.imshow("2", img_matches) cv2.imshow("2", img_matches)
# cnts的矩形画在sub_im 上 # cnts的矩形画在sub_im 上
# for i in range(len(cnts)): # for i in range(len(cnts)):
# p = cnts[i] # p = cnts[i]
@ -738,51 +589,4 @@ def search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height
# # 写编号 # # 写编号
# cv2.putText(sub_im, str(i), (p['x'], p['y']), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) # cv2.putText(sub_im, str(i), (p['x'], p['y']), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# cv2.imshow("sub_im_after_filter", sub_im) # cv2.imshow("sub_im_after_filter", sub_im)
# cv2.waitKey(0) cv2.waitKey(0)
if __name__ == "__main__":
# ====================== 广角定位 开始 =========================
data_bim_json = read_from_json("data_bim.json") # bim数据
data_sub_json = read_from_json("data_sub/test_1/data_sub.json") # 广角识别之后的ROI数据
wide_cam_img_width = 640
wide_cam_img_height = 480
bim_sub_area = search(data_bim_json, data_sub_json, wide_cam_img_width, wide_cam_img_height)
if bim_sub_area == None:
print("未找到匹配区域")
exit(0)
else:
print("bingo!!!!!!!")
# ====================== 广角定位 结束 =========================
# ====================== 预埋件号码匹配 开始 =========================
bim_im = gen_im_from_params(data_bim_json) # bim图
hd_roi_list = get_hd_roi_from_txt("data_sub/test_1/roi_conners.txt") # 高清摄像头的ROI数据
hd_img_width = 9344
hd_img_height = 7000
bim_im_height = bim_im.shape[0]
bim_rect_hit_list = bim_compare_to_hd_roi(hd_roi_list, hd_img_width, hd_img_height, bim_im_height, bim_sub_area,data_bim_json)
# ===================== 预埋件号码匹配 结束 ===========================
# ========== 下面仅仅是测试画效果图 开始 ==========
# 画出广角相机矩形框
cv2.rectangle(bim_im, bim_sub_area["start_point"], bim_sub_area["end_point"], 100, 50)
# 画出高清相机矩形框
cv2.rectangle(bim_im, (bim_sub_area["hd_cam_x_bim"], bim_sub_area["hd_cam_y_bim"]), (
bim_sub_area["hd_cam_x_bim"] + bim_sub_area["hd_cam_w_bim"], bim_sub_area["hd_cam_y_bim"] + bim_sub_area["hd_cam_h_bim"],), (0, 0, 255), 40)
# 在bim上画出高清识别对应的件号
for bim_rect_hit_item in bim_rect_hit_list:
bim_rect = bim_rect_hit_item["bim_rect"]
hd_roi = bim_rect_hit_item["hd_roi"]
cv2.rectangle(bim_im, (hd_roi[0][0], hd_roi[0][1]), (hd_roi[2][0], hd_roi[2][1]), (0, 0, 255), 30)
# 写上i编号
cv2.putText(bim_im, str(bim_rect["code"]), (hd_roi[0][0]+100, hd_roi[0][1]+200), cv2.FONT_HERSHEY_SIMPLEX, 5, (0, 0, 255),
20)
bim_im_resize = cv2.resize(bim_im, (int(bim_im.shape[1] / 6), int(bim_im.shape[0] / 6)))
cv2.imshow("bim_im_resize", bim_im_resize)
cv2.waitKey(0)
# ========== 下面仅仅是测试画效果图 结束 ==========

test.py (18 changed lines)

@@ -1,18 +0,0 @@
import cv2
from utils import get_hd_cam_rect
def _darw_rect(im):
x, y, w, h = get_hd_cam_rect(0)
print(w)
print(h)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 5)
sub_im = cv2.imread("data_sub/test_1/wide_image.png")
_darw_rect(sub_im)
cv2.imshow("sub_zero", sub_im)
cv2.waitKey(0)

utils.py (148 changed lines)

@ -3,39 +3,39 @@ import logging
import cv2 import cv2
def re_cal_point(point, offset):
"""
根据裁剪之后的图片,每个点的坐标需要重新计算,以新的图片的宽高作为坐标系
"""
"""
根据裁剪之后的图片,每个点的坐标需要重新计算,以新的图片的宽高作为坐标系
"""
def re_cal_point(point,offset):
# 相当于所有的x坐标向左平移了offset个距离 # 相当于所有的x坐标向左平移了offset个距离
point['x'] = point['x'] - offset point['x'] = point['x'] - offset
def filter_rectangle(image_width,image_height, points, wide_cam_left_cut_rate, wide_cam_right_cut_rate): """
""" 过滤矩形
根据左右裁剪之后的图像过滤矩形在裁剪之后整个矩形已经不在图片里面的就去掉 1 高度过大的不要
1 高度过大的不要 2 整个矩形全部身体都在裁剪区域之外的不要
2 整个矩形全部身体都在裁剪区域之外的不要
返回值:
image_width:原始广角图片的宽度 1 过滤之后的矩形
image_height:原始广角图片的高度 2 裁剪之后的图片
points要过滤的点 """
wide_cam_left_cut_rate# 左边界的裁剪比例,从左边开始裁剪百分之多少 def filter_rectangle(image_path, points):
wide_cam_right_cut_rate # 右边界的裁剪比例,从右边开始裁剪百分之多少
返回值:
filtered_points过滤之后的全部都矩形坐标
wide_cam_img_width_after_cut裁剪之后的图片的宽度
"""
# 高度过大矩形过滤参数 # 高度过大矩形过滤参数
max_height_rate = 0.5 # 矩形高度占整个画面高度的最大比例,如果超过该比例,则认为是无效矩形 max_height_rate = 0.5 # 矩形高度占整个画面高度的最大比例,如果超过该比例,则认为是无效矩形
image_x_min = int(image_width * wide_cam_left_cut_rate) # 左边界的裁剪点 # 裁剪参数
image_x_max = int(image_width * (1 - wide_cam_right_cut_rate)) # 右边界的裁剪点 left_x_cut_rate=0.1 # 左边界的裁剪比例,从左边开始裁剪百分之多少
right_x_cut_rate=0.1# 右边界的裁剪比例,从右边开始裁剪百分之多少
image = cv2.imread(image_path)
image_height = image.shape[0] # 获取图片高度
image_width = image.shape[1] # 获取图片宽度
image_x_min = int(image_width * left_x_cut_rate) # 左边界的裁剪点
image_x_max = int(image_width * (1 - right_x_cut_rate)) # 右边界的裁剪点
# 开始过滤矩形 #开始过滤矩形
bad_point_index = [] bad_point_index = []
print(f'开始过滤矩形,原有矩形数为{len(points)}') print(f'开始过滤矩形,原有矩形数为{len(points)}')
for index in range(len(points)): for index in range(len(points)):
@ -47,8 +47,8 @@ def filter_rectangle(image_width,image_height, points, wide_cam_left_cut_rate, w
continue continue
# x坐标范围过滤,整个矩形全部身体都在裁剪区域之外的不要 # x坐标范围过滤,整个矩形全部身体都在裁剪区域之外的不要
x_min = point['x'] # 矩形四个矩形坐标中x的最小值 x_min = point['x'] # 矩形四个矩形坐标中x的最小值
x_max = point['x'] + point['width'] # 矩形四个矩形坐标中x的最大值 x_max = point['x'] + point['width'] # 矩形四个矩形坐标中x的最大值
# 如果矩形x的 最大值 小于 左边界,去除这个矩形 # 如果矩形x的 最大值 小于 左边界,去除这个矩形
if x_max < image_x_min: if x_max < image_x_min:
bad_point_index.append(index) bad_point_index.append(index)
@ -64,14 +64,14 @@ def filter_rectangle(image_width,image_height, points, wide_cam_left_cut_rate, w
# 如果当前矩形的索引在bad_point_index中则去除这个矩形 # 如果当前矩形的索引在bad_point_index中则去除这个矩形
if i not in bad_point_index: if i not in bad_point_index:
# 重新计算点的坐标 # 重新计算点的坐标
re_cal_point(point, image_x_min) re_cal_point(point,image_x_min)
# 塞入结果 # 塞入结果
filtered_points.append(point) filtered_points.append(point)
print(f'过滤矩形结束,过滤之后的矩形数为{len(filtered_points)}') print(f'过滤矩形结束,过滤之后的矩形数为{len(filtered_points)}')
# 图片裁剪 # 图片裁剪
# 裁剪图片 (height方向不变宽度方向裁剪) # 裁剪图片 (height方向不变宽度方向裁剪)
# cropped_image = image[:, image_x_min:image_x_max] cropped_image = image[:, image_x_min:image_x_max]
# 展示 # 展示
# cv2.imshow("cropped_image", cropped_image) # cv2.imshow("cropped_image", cropped_image)
# cv2.imshow("image", image) # cv2.imshow("image", image)
@ -82,100 +82,14 @@ def filter_rectangle(image_width,image_height, points, wide_cam_left_cut_rate, w
# cv2.putText(cropped_image, str(i), (p['x'], p['y']), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2) # cv2.putText(cropped_image, str(i), (p['x'], p['y']), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
# cv2.imshow("cropped_image_draw", cropped_image) # cv2.imshow("cropped_image_draw", cropped_image)
# cv2.waitKey(0) # cv2.waitKey(0)
wide_cam_img_width_after_cut = image_x_max - image_x_min return filtered_points, cropped_image
return filtered_points, wide_cam_img_width_after_cut
def get_hd_cam_rect(wide_cam_left_cut_rate): # 测试代码
"""
获取高清相机的视场矩形在广角相机里面的坐标cv2下的图片坐标系以左上角为坐标原点
wide_cam_left_cut_rate# 广角相机,左边界的裁剪比例,从左边开始裁剪百分之多少
"""
# 下面参数是几乎标准无偏差的高清相机在广角相机里面的视场角矩形 广角为640x480
x = 128
y = 140
w = 312
h = 234
# 按照k比例放大,因为高清在bim图上面定位的区域不一定是准确的可能比广角的原宽度要小。 所以适当放大高清相机的矩形框,
# 广角左边右边,裁剪之前,放大之后的矩形框
k = 0
width_scale_pixel = k * w
height_scale_pixel = k * h
scale_x = x - width_scale_pixel
scale_y = y - height_scale_pixel
scale_w = w + 2 * width_scale_pixel
scale_h = h + 2 * height_scale_pixel
# 广角裁剪之后,高清矩形在新的广角图片里面的坐标。
original_wide_cam_image_width = 640 # 原本广角图片的宽度
cut_image_x_min = int(original_wide_cam_image_width * wide_cam_left_cut_rate) # 左边界的裁剪点
scale_x = scale_x - cut_image_x_min # 因为只是左右裁剪只影响左上角坐标的x值
return int(scale_x), int(scale_y), int(scale_w), int(scale_h)
def get_hd_roi_from_txt(file_path):
"""
读取本地文件解析ROI矩形坐标
每八行为一个ROI矩形坐标每两行为xy顺序为左上右上右下左下顺时针
Args:
file_path (str): 文件路径
Returns:
list:数组格式第一层是全部都矩形第二次是每个矩形的全部的坐标第三层是每个坐标都xy
[
[[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
[[x1, y1], [x2, y2], [x3, y3], [x4, y4]],
]
"""
roi_list = []
try:
with open(file_path, 'r') as f:
lines = f.readlines()
# 确保行数是8的倍数
if len(lines) % 8 != 0:
raise ValueError("文件格式错误行数不是8的倍数")
for i in range(0, len(lines), 8):
roi = []
for j in range(0, 8, 2):
x = float(lines[i + j].strip())
y = float(lines[i + j + 1].strip())
roi.append([x, y])
roi_list.append(roi)
except FileNotFoundError:
print(f"文件未找到: {file_path}")
except ValueError as e:
print(f"数据格式错误: {e}")
except Exception as e:
print(f"发生未知错误: {e}")
return roi_list
# 测试 get_hd_rou_from_txt
# rois = get_hd_roi_from_txt("data_sub/test_1/roi_conners.txt")
# print(rois[0])
# sub_im = cv2.imread("data_sub/test_1/output.jpg")
# for roi in rois:
# cv2.rectangle(sub_im, (int(roi[0][0]), int(roi[0][1])), (int(roi[2][0]), int(roi[2][1])), (0, 0, 255), 100)
# # 图片缩小展示
# sub_im = cv2.resize(sub_im, (int(sub_im.shape[1]/12), int(sub_im.shape[0]/12)))
# cv2.imshow("sub_im", sub_im)
# cv2.waitKey(0)
# 测试filter_rectangle
# def read_from_json(file_path): # def read_from_json(file_path):
# with open(file_path, 'r') as f: # with open(file_path, 'r') as f:
# loaded_array = json.load(f) # loaded_array = json.load(f)
# return loaded_array # return loaded_array
# cnts = read_from_json("data_sub/test_1/data_sub.json") # cnts = read_from_json("data_sub/test_1/data_sub.json")
# filter_rectangle("data_sub/test_1/wide_image.png",cnts) # filter_rectangle("data_sub/test_1/wide_image.png",cnts)