import json
import logging

import cv2
"""
|
||
根据裁剪之后的图片,每个点的坐标需要重新计算,以新的图片的宽高作为坐标系
|
||
"""
|
||
|
||
|
||
def re_cal_point(point, offset):
|
||
# 相当于所有的x坐标向左平移了offset个距离
|
||
point['x'] = point['x'] - offset
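
# Illustrative sketch (not part of the original file): re_cal_point mutates the dict
# in place, so a point at x = 100 shifted by an offset of 30 ends up at x = 70.
# p = {'x': 100, 'y': 50, 'width': 20, 'height': 40}
# re_cal_point(p, 30)
# print(p['x'])  # 70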


"""
Filter rectangles against the left/right-cropped image. After cropping, any rectangle
that lies entirely outside the image is discarded.
1. Rectangles that are too tall are discarded.
2. Rectangles whose whole body lies outside the cropped region are discarded.

image_path: path to the image
points: the points (rectangles) to filter
wide_cam_left_cut_rate: crop ratio for the left border, i.e. the fraction cut from the left
wide_cam_right_cut_rate: crop ratio for the right border, i.e. the fraction cut from the right

Returns:
1. the filtered rectangles
2. the cropped image
"""


def filter_rectangle(image_path, points, wide_cam_left_cut_rate, wide_cam_right_cut_rate):
    # Parameter for filtering rectangles that are too tall
    max_height_rate = 0.5  # maximum ratio of rectangle height to image height; anything taller is considered invalid

    image = cv2.imread(image_path)
    image_height = image.shape[0]  # image height
    image_width = image.shape[1]  # image width
    image_x_min = int(image_width * wide_cam_left_cut_rate)  # left crop boundary
    image_x_max = int(image_width * (1 - wide_cam_right_cut_rate))  # right crop boundary

    # Start filtering rectangles
    bad_point_index = []
    print(f'Start filtering rectangles, original count: {len(points)}')
    for index in range(len(points)):
        point = points[index]

        # Filter out rectangles that are too tall
        if point['height'] > image_height * max_height_rate:
            bad_point_index.append(index)
            continue

        # Filter by x range: discard rectangles that lie entirely outside the cropped region
        x_min = point['x']  # smallest x among the rectangle's four corners
        x_max = point['x'] + point['width']  # largest x among the rectangle's four corners
        # If the rectangle's largest x is smaller than the left boundary, drop it
        if x_max < image_x_min:
            bad_point_index.append(index)
            continue
        # If the rectangle's smallest x is larger than the right boundary, drop it
        if x_min > image_x_max:
            bad_point_index.append(index)
            continue

    # Keep only the valid rectangles
    filtered_points = []
    for i, point in enumerate(points):
        # Drop the rectangle if its index is in bad_point_index
        if i not in bad_point_index:
            # Recalculate the point's coordinates
            re_cal_point(point, image_x_min)
            # Collect the result
            filtered_points.append(point)
    print(f'Finished filtering rectangles, count after filtering: {len(filtered_points)}')

    # Crop the image
    # (height unchanged, cropped along the width)
    cropped_image = image[:, image_x_min:image_x_max]
    # Visualization
    # cv2.imshow("cropped_image", cropped_image)
    # cv2.imshow("image", image)
    # for i in range(len(filtered_points)):
    #     p = filtered_points[i]
    #     cv2.rectangle(cropped_image, (p['x'], p['y']), (p['x'] + p['width'], p['y'] + p['height']), (0, 0, 255), 2)
    #     # draw the index
    #     cv2.putText(cropped_image, str(i), (p['x'], p['y']), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # cv2.imshow("cropped_image_draw", cropped_image)
    # cv2.waitKey(0)
    return filtered_points, cropped_image
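
# Illustrative usage sketch (not part of the original file); the path, boxes and cut
# rates are hypothetical. Assuming a 640x480 wide-angle frame cropped 10% on each
# side (valid x range 64..576):
# boxes = [
#     {'x': 100, 'y': 20, 'width': 50, 'height': 80},   # kept, x shifted to 100 - 64 = 36
#     {'x': 600, 'y': 20, 'width': 30, 'height': 80},   # dropped: entirely right of the crop
#     {'x': 200, 'y': 20, 'width': 50, 'height': 300},  # dropped: taller than 0.5 * image height
# ]
# kept, cropped = filter_rectangle("wide_image.png", boxes, 0.1, 0.1)
# print(len(kept), cropped.shape)  # 1 and a (480, 512, 3) image, since 576 - 64 = 512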


"""
Get the coordinates of the HD camera's field-of-view rectangle inside the wide-angle
camera image. cv2 image coordinate system, with the origin at the top-left corner.

wide_cam_left_cut_rate: wide-angle camera, crop ratio for the left border (the fraction
cut from the left). Only the left crop is needed here, because cropping on the right
does not shift the top-left coordinates.
"""


def get_hd_cam_rect(wide_cam_left_cut_rate):
    # The values below describe the (nearly ideal, calibration-error-free) field-of-view
    # rectangle of the HD camera inside the wide-angle image; the wide-angle image is 640x480
    x = 128
    y = 140
    w = 312
    h = 234

    # Enlarge the rectangle by a factor k: the region the HD camera locates on the BIM drawing
    # is not necessarily accurate and may be smaller than the wide-angle original, so the HD
    # rectangle is enlarged slightly.
    # Rectangle after enlarging, before the wide-angle image is cropped on the left/right
    k = 0
    width_scale_pixel = k * w
    height_scale_pixel = k * h
    scale_x = x - width_scale_pixel
    scale_y = y - height_scale_pixel
    scale_w = w + 2 * width_scale_pixel
    scale_h = h + 2 * height_scale_pixel

    # Coordinates of the HD rectangle in the new wide-angle image after cropping
    original_wide_cam_image_width = 640  # original width of the wide-angle image
    cut_image_x_min = int(original_wide_cam_image_width * wide_cam_left_cut_rate)  # left crop boundary
    scale_x = scale_x - cut_image_x_min  # only left/right cropping, so only the top-left x shifts

    return int(scale_x), int(scale_y), int(scale_w), int(scale_h)
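
# Illustrative usage sketch (not part of the original file); the cut rate is hypothetical.
# With a 10% left crop on a 640-wide frame, the rectangle's x shifts left by 64 pixels:
# x, y, w, h = get_hd_cam_rect(0.1)
# print(x, y, w, h)  # 64 140 312 234  (128 - 64 = 64; y, w, h unchanged since k = 0)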


# Test code
# def read_from_json(file_path):
#     with open(file_path, 'r') as f:
#         loaded_array = json.load(f)
#     return loaded_array
# cnts = read_from_json("data_sub/test_1/data_sub.json")
# filter_rectangle("data_sub/test_1/wide_image.png",cnts)
# Note: the commented call above passes only two arguments, while filter_rectangle
# now also expects the left and right cut rates.