DJI EP auto-aim with OpenCV on the UNIHIKER (行空板): armour plate detection
I first noticed DFRobot's UNIHIKER (行空板) at the end of last year. Judging by its interfaces, processor and price it is a solid board, and since I already own quite a few DFRobot sensors I had been meaning to get one. I received a board through the community try-out (漂流) activity, and since I was already working on an auto-aim system I wanted to try it on the UNIHIKER. The board is now back in stock, so I bought a few more, and this post documents the project. Corrections and suggestions are very welcome.
The program is split into five parts: image acquisition, image processing, contour detection, data dispatch, and EP control. This article covers the first four.
The main hardware is a DFRobot UNIHIKER, a Microsoft webcam and a RoboMaster EP robot; the language used is Python.
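As a rough overview, the five stages chain together as sketched below. This is only a skeleton; preprocess, find_armour and send_to_ep are hypothetical stand-ins for the code shown section by section later in the post.

import cv2

def run_pipeline(video_index=0):
    cap = cv2.VideoCapture(video_index)          # 1. image acquisition
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        mask = preprocess(frame)                 # 2. image processing (undistort + colour filter)
        target = find_armour(mask)               # 3. contour detection
        if target is not None:
            send_to_ep(target)                   # 4. data dispatch (yaw, pitch, distance)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()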
1. Image acquisition
Since an ordinary USB webcam is read through OpenCV, the image is captured with the following code.
cap = cv2.VideoCapture(VIDEO_INDEX)                       # open the camera
frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))         # frame height (480)
frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))          # frame width (640)
gun_point = (int(frame_w/2), int(frame_h/2) + GUN_ERROR)  # centre of the image, offset by the muzzle error
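Before adding any processing it is worth confirming that the camera actually opens on the UNIHIKER. A minimal check might look like this (the index 0 is an assumption; use whatever VIDEO_INDEX your board reports):

import cv2

cap = cv2.VideoCapture(0)              # assumed camera index; adjust if needed
if not cap.isOpened():
    raise RuntimeError("camera not found")
ok, frame = cap.read()                 # grab a single test frame
print(ok, frame.shape if ok else None)
cap.release()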
2. Image processing
Image processing consists mainly of correcting the camera's lens distortion and filtering for the light-bar colour.
areas = []
ret, frame = cap.read()                                   # grab a frame from the camera
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(left_camera_matrix, left_distortion,
                                                  (frame_w, frame_h), 1, (frame_w, frame_h))  # new camera matrix; roi marks the valid area left after undistortion
frame = cv2.undistort(frame, left_camera_matrix, left_distortion, None, newcameramtx)  # undistort the pixel coordinates
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)            # convert to greyscale
blur = cv2.GaussianBlur(gray, (3, 3), 0)                  # Gaussian blur to reduce noise
ret, imged = cv2.threshold(blur, HIGHTLIGHT_THR, 255, cv2.THRESH_BINARY)  # keep only the bright highlights
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 10))  # dilation kernel (previously (5, 7))
highlight_img = cv2.dilate(imged, kernel)                 # dilate the highlight mask
(b, g, r) = cv2.split(frame)                              # split into colour channels
if COLOR_TYPE == 0:  # red team: only red pixels have both r-g and r-b large
    sub_rg = cv2.subtract(r, g)
    sub_rb = cv2.subtract(r, b)
    ret, res_rg = cv2.threshold(sub_rg, COLOR_THR, 255, cv2.THRESH_BINARY)
    ret, res_rb = cv2.threshold(sub_rb, COLOR_THR, 255, cv2.THRESH_BINARY)
    final = cv2.bitwise_and(res_rg, res_rb)
else:                # blue team: same idea with b-g and b-r
    sub_bg = cv2.subtract(b, g)
    sub_br = cv2.subtract(b, r)
    ret, res_bg = cv2.threshold(sub_bg, COLOR_THR, 255, cv2.THRESH_BINARY)
    ret, res_br = cv2.threshold(sub_br, COLOR_THR, 255, cv2.THRESH_BINARY)
    final = cv2.bitwise_and(res_bg, res_br)  # final mask: red/blue colour filter
kernelx = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
color_img = cv2.dilate(final, kernelx)                    # dilate the colour mask
kernele = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
preprocessed = cv2.bitwise_and(color_img, highlight_img)  # keep pixels that are both bright and the right colour
preprocessed = cv2.dilate(preprocessed, kernele)
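Note that the snippet computes roi but never actually uses it. If you do want to crop away the black border that undistortion leaves, the usual pattern is roughly the following (a sketch only, not part of the original listing):

rx, ry, rw, rh = roi                   # valid region returned by getOptimalNewCameraMatrix
frame = frame[ry:ry + rh, rx:rx + rw]  # crop away the black border left by undistortion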
3. Contour detection
This part extracts contours from the filtered mask, keeps only regions of a plausible size, and draws a box around the armour plate.
contours, hierarchy = cv2.findContours(preprocessed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # find contours in the mask
cv2.drawContours(frame, contours, -1, (255, 0, 0), 2)     # draw all contours for debugging
for contour in contours:                                  # collect the area of every contour
    areas.append(cv2.contourArea(contour))
try:
    max_id = areas.index(max(areas))                      # index of the largest contour
except ValueError:                                        # no contours in this frame
    continue
x, y, w, h = cv2.boundingRect(contours[max_id])           # bounding box of the largest contour
if (w * h > 1000):                                        # area check based on the armour plate size
    # assumes left_camera_matrix is laid out as [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
    thetaX = degrees(atan2(x + w / 2 - left_camera_matrix[0][2], left_camera_matrix[0][0]))  # yaw angle to the box centre
    thetaY = degrees(atan2(y + h / 2 - left_camera_matrix[1][2], left_camera_matrix[1][1]))  # pitch angle to the box centre
    dist = distance_to_camera(w)                          # distance from the apparent width
    yield (thetaX, thetaY, dist)
    # drawing
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)  # draw the armour box
    cv2.circle(frame, gun_point, 5, (0, 0, 255), -1)      # draw the muzzle point
cv2.imshow("orgin_pic", frame)
4. Data dispatch
What gets sent down is the offset of the detected armour plate's centre from the robot's point of view (as yaw and pitch angles), together with the distance to the armour plate.
for (yaw, pitch, dist) in aim():
    yaw2 = yaw
    pitch = pitch - 2          # fixed pitch compensation
    pitch2 = pitch
    if abs(yaw) < 1.5:         # dead zone: ignore very small yaw errors
        yaw = 0
    if abs(pitch) < 1.5:       # dead zone: ignore very small pitch errors
        pitch = 0
    if abs(yaw2) < 2.5 and abs(pitch2) < 2.5:  # fire only when roughly on target
        fire = 1
    else:
        fire = 0
print("%d,%.2f,%.2f,%.2f,%d" % (COLOR_TYPE,yaw,pitch,dist,fire))5.实现效果
5. Result
https://www.bilibili.com/video/BV1ye4y1h7SG/?vd_so
6. Complete code
The main program:
import cv2
#import serial
from math import *
from user_config import *
#import RPi.GPIO as GPIO

# distance from the apparent width, using the pinhole model
def distance_to_camera(perWidth):
    return (ARMOUR_WIDTH * FOCAL_LENS) / perWidth

def aim():
    global DEBUG_MODE, COLOR_TYPE, HIGHTLIGHT_THR, COLOR_THR
    cap = cv2.VideoCapture(VIDEO_INDEX)                       # open the camera
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))         # 480
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))          # 640
    gun_point = (int(frame_w/2), int(frame_h/2) + GUN_ERROR)  # aim point, offset by the muzzle error
    if DEBUG_MODE == True:
        def get_pixel(event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONDBLCLK:              # double-click to print a pixel position
                print(x, y)
        cv2.namedWindow("orgin_pic")                          # the window must exist before the callback is attached
        cv2.setMouseCallback('orgin_pic', get_pixel)
        def nothing(x):                                       # trackbar callback receives the new position
            pass
        cv2.namedWindow("win_control")
        cv2.createTrackbar('hl_thr', 'win_control', 0, 255, nothing)
        cv2.createTrackbar('cl_thr', 'win_control', 0, 255, nothing)
        cv2.setTrackbarPos('hl_thr', 'win_control', HIGHTLIGHT_THR)
        cv2.setTrackbarPos('cl_thr', 'win_control', COLOR_THR)
    while True:
        areas = []
        ret, frame = cap.read()
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(left_camera_matrix, left_distortion,
                                                          (frame_w, frame_h), 1, (frame_w, frame_h))
        frame = cv2.undistort(frame, left_camera_matrix, left_distortion, None, newcameramtx)  # undistort the frame
        if DEBUG_MODE == True:
            COLOR_THR = cv2.getTrackbarPos('cl_thr', 'win_control')
            HIGHTLIGHT_THR = cv2.getTrackbarPos('hl_thr', 'win_control')
        # matRotate = cv2.getRotationMatrix2D((frame_w*0.5, frame_h*0.5), 180, 1.0)
        # frame = cv2.warpAffine(frame, matRotate, (frame_w, frame_h))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)                            # greyscale
        blur = cv2.GaussianBlur(gray, (3, 3), 0)                                  # denoise
        ret, imged = cv2.threshold(blur, HIGHTLIGHT_THR, 255, cv2.THRESH_BINARY)  # keep highlights
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 10))               # (5,7)
        highlight_img = cv2.dilate(imged, kernel)
        (b, g, r) = cv2.split(frame)
        #COLOR_TYPE = GPIO.input(DETECT_GPIO_PIN)              # read the team colour from a GPIO pin
        COLOR_TYPE = 0
        if COLOR_TYPE == 0:  # red team
            sub_rg = cv2.subtract(r, g)
            sub_rb = cv2.subtract(r, b)
            ret, res_rg = cv2.threshold(sub_rg, COLOR_THR, 255, cv2.THRESH_BINARY)
            ret, res_rb = cv2.threshold(sub_rb, COLOR_THR, 255, cv2.THRESH_BINARY)
            final = cv2.bitwise_and(res_rg, res_rb)
        else:                # blue team
            sub_bg = cv2.subtract(b, g)
            sub_br = cv2.subtract(b, r)
            ret, res_bg = cv2.threshold(sub_bg, COLOR_THR, 255, cv2.THRESH_BINARY)
            ret, res_br = cv2.threshold(sub_br, COLOR_THR, 255, cv2.THRESH_BINARY)
            final = cv2.bitwise_and(res_bg, res_br)
        kernelx = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
        color_img = cv2.dilate(final, kernelx)
        kernele = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        preprocessed = cv2.bitwise_and(color_img, highlight_img)
        preprocessed = cv2.dilate(preprocessed, kernele)
        contours, hierarchy = cv2.findContours(preprocessed, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(frame, contours, -1, (255, 0, 0), 2)
        for contour in contours:
            areas.append(cv2.contourArea(contour))
        try:
            max_id = areas.index(max(areas))
        except ValueError:                                     # no contours in this frame
            continue
        x, y, w, h = cv2.boundingRect(contours[max_id])
        if (w * h > 1000):
            # assumes left_camera_matrix is laid out as [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
            thetaX = degrees(atan2(x + w / 2 - left_camera_matrix[0][2], left_camera_matrix[0][0]))
            thetaY = degrees(atan2(y + h / 2 - left_camera_matrix[1][2], left_camera_matrix[1][1]))
            dist = distance_to_camera(w)
            yield (thetaX, thetaY, dist)
            # drawing
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
            cv2.circle(frame, gun_point, 5, (0, 0, 255), -1)
        cv2.imshow("orgin_pic", frame)
        if DEBUG_MODE == True:
            cv2.imshow("hl_pic", highlight_img)
            cv2.imshow("cl_pic", color_img)
            cv2.imshow("final_pic", preprocessed)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    #ser = serial.Serial(PORT_NAME, BAUD_RATE, timeout=50)
    fire = 0
    #GPIO.setmode(GPIO.BOARD)                                  # configure the pin numbering mode
    #GPIO.setup(DETECT_GPIO_PIN, GPIO.IN)                      # configure the pin as an input
    for (yaw, pitch, dist) in aim():
        yaw2 = yaw
        pitch = pitch - 2
        pitch2 = pitch
        if abs(yaw) < 1.5:
            yaw = 0
        if abs(pitch) < 1.5:
            pitch = 0
        if abs(yaw2) < 2.5 and abs(pitch2) < 2.5:
            fire = 1
        else:
            fire = 0
        print("%d,%.2f,%.2f,%.2f,%d" % (COLOR_TYPE, yaw, pitch, dist, fire))
print("%d,%.2f,%.2f,%.2f,%d" % (COLOR_TYPE,yaw,pitch,dist,fire))import numpy as np
PORT_NAME = "COM8" # 串口名称
BAUD_RATE = 115200
VIDEO_INDEX = 0 # 摄像头索引
DEBUG_MODE = False
COLOR_TYPE = 0 # 灯条颜色参数,0为红,1为蓝
HIGHTLIGHT_THR = 190 # 高光通道阈值
COLOR_THR = 80 # 颜色通道饱和度阈值
ARMOUR_WIDTH =55 # 装甲板宽度,mm
ARMOUR_HEIGHT = 35 # 装甲板高度,mm
RATE = ARMOUR_WIDTH / ARMOUR_HEIGHT # 长宽比
ERROR = 0.1 # 识别到的长宽比误差
RATE_RANGE = ( RATE - ERROR , RATE + ERROR ) # 误差范围
FOCAL_LENS = 618.34872 # 焦距,mm
GUN_ERROR = -60 # 枪口pitch轴误差
DETECT_GPIO_PIN = 23 # 检测的GPIO引脚
# Intrinsic camera matrix. The values below are placeholders only
# (fx = fy = FOCAL_LENS, principal point at the image centre);
# replace them with your own calibration result.
left_camera_matrix = np.array([[618.34872, 0.0, 320.0],
                               [0.0, 618.34872, 240.0],
                               [0.0, 0.0, 1.0]])
# Distortion coefficients (k1, k2, p1, p2, k3)
left_distortion = np.array([[-0.39063 , 0.18552 , 0.00857 , -0.00777, 0.00000]])
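For reference, left_camera_matrix and left_distortion normally come from a chessboard calibration. A minimal sketch with OpenCV follows; the pattern size and the image path are assumptions:

import glob
import cv2
import numpy as np

pattern = (9, 6)                                   # inner corners of the chessboard (assumed)
objp = np.zeros((pattern[0] * pattern[1], 3), np.float32)
objp[:, :2] = np.mgrid[0:pattern[0], 0:pattern[1]].T.reshape(-1, 2)

obj_points, img_points = [], []
for path in glob.glob("calib/*.jpg"):              # calibration photos (assumed path)
    gray = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, pattern)
    if found:
        obj_points.append(objp)
        img_points.append(corners)

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points,
                                                   gray.shape[::-1], None, None)
print(mtx)    # paste into left_camera_matrix
print(dist)   # paste into left_distortion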
The EP doesn't need an extra camera: once it is connected over Wi-Fi you can pull its video stream over TCP.
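If you go that route, a minimal sketch with the official robomaster Python SDK might look like the following (the connection mode is an assumption; check the SDK documentation for your setup):

import cv2
from robomaster import robot

ep_robot = robot.Robot()
ep_robot.initialize(conn_type="sta")               # assumes the EP is on the same Wi-Fi network
ep_robot.camera.start_video_stream(display=False)  # stream frames over the network instead of showing them

while True:
    frame = ep_robot.camera.read_cv2_image(strategy="newest")  # BGR frame usable by the pipeline above
    cv2.imshow("ep_stream", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

ep_robot.camera.stop_video_stream()
ep_robot.close()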
Also, the EP's wheel screws are red; the one in your photos, which doesn't support secondary development, is an S1, not an EP. Why it carries the expansion plate I don't know; maybe you have another EP tucked away, or the manufacturer shipped you black screws?