Environment: Python 3.8, numpy 1.19.5, opencv-python 4.5.1
Note: this post provides only the code and its results; for the underlying theory and details, see https://zhuanlan.zhihu.com/p/54866418
Code files: see the end of the article.
Demo:

I. Contents

1. Camera calibration

2. Video distortion correction

3. Perspective transform

4. Lane line extraction

5. Sliding windows

6. Lane line tracking

7. Curvature and offset calculation

8. Inverse projection onto the original image

9. Lane detection on video

II. Implementation

Note: each section's code builds on the previous section's. To keep the listings readable, helper functions that are unchanged from an earlier section appear only once, in the section that introduces them; copy them back in if you want to run a later section on its own.

1. Camera calibration

import cv2
import glob
import numpy as np


# Step 1: read the images, preprocess them, detect the chessboard corners, and calibrate the camera
def getCameraCalibrationCoefficients(chessboardname, nx, ny):
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.
    images = glob.glob(chessboardname)
    if len(images) > 0:
        print("images num for calibration : ", len(images))
    else:
        print("No image for calibration.")
        return
    ret_count = 0
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_size = (img.shape[1], img.shape[0])
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If found, add object points, image points
        if ret == True:
            ret_count += 1
            objpoints.append(objp)
            imgpoints.append(corners)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    print('Do calibration successfully')
    return ret, mtx, dist, rvecs, tvecs


# Step 2: with the computed distortion parameters, undistort a distorted image
def undistortImage(distortImage, mtx, dist):
    return cv2.undistort(distortImage, mtx, dist, None, mtx)


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1: get the distortion parameters
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    # Read a distorted chessboard image for testing
    test_distort_image = cv2.imread('./camera_cal/calibration4.jpg')
    # Do undistortion
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    # Display
    cv2.imshow('img_0', test_distort_image)
    cv2.imshow('img', test_undistort_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

2. Video distortion correction

import cv2
import glob
import numpy as np


# Step 1 (getCameraCalibrationCoefficients) and Step 2 (undistortImage) are
# identical to Section 1 and are not repeated here.


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1: get the distortion parameters
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    # Video input
    video_input = 'challenge.mp4'
    cap = cv2.VideoCapture(video_input)
    count = 1
    while True:
        ret, image = cap.read()
        if ret:
            undistort_image = undistortImage(image, mtx, dist)
            cv2.imwrite('original_image/' + str(count) + '.jpg', undistort_image)
            count += 1
        else:
            break
    cap.release()

Before correction:

After correction:

3. Perspective transform
"Perspective" is the effect whereby, when a scene is imaged, objects look smaller the farther they are from the camera. In the real world the left and right lane lines are parallel, yet in the image they converge toward a single point in the far distance; this is a consequence of perspective imaging.
Goal: rectify the lane to its real-world shape.
Effect: project the road into a bird's-eye view.
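Before the full listing, a minimal sketch of just this step (my condensed illustration, not part of the original listing; it assumes a 1280x720 road image at the path used later in this section):

import cv2
import numpy as np

frame = cv2.imread('test_img/test2.jpg')  # assumed 1280x720 BGR road image
# Four corners of a trapezoid on the road, and of the target rectangle
src = np.float32([[580, 460], [700, 460], [1096, 720], [200, 720]])
dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
M = cv2.getPerspectiveTransform(src, dst)    # forward: road -> bird's-eye
Minv = cv2.getPerspectiveTransform(dst, src) # inverse, reused in Step 8
birdseye = cv2.warpPerspective(frame, M, (frame.shape[1], frame.shape[0]),
                               flags=cv2.INTER_LINEAR)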

import cv2
import glob
import numpy as np


# Steps 1-2 (getCameraCalibrationCoefficients, undistortImage) are identical
# to Section 1 and are not repeated here.


# Step 3: perspective transform. Warp image based on src_points and dst_points
# The type of src_points & dst_points should be like
# np.float32([ [0,0], [100,200], [200, 300], [300,400]])
def warpImage(image, src_points, dst_points):
    image_size = (image.shape[1], image.shape[0])
    # rows = img.shape[0]  # 720
    # cols = img.shape[1]  # 1280
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    Minv = cv2.getPerspectiveTransform(dst_points, src_points)
    warped_image = cv2.warpPerspective(image, M, image_size, flags=cv2.INTER_LINEAR)
    return warped_image, M, Minv


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1: get the distortion parameters
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    # Read the image
    test_distort_image = cv2.imread('test_img/test2.jpg')
    # Step 2: undistort
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    # Step 3: perspective transform
    # Keep adjusting src and dst until, on a straight road, the warped image looks right
    # Four corners of the trapezoid in the source image
    src = np.float32([[580, 460], [700, 460], [1096, 720], [200, 720]])
    # Four corners of the rectangle in the warped image
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    # Warp
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)
    # Display
    cv2.imshow('img', test_warp_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

After the perspective transform:

4. Lane line extraction
Method: segment the white lane line with the L (lightness) channel of the HLS color space, and the yellow lane line with the b (blue-yellow) channel of the Lab color space; then take the union of the two masks and overlay them in one image to obtain both complete lane lines.
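A minimal sketch of this two-mask union (my condensed version of the hlsLSelect/labBSelect functions in the listing below, using their default thresholds; `img` is assumed to be a BGR bird's-eye frame):

import cv2
import numpy as np

def white_yellow_mask(img, l_thresh=(220, 255), b_thresh=(215, 255)):
    # White lines: HLS L (lightness) channel, normalized to 0-255
    l = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 1].astype(np.float64)
    l = l * (255 / np.max(l))
    # Yellow lines: Lab b (blue-yellow) channel
    b = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)[:, :, 2].astype(np.float64)
    if np.max(b) > 100:  # skip normalization when no yellow is present
        b = b * (255 / np.max(b))
    # Union of the two masks
    mask = np.zeros(img.shape[:2], np.uint8)
    mask[((l > l_thresh[0]) & (l <= l_thresh[1])) |
         ((b > b_thresh[0]) & (b <= b_thresh[1]))] = 255
    return mask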

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt


# Steps 1-3 (getCameraCalibrationCoefficients, undistortImage, warpImage) are
# identical to the previous sections and are not repeated here.


# Step 4: create a thresholded binary image
# Lightness (HLS L channel) threshold function
def hlsLSelect(img, thresh=(220, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    l_channel = hls[:, :, 1]
    l_channel = l_channel*(255/np.max(l_channel))
    binary_output = np.zeros_like(l_channel)
    binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 255
    return binary_output


# Saturation (HLS S channel) threshold function
def hlsSSelect(img, thresh=(125, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    s_channel = hls[:, :, 2]
    s_channel = s_channel*(255/np.max(s_channel))
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
    return binary_output


def dirThreshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    direction_sobelxy = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(direction_sobelxy)
    binary_output[(direction_sobelxy >= thresh[0]) & (direction_sobelxy <= thresh[1])] = 1
    # 6) Return the binary image
    return binary_output


def magThreshold(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, sobel_kernel)
    # 3) Calculate the magnitude
    abs_sobelxy = np.sqrt(sobelx * sobelx + sobely * sobely)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scaled_sobelxy = np.uint8(abs_sobelxy/np.max(abs_sobelxy) * 255)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(scaled_sobelxy)
    binary_output[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy <= mag_thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output


def absSobelThreshold(img, orient='x', thresh_min=0, thresh_max=255):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # Create a copy and apply the threshold
    # binary_output = np.zeros_like(scaled_sobel)
    # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
    # binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # Return the result
    return scaled_sobel


# Lab b (blue-yellow) channel threshold function
def labBSelect(img, thresh=(215, 255)):
    # 1) Convert to Lab color space
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lab_b = lab[:, :, 2]
    # don't normalize if there are no yellows in the image
    if np.max(lab_b) > 100:
        lab_b = lab_b*(255/np.max(lab_b))
    # 2) Apply a threshold to the b channel
    binary_output = np.zeros_like(lab_b)
    binary_output[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 255
    # 3) Return a binary image of threshold result
    return binary_output


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1: get the distortion parameters
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    # Read the image
    test_distort_image = cv2.imread('test_img/test3.jpg')
    # Step 2: undistort
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    # Step 3: perspective transform
    # Keep adjusting src and dst until, on a straight road, the warped image looks right
    # Four corners of the trapezoid in the source image
    src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
    # Four corners of the rectangle in the warped image
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    # Warp to get a bird's-eye-like view
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)
    # Step 4: extract the lane lines
    # Option 1: Sobel operator
    # min_thresh = 30
    # max_thresh = 100
    # Result_sobelAbs = absSobelThreshold(test_warp_image, 'x', min_thresh, max_thresh)
    # Option 2: HLS S channel, keeps both the yellow and the white lane lines
    # min_thresh = 150
    # max_thresh = 255
    # Result_hlsS = hlsSSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 3: HLS L channel, keeps only the white lane line
    # min_thresh = 215
    # max_thresh = 255
    # Result_hlsL = hlsLSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 4: Lab b channel, keeps only the yellow lane line
    # min_thresh = 195
    # max_thresh = 255
    # Result_labB = labBSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 5: combine options 1-4
    # sx_binary = absSobelThreshold(test_warp_image, 'x', 30, 150)
    # hlsL_binary = hlsLSelect(test_warp_image)
    # labB_binary = labBSelect(test_warp_image)
    # combined_binary = np.zeros_like(sx_binary)
    # combined_binary[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Option 6: HLS + Lab is enough; same result as option 5 without the Sobel operator
    hlsL_binary = hlsLSelect(test_warp_image)
    labB_binary = labBSelect(test_warp_image)
    combined_line_img = np.zeros_like(hlsL_binary)
    combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Display
    cv2.imshow('img_0', combined_line_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Lane extraction result:

5. Sliding windows
Method: build a column histogram of lit pixels; the peak column on each side gives the center of that line's first (bottom) window. Then average the x coordinates of the pixels inside each window to recenter the next window above it, and repeat until all windows for the left and right lines are placed.
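The initial window centers come straight from a column histogram of the bottom half of the binary image; a minimal, self-contained sketch of that seeding step (the toy mask below is my stand-in for the Step 4 output):

import numpy as np

# Toy binary mask standing in for the Step 4 output (720x1280, two "lines")
binary_warped = np.zeros((720, 1280), np.uint8)
binary_warped[:, 320] = 255   # fake left line
binary_warped[:, 960] = 255   # fake right line

# Column histogram over the bottom half; its two peaks seed the first windows
histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
midpoint = histogram.shape[0] // 2
leftx_base = np.argmax(histogram[:midpoint])              # -> 320
rightx_base = np.argmax(histogram[midpoint:]) + midpoint  # -> 960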

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt


#################################################################
# Steps 1-4 (getCameraCalibrationCoefficients, undistortImage, warpImage and
# the threshold functions) are identical to the previous sections and are not
# repeated here.


# Step 5: detect lane lines with rectangular sliding windows
#################################################################
def find_lane_pixels(binary_warped, nwindows, margin, minpix):
    # Take a column-wise histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Position of the first window on each side
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Place the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels within the window;
        # nonzero()[0] gives the indices of the elements that satisfy the condition
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean position of the current window's pixels
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img


# Color the pixels found inside the sliding windows and fit a polynomial to each line
def fit_polynomial(binary_warped, nwindows=9, margin=100, minpix=50):
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped, nwindows, margin, minpix)
    # Fit a second order polynomial to each using `np.polyfit`
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left_fit` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty
    # Visualization: color in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    # Plot the left and right polynomials on the lane lines
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    return out_img, left_fit, right_fit, ploty


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Steps 1-4: calibrate, undistort, warp and threshold (same as Section 4)
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    test_distort_image = cv2.imread('test_img/test3.jpg')
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)
    hlsL_binary = hlsLSelect(test_warp_image)
    labB_binary = labBSelect(test_warp_image)
    combined_line_img = np.zeros_like(hlsL_binary)
    combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Step 5: sliding windows
    out_img, left_fit, right_fit, ploty = fit_polynomial(combined_line_img, nwindows=9, margin=80, minpix=40)
    # Display
    cv2.imshow('img_0', out_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Sliding-window result:

6. Lane line tracking
Method: fit a curve through the points of each lane line, then offset each fitted curve left and right by a fixed margin to obtain four curves; filling between each pair with cv2's fillPoly yields two shaded search corridors.
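The corridor test at the heart of this step is a single vectorized comparison; a minimal sketch (my illustration: `binary_warped` is assumed to be the Step 4 mask, `fit` a second-order polynomial [A, B, C] from the previous frame, and `margin` the corridor half-width in pixels):

import numpy as np

def pixels_in_corridor(binary_warped, fit, margin=60):
    # Coordinates of all lit pixels
    nonzeroy, nonzerox = binary_warped.nonzero()
    # x position of the previous frame's curve at each pixel's row
    center = fit[0] * nonzeroy**2 + fit[1] * nonzeroy + fit[2]
    # Keep only pixels within +/- margin of that curve
    keep = (nonzerox > center - margin) & (nonzerox < center + margin)
    return nonzerox[keep], nonzeroy[keep]  # pixels to refit this frame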

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt


#################################################################
# Steps 1-5 (calibration, undistortion, warpImage, the threshold functions,
# find_lane_pixels and fit_polynomial) are identical to the previous sections
# and are not repeated here.


# Step 6: track lane lines based on the latest lane line result
#################################################################
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    # Fit a second order polynomial to each line with np.polyfit()
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
    # Calc both polynomials using ploty, left_fit and right_fit
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    return left_fitx, right_fitx, ploty, left_fit, right_fit


def search_around_poly(binary_warped, left_fit, right_fit):
    # HYPERPARAMETER
    # Width of the margin around the previous polynomial to search
    margin = 60
    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Set the area of search based on activated x-values within the
    # +/- margin of the previous polynomial
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                      left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                      left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                       right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                       right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit new polynomials
    left_fitx, right_fitx, ploty, left_fit, right_fit = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
    # Visualization
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # Offset each fitted curve by +/- margin and fill in between;
    # right_line_pts has shape (1, 1440, 2)
    # Generate a polygon to illustrate the search window area
    # And recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))
    # Draw the regions enclosed by the curves; window_img holds the green shading
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
    # Plot the polynomial lines onto the image
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    # plt.show()
    # End visualization steps
    return result, left_fit, right_fit, ploty


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Steps 1-4: calibrate, undistort, warp and threshold (same as Section 4)
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    test_distort_image = cv2.imread('test_img/test3.jpg')
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)
    hlsL_binary = hlsLSelect(test_warp_image)
    labB_binary = labBSelect(test_warp_image)
    combined_line_img = np.zeros_like(hlsL_binary)
    combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Step 5: sliding windows
    out_img, left_fit, right_fit, ploty = fit_polynomial(combined_line_img, nwindows=9, margin=80, minpix=40)
    # Step 6: track the lane lines
    track_result, track_left_fit, track_right_fit, ploty = search_around_poly(combined_line_img, left_fit, right_fit)
    # Display
    cv2.imshow('img_0', track_result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Lane tracking result:

7. Curvature and offset calculation
Offset: the vehicle's displacement from the centerline between the left and right lane lines.
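For reference, the radius of curvature of a curve x = A*y^2 + B*y + C is R = (1 + (2*A*y + B)^2)^(3/2) / |2*A|. A minimal sketch of the curvature-and-offset math used below (my illustration; the meters-per-pixel scales are the listing's assumed values for a 720x1280 warp):

import numpy as np

def curvature_and_offset(left_fit, right_fit, y_eval=719.0,
                         ym_per_pix=30 / 720, xm_per_pix=3.7 / 700):
    # R = (1 + (2*A*y + B)^2)^1.5 / |2*A|, evaluated at the image bottom
    def radius(fit):
        return ((1 + (2 * fit[0] * y_eval * ym_per_pix + fit[1]) ** 2) ** 1.5
                / np.absolute(2 * fit[0]))
    # Lane center at the bottom row vs. the image midpoint (camera center)
    lane_center = (np.polyval(left_fit, y_eval) + np.polyval(right_fit, y_eval)) / 2
    offset = (1280 / 2 - lane_center) * xm_per_pix  # positive: camera right of center
    return radius(left_fit), radius(right_fit), offset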

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt


#################################################################
# Steps 1-6 (calibration, undistortion, warpImage, the threshold functions,
# find_lane_pixels, fit_polynomial, fit_poly and search_around_poly) are
# identical to the previous sections and are not repeated here.


# Step 7: curvature and offset calculation
#################################################################
def measure_curvature_real(left_fit_cr, right_fit_cr, ploty,
                           ym_per_pix=30/720, xm_per_pix=3.7/700):
    '''
    Calculates the curvature of polynomial functions in meters.
    '''
    # Define y-value where we want radius of curvature
    # We'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)
    # Calculation of R_curve (radius of curvature)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # Evaluate each quadratic at the bottom of the image (y = 720)
    left_position = left_fit_cr[0]*720**2 + left_fit_cr[1]*720 + left_fit_cr[2]
    right_position = right_fit_cr[0]*720**2 + right_fit_cr[1]*720 + right_fit_cr[2]
    midpoint = 1280/2
    lane_center = (left_position + right_position)/2
    offset = (midpoint - lane_center) * xm_per_pix
    return left_curverad, right_curverad, offset


if __name__ == "__main__":
    nx = 9
    ny = 6
    # Steps 1-4: calibrate, undistort, warp and threshold (same as Section 4)
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)
    test_distort_image = cv2.imread('test_img/test3.jpg')
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)
    src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)
    hlsL_binary = hlsLSelect(test_warp_image)
    labB_binary = labBSelect(test_warp_image)
    combined_line_img = np.zeros_like(hlsL_binary)
    combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Step 5: sliding windows
    out_img, left_fit, right_fit, ploty = fit_polynomial(combined_line_img, nwindows=9, margin=80, minpix=40)
    # Step 6: track the lane lines
    track_result, track_left_fit, track_right_fit, ploty = search_around_poly(combined_line_img, left_fit, right_fit)
    # Step 7: curvature and offset
    left_curverad, right_curverad, offset = measure_curvature_real(track_left_fit, track_right_fit, ploty)
    average_curverad = (left_curverad + right_curverad)/2
    print(left_curverad, 'm', right_curverad, 'm', average_curverad, 'm')
    print('offset : ', offset, 'm')
    # Display
    # cv2.imshow('img_0', track_result)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

Curvature and offset results:

8. Inverse projection onto the original image
Method: take the fitted left and right lane curves, fill the region between them, and draw it on the bird's-eye view; then warp that overlay back onto the original image with the inverse perspective matrix. (Step 3 computed two matrices, M and Minv: M performs the perspective transform, and Minv undoes it.)
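A minimal sketch of this back-projection (my illustration under the stated assumptions: left_fitx/right_fitx/ploty as produced in Step 6, Minv from Step 3, and an undistorted BGR frame undist):

import cv2
import numpy as np

def project_lane(undist, left_fitx, right_fitx, ploty, Minv):
    # Draw the filled lane polygon on a blank bird's-eye canvas
    color_warp = np.zeros_like(undist)
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    cv2.fillPoly(color_warp, np.int_([np.hstack((pts_left, pts_right))]), (0, 255, 0))
    # Warp back with the inverse matrix and blend onto the undistorted frame
    newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
    return cv2.addWeighted(undist, 1, newwarp, 0.3, 0)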

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

# Step 1 : Read the chessboard images, detect corners, and calibrate the camera
#################################################################
def getCameraCalibrationCoefficients(chessboardname, nx, ny):
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.
    images = glob.glob(chessboardname)
    if len(images) > 0:
        print("images num for calibration : ", len(images))
    else:
        print("No image for calibration.")
        return
    ret_count = 0
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_size = (img.shape[1], img.shape[0])
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If found, add object points, image points
        if ret == True:
            ret_count += 1
            objpoints.append(objp)
            imgpoints.append(corners)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    print('Do calibration successfully')
    return ret, mtx, dist, rvecs, tvecs

# Step 2 : Undistort an image with the calibrated distortion coefficients
def undistortImage(distortImage, mtx, dist):
    return cv2.undistort(distortImage, mtx, dist, None, mtx)

# Step 3 : Perspective transform - warp image based on src_points and dst_points
#################################################################
# The type of src_points & dst_points should be like
# np.float32([ [0,0], [100,200], [200, 300], [300,400]])
def warpImage(image, src_points, dst_points):
    image_size = (image.shape[1], image.shape[0])
    # rows = image.shape[0] -> 720
    # cols = image.shape[1] -> 1280
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    Minv = cv2.getPerspectiveTransform(dst_points, src_points)
    warped_image = cv2.warpPerspective(image, M, image_size, flags=cv2.INTER_LINEAR)
    return warped_image, M, Minv

# Step 4 : Create a thresholded binary image
#################################################################
# Lightness (HLS L channel) threshold
def hlsLSelect(img, thresh=(220, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    l_channel = hls[:, :, 1]
    l_channel = l_channel*(255/np.max(l_channel))
    binary_output = np.zeros_like(l_channel)
    binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 255
    return binary_output

# Saturation (HLS S channel) threshold
def hlsSSelect(img, thresh=(125, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    s_channel = hls[:, :, 2]
    s_channel = s_channel*(255/np.max(s_channel))
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
    return binary_output

def dirThreshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    direction_sobelxy = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(direction_sobelxy)
    binary_output[(direction_sobelxy >= thresh[0]) & (direction_sobelxy <= thresh[1])] = 1
    # 6) Return the binary image
    return binary_output

def magThreshold(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, sobel_kernel)
    # 3) Calculate the magnitude
    abs_sobelxy = np.sqrt(sobelx * sobelx + sobely * sobely)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scaled_sobelxy = np.uint8(abs_sobelxy/np.max(abs_sobelxy) * 255)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(scaled_sobelxy)
    binary_output[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy <= mag_thresh[1])] = 1
    # 6) Return this mask as the binary_output image
    return binary_output

def absSobelThreshold(img, orient='x', thresh_min=0, thresh_max=255):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # The binary threshold is left commented out, so this returns the scaled gradient
    # binary_output = np.zeros_like(scaled_sobel)
    # binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return scaled_sobel

# Lab b-channel (blue-yellow axis) threshold
def labBSelect(img, thresh=(215, 255)):
    # 1) Convert to Lab color space
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lab_b = lab[:, :, 2]
    # don't normalize if there are no yellows in the image
    if np.max(lab_b) > 100:
        lab_b = lab_b*(255/np.max(lab_b))
    # 2) Apply a threshold to the b channel
    binary_output = np.zeros_like(lab_b)
    binary_output[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 255
    # 3) Return a binary image of threshold result
    return binary_output

# Step 5 : Detect lane lines with a sliding-window search
#################################################################
def find_lane_pixels(binary_warped, nwindows, margin, minpix):
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)

        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    return leftx, lefty, rightx, righty, out_img

def fit_polynomial(binary_warped, nwindows=9, margin=100, minpix=50):
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped, nwindows, margin, minpix)

    # Fit a second order polynomial to each using `np.polyfit`
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left_fit` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty

    # Visualization: color in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]

    # Plots the left and right polynomials on the lane lines
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')

    return out_img, left_fit, right_fit, ploty

# Step 6 : Track lane lines based on the latest fit
#################################################################
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    # Fit a second order polynomial to each line with np.polyfit()
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
    # Evaluate both polynomials over ploty
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    return left_fitx, right_fitx, ploty, left_fit, right_fit

def search_around_poly(binary_warped, left_fit, right_fit):
    # HYPERPARAMETER
    # Width of the margin around the previous polynomial to search
    margin = 60

    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Set the search area to +/- margin around the previous polynomial
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                      left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                      left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                       right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                       right_fit[1]*nonzeroy + right_fit[2] + margin)))

    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit new polynomials
    left_fitx, right_fitx, ploty, left_fit, right_fit = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)

    # # Visualization # #
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    # Offset each fitted curve by +/- margin to get the band edges, then fill
    # between them; right_line_pts has shape (1, 1440, 2)
    # Generate a polygon to illustrate the search window area
    # and recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    # Draw the enclosed search region; window_img holds the green shading
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

    # Plot the polynomial lines onto the image
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    # plt.show()
    # # End visualization steps # #

    return result, left_fit, right_fit, ploty

# Step 7 : Curvature and offset calculation
#################################################################
def measure_curvature_real(left_fit_cr, right_fit_cr, ploty, ym_per_pix=30/720, xm_per_pix=3.7/700):
    '''
    Calculates the curvature of polynomial functions in meters.
    '''
    # Define y-value where we want radius of curvature
    # We'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)

    # Calculation of R_curve (radius of curvature)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    # Lane x-positions at the bottom of the image (y = 720);
    # the quadratic term is evaluated at 720**2
    left_position = left_fit_cr[0]*720**2 + left_fit_cr[1]*720 + left_fit_cr[2]
    right_position = right_fit_cr[0]*720**2 + right_fit_cr[1]*720 + right_fit_cr[2]
    midpoint = 1280/2
    lane_center = (left_position + right_position)/2
    offset = (midpoint - lane_center) * xm_per_pix

    return left_curverad, right_curverad, offset

# Step 8 : Draw lane line result on undistorted image
#################################################################
# Unlike the two filled bands in search_around_poly, this fills the single
# region between the two lines and projects it back onto the original image
def drawing(undist, bin_warped, color_warp, left_fitx, right_fitx):
    # Create an image to draw the lines on
    # (ploty and Minv are read from the module-level __main__ code below;
    #  the color_warp argument is replaced by a blank canvas here)
    warp_zero = np.zeros_like(bin_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    return result

def draw_text(image, curverad, offset):
    result = np.copy(image)
    font = cv2.FONT_HERSHEY_SIMPLEX  # default OpenCV font
    text = 'Curve Radius : ' + '{:04.2f}'.format(curverad) + 'm'
    cv2.putText(result, text, (20, 50), font, 1.2, (255, 255, 255), 2)
    if offset > 0:
        text = 'right of center : ' + '{:04.3f}'.format(abs(offset)) + 'm '
    else:
        text = 'left  of center : ' + '{:04.3f}'.format(abs(offset)) + 'm '
    cv2.putText(result, text, (20, 100), font, 1.2, (255, 255, 255), 2)
    return result

if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1 Get the distortion coefficients
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)

    # Read the test image
    test_distort_image = cv2.imread('test_img/test3.jpg')

    # Step 2 Undistort
    test_undistort_image = undistortImage(test_distort_image, mtx, dist)

    # Step 3 Perspective transform
    # Keep adjusting src and dst until a straight-road frame warps into parallel, vertical lanes
    # Four corners of the trapezoid in the source image
    src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
    # Four corners of the rectangle in the destination image
    dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
    # Warp to the bird's-eye view
    test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)

    # Step 4 Extract lane lines
    # Option 1: Sobel operator
    # min_thresh = 30
    # max_thresh = 100
    # Result_sobelAbs = absSobelThreshold(test_warp_image, 'x', min_thresh, max_thresh)
    # Option 2: HLS S channel, keeps yellow and white lane lines
    # min_thresh = 150
    # max_thresh = 255
    # Result_hlsS = hlsSSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 3: HLS L channel, keeps only white lane lines
    # min_thresh = 215
    # max_thresh = 255
    # Result_hlsL = hlsLSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 4: Lab b channel, keeps only yellow lane lines
    # min_thresh = 195
    # max_thresh = 255
    # Result_labB = labBSelect(test_warp_image, (min_thresh, max_thresh))
    # Option 5: combine options 1-4
    # sx_binary = absSobelThreshold(test_warp_image, 'x', 30, 150)
    # hlsL_binary = hlsLSelect(test_warp_image)
    # labB_binary = labBSelect(test_warp_image)
    # combined_binary = np.zeros_like(sx_binary)
    # combined_binary[(hlsL_binary == 255) | (labB_binary == 255)] = 255
    # Option 6: the HLS L and Lab b channels are enough; same result as option 5, Sobel omitted
    hlsL_binary = hlsLSelect(test_warp_image)
    labB_binary = labBSelect(test_warp_image)
    combined_line_img = np.zeros_like(hlsL_binary)
    combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255

    # Step 5 Sliding-window search
    out_img, left_fit, right_fit, ploty = fit_polynomial(combined_line_img, nwindows=9, margin=80, minpix=40)

    # Step 6 Track lane lines
    track_result, track_left_fit, track_right_fit, plotys = search_around_poly(combined_line_img, left_fit, right_fit)

    # Step 7 Curvature and offset
    left_curverad, right_curverad, offset = measure_curvature_real(track_left_fit, track_right_fit, plotys)
    average_curverad = (left_curverad + right_curverad)/2
    # print(left_curverad, 'm', right_curverad, 'm', average_curverad, 'm')
    # print('offset : ', offset, 'm')

    # Step 8 Project back onto the original image
    left_fitx = track_left_fit[0]*ploty**2 + track_left_fit[1]*ploty + track_left_fit[2]
    right_fitx = track_right_fit[0]*ploty**2 + track_right_fit[1]*ploty + track_right_fit[2]
    result = drawing(test_undistort_image, combined_line_img, test_warp_image, left_fitx, right_fitx)
    text_result = draw_text(result, average_curverad, offset)

    # Show result
    cv2.imshow('img_0', text_result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Inverse projection result:

9、Lane line detection on video

import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt

# Step 1 : Read the chessboard images, detect corners, and calibrate the camera
#################################################################
def getCameraCalibrationCoefficients(chessboardname, nx, ny):
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.
    images = glob.glob(chessboardname)
    if len(images) > 0:
        print("images num for calibration : ", len(images))
    else:
        print("No image for calibration.")
        return
    ret_count = 0
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img_size = (img.shape[1], img.shape[0])
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If found, add object points, image points
        if ret == True:
            ret_count += 1
            objpoints.append(objp)
            imgpoints.append(corners)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
    print('Do calibration successfully')
    return ret, mtx, dist, rvecs, tvecs

# Step 2 : Undistort an image with the calibrated distortion coefficients
def undistortImage(distortImage, mtx, dist):
    return cv2.undistort(distortImage, mtx, dist, None, mtx)

# Step 3 : Perspective transform - warp image based on src_points and dst_points
#################################################################
# The type of src_points & dst_points should be like
# np.float32([ [0,0], [100,200], [200, 300], [300,400]])
def warpImage(image, src_points, dst_points):
    image_size = (image.shape[1], image.shape[0])
    # rows = image.shape[0] -> 720
    # cols = image.shape[1] -> 1280
    M = cv2.getPerspectiveTransform(src_points, dst_points)
    Minv = cv2.getPerspectiveTransform(dst_points, src_points)
    warped_image = cv2.warpPerspective(image, M, image_size, flags=cv2.INTER_LINEAR)
    return warped_image, M, Minv

# Step 4 : Create a thresholded binary image
#################################################################
# Lightness (HLS L channel) threshold
def hlsLSelect(img, thresh=(220, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    l_channel = hls[:, :, 1]
    l_channel = l_channel*(255/np.max(l_channel))
    binary_output = np.zeros_like(l_channel)
    binary_output[(l_channel > thresh[0]) & (l_channel <= thresh[1])] = 255
    return binary_output

# Saturation (HLS S channel) threshold
def hlsSSelect(img, thresh=(125, 255)):
    hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
    s_channel = hls[:, :, 2]
    s_channel = s_channel*(255/np.max(s_channel))
    binary_output = np.zeros_like(s_channel)
    binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
    return binary_output

def dirThreshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    direction_sobelxy = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(direction_sobelxy)
    binary_output[(direction_sobelxy >= thresh[0]) & (direction_sobelxy <= thresh[1])] = 1
    # 6) Return the binary image
    return binary_output

def magThreshold(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, sobel_kernel)
    # 3) Calculate the magnitude
    abs_sobelxy = np.sqrt(sobelx * sobelx + sobely * sobely)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scaled_sobelxy = np.uint8(abs_sobelxy/np.max(abs_sobelxy) * 255)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(scaled_sobelxy)
    binary_output[(scaled_sobelxy >= mag_thresh[0]) & (scaled_sobelxy <= mag_thresh[1])] = 1
    # 6) Return this mask as the binary_output image
    return binary_output

def absSobelThreshold(img, orient='x', thresh_min=0, thresh_max=255):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
    # The binary threshold is left commented out, so this returns the scaled gradient
    # binary_output = np.zeros_like(scaled_sobel)
    # binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    return scaled_sobel

# Lab b-channel (blue-yellow axis) threshold
def labBSelect(img, thresh=(215, 255)):
    # 1) Convert to Lab color space
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)
    lab_b = lab[:, :, 2]
    # don't normalize if there are no yellows in the image
    if np.max(lab_b) > 100:
        lab_b = lab_b*(255/np.max(lab_b))
    # 2) Apply a threshold to the b channel
    binary_output = np.zeros_like(lab_b)
    binary_output[((lab_b > thresh[0]) & (lab_b <= thresh[1]))] = 255
    # 3) Return a binary image of threshold result
    return binary_output

# Step 5 : Detect lane lines with a sliding-window search
#################################################################
def find_lane_pixels(binary_warped, nwindows, margin, minpix):
    # Take a histogram of the bottom half of the image
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = np.int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Set height of windows - based on nwindows above and image shape
    window_height = np.int(binary_warped.shape[0]//nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated later for each window in nwindows
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw the windows on the visualization image
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)

        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]

        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    # Concatenate the arrays of indices (previously was a list of lists of pixels)
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    return leftx, lefty, rightx, righty, out_img

def fit_polynomial(binary_warped, nwindows=9, margin=100, minpix=50):
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped, nwindows, margin, minpix)

    # Fit a second order polynomial to each using `np.polyfit`
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left_fit` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty

    # Visualization: color in the left and right lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]

    # Plots the left and right polynomials on the lane lines
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')

    return out_img, left_fit, right_fit, ploty

# Step 6 : Track lane lines based on the latest fit
#################################################################
def fit_poly(img_shape, leftx, lefty, rightx, righty):
    # Fit a second order polynomial to each line with np.polyfit()
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    # Generate x and y values for plotting
    ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
    # Evaluate both polynomials over ploty
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    return left_fitx, right_fitx, ploty, left_fit, right_fit

def search_around_poly(binary_warped, left_fit, right_fit):
    # HYPERPARAMETER
    # Width of the margin around the previous polynomial to search
    margin = 60

    # Grab activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Set the search area to +/- margin around the previous polynomial
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
                      left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
                      left_fit[1]*nonzeroy + left_fit[2] + margin)))
    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
                       right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
                       right_fit[1]*nonzeroy + right_fit[2] + margin)))

    # Again, extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    # Fit new polynomials
    left_fitx, right_fitx, ploty, left_fit, right_fit = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)

    # # Visualization # #
    # Create an image to draw on and an image to show the selection window
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    window_img = np.zeros_like(out_img)
    # Color in left and right line pixels
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    # Offset each fitted curve by +/- margin to get the band edges, then fill
    # between them; right_line_pts has shape (1, 1440, 2)
    # Generate a polygon to illustrate the search window area
    # and recast the x and y points into usable format for cv2.fillPoly()
    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    # Draw the enclosed search region; window_img holds the green shading
    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
    result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

    # Plot the polynomial lines onto the image
    # plt.plot(left_fitx, ploty, color='yellow')
    # plt.plot(right_fitx, ploty, color='yellow')
    # plt.show()
    # # End visualization steps # #

    return result, left_fit, right_fit, ploty

# Step 7 : Curvature and offset calculation
#################################################################
def measure_curvature_real(left_fit_cr, right_fit_cr, ploty, ym_per_pix=30/720, xm_per_pix=3.7/700):
    '''
    Calculates the curvature of polynomial functions in meters.
    '''
    # Define y-value where we want radius of curvature
    # We'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(ploty)

    # Calculation of R_curve (radius of curvature)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])

    # Lane x-positions at the bottom of the image (y = 720);
    # the quadratic term is evaluated at 720**2
    left_position = left_fit_cr[0]*720**2 + left_fit_cr[1]*720 + left_fit_cr[2]
    right_position = right_fit_cr[0]*720**2 + right_fit_cr[1]*720 + right_fit_cr[2]
    midpoint = 1280/2
    lane_center = (left_position + right_position)/2
    offset = (midpoint - lane_center) * xm_per_pix

    return left_curverad, right_curverad, offset

# Step 8 : Draw lane line result on undistorted image
#################################################################
# Unlike the two filled bands in search_around_poly, this fills the single
# region between the two lines and projects it back onto the original image
def drawing(undist, bin_warped, color_warp, left_fitx, right_fitx):
    # Create an image to draw the lines on
    # (ploty and Minv are read from the module-level __main__ code below;
    #  the color_warp argument is replaced by a blank canvas here)
    warp_zero = np.zeros_like(bin_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))

    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (undist.shape[1], undist.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    return result

def draw_text(image, curverad, offset):
    result = np.copy(image)
    font = cv2.FONT_HERSHEY_SIMPLEX  # default OpenCV font
    text = 'Curve Radius : ' + '{:04.2f}'.format(curverad) + 'm'
    cv2.putText(result, text, (20, 50), font, 1.2, (255, 255, 255), 2)
    if offset > 0:
        text = 'right of center : ' + '{:04.3f}'.format(abs(offset)) + 'm '
    else:
        text = 'left  of center : ' + '{:04.3f}'.format(abs(offset)) + 'm '
    cv2.putText(result, text, (20, 100), font, 1.2, (255, 255, 255), 2)
    return result

if __name__ == "__main__":
    nx = 9
    ny = 6
    # Step 1 Get the distortion coefficients
    rets, mtx, dist, rvecs, tvecs = getCameraCalibrationCoefficients('camera_cal/calibration*.jpg', nx, ny)

    # Open the input video
    cap = cv2.VideoCapture("solidYellowLeft.mp4")
    ret, frame = cap.read()
    # Writer for the result video
    out = cv2.VideoWriter('output1.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25,
                          (frame.shape[1], frame.shape[0]))
    while ret:
        test_distort_image = frame

        # Step 2 Undistort
        test_undistort_image = undistortImage(test_distort_image, mtx, dist)

        # Step 3 Perspective transform
        # Keep adjusting src and dst until a straight-road frame warps into parallel, vertical lanes
        # Four corners of the trapezoid in the source image
        src = np.float32([[580, 440], [700, 440], [1100, 720], [200, 720]])
        # Four corners of the rectangle in the destination image
        dst = np.float32([[300, 0], [950, 0], [950, 720], [300, 720]])
        # Warp to the bird's-eye view
        test_warp_image, M, Minv = warpImage(test_undistort_image, src, dst)

        # Step 4 Extract lane lines
        # Option 1: Sobel operator
        # min_thresh = 30
        # max_thresh = 100
        # Result_sobelAbs = absSobelThreshold(test_warp_image, 'x', min_thresh, max_thresh)
        # Option 2: HLS S channel, keeps yellow and white lane lines
        # min_thresh = 150
        # max_thresh = 255
        # Result_hlsS = hlsSSelect(test_warp_image, (min_thresh, max_thresh))
        # Option 3: HLS L channel, keeps only white lane lines
        # min_thresh = 215
        # max_thresh = 255
        # Result_hlsL = hlsLSelect(test_warp_image, (min_thresh, max_thresh))
        # Option 4: Lab b channel, keeps only yellow lane lines
        # min_thresh = 195
        # max_thresh = 255
        # Result_labB = labBSelect(test_warp_image, (min_thresh, max_thresh))
        # Option 5: combine options 1-4
        # sx_binary = absSobelThreshold(test_warp_image, 'x', 30, 150)
        # hlsL_binary = hlsLSelect(test_warp_image)
        # labB_binary = labBSelect(test_warp_image)
        # combined_binary = np.zeros_like(sx_binary)
        # combined_binary[(hlsL_binary == 255) | (labB_binary == 255)] = 255
        # Option 6: the HLS L and Lab b channels are enough; same result as option 5, Sobel omitted
        hlsL_binary = hlsLSelect(test_warp_image)
        labB_binary = labBSelect(test_warp_image)
        combined_line_img = np.zeros_like(hlsL_binary)
        combined_line_img[(hlsL_binary == 255) | (labB_binary == 255)] = 255

        # Step 5 Sliding-window search
        out_img, left_fit, right_fit, ploty = fit_polynomial(combined_line_img, nwindows=9, margin=80, minpix=40)

        # Step 6 Track lane lines
        track_result, track_left_fit, track_right_fit, plotys = search_around_poly(combined_line_img, left_fit, right_fit)

        # Step 7 Curvature and offset
        left_curverad, right_curverad, offset = measure_curvature_real(track_left_fit, track_right_fit, plotys)
        average_curverad = (left_curverad + right_curverad)/2
        # print(left_curverad, 'm', right_curverad, 'm', average_curverad, 'm')
        # print('offset : ', offset, 'm')

        # Step 8 Project back onto the original image
        left_fitx = track_left_fit[0]*ploty**2 + track_left_fit[1]*ploty + track_left_fit[2]
        right_fitx = track_right_fit[0]*ploty**2 + track_right_fit[1]*ploty + track_right_fit[2]
        result = drawing(test_undistort_image, combined_line_img, test_warp_image, left_fitx, right_fitx)
        text_result = draw_text(result, average_curverad, offset)

        # Show the current frame
        cv2.imshow('img_0', text_result)
        cv2.waitKey(1)

        # Write the processed frame to the output video
        out.write(text_result)

        # Read the next frame; the loop exits when the video ends
        ret, frame = cap.read()
    cap.release()
    out.release()  # finalize the output file
    cv2.destroyAllWindows()
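One caveat in the loop above: the output is written at a hard-coded 25 fps, so playback speed will be wrong if the source video uses a different frame rate. A small adjustment, sketched here with the standard cv2.CAP_PROP_FPS property, reads the rate from the capture instead:

# Query the input frame rate instead of hard-coding 25 fps
fps = cap.get(cv2.CAP_PROP_FPS)
if fps <= 0:  # some containers report 0; fall back to a default
    fps = 25
out = cv2.VideoWriter('output1.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
                      fps, (frame.shape[1], frame.shape[0]))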

Usage notes:
The pipeline detects curved lane lines well, but its robustness is limited: moving to a new scene requires re-tuning the parameters and re-calibrating the camera. The source and destination points of the perspective transform are the most sensitive part and usually take repeated trial and error to get right.
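One way to shorten that trial-and-error loop is a small preview helper, sketched below; preview_warp is a hypothetical name and is not part of the original code. It overlays a candidate src polygon on an undistorted straight-road frame and shows the warped result, so src and dst can be adjusted until the lane lines come out parallel and vertical.

import cv2
import numpy as np

def preview_warp(undist, src, dst):
    # Draw the candidate src polygon on the undistorted frame
    vis = undist.copy()
    cv2.polylines(vis, [src.astype(np.int32)], isClosed=True, color=(0, 0, 255), thickness=2)
    # Warp with the candidate transform and show both views
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(undist, M, (undist.shape[1], undist.shape[0]))
    cv2.imshow('src polygon', vis)
    cv2.imshow('warped preview', warped)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

Called with the src and dst arrays from the scripts above, straight lane markings should map to two vertical lines in the preview; any tilt means the trapezoid needs adjusting.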

Source code for all of the above:
Link: https://pan.baidu.com/s/12w0Q7SvUGb744gZyP8-ugg
Extraction code: wqu8

References:
https://zhuanlan.zhihu.com/p/54866418
