OpenCV 3 with Python: Notes 6

2017-07-13 14:50
Object Tracking

1. Motion Detection with Frame Differencing

import cv2
import numpy as np

camera = cv2.VideoCapture(0)

# Common structuring element shapes: rectangle (MORPH_RECT, includes lines),
# ellipse (MORPH_ELLIPSE, includes circles) and cross (MORPH_CROSS)
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
kernel = np.ones((5, 5), np.uint8)
background = None

while True:
    ret, frame = camera.read()
    # use the first frame as the static background model
    if background is None:
        background = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        background = cv2.GaussianBlur(background, (21, 21), 0)
        continue

    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray_frame = cv2.GaussianBlur(gray_frame, (21, 21), 0)
    diff = cv2.absdiff(background, gray_frame)  # absolute difference to the background
    diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
    diff = cv2.dilate(diff, es, iterations=2)
    # OpenCV 3 findContours returns (image, contours, hierarchy)
    image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for c in cnts:
        if cv2.contourArea(c) < 1500:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)

    cv2.imshow("contours", frame)
    cv2.imshow("diff", diff)
    if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
        break

cv2.destroyAllWindows()
camera.release()

2. Background Subtraction

OpenCV 3 provides several BackgroundSubtractor implementations: KNN, MOG2 and GMG.

cv2.createBackgroundSubtractorKNN()

cv2.createBackgroundSubtractorMOG2()

cv2.bgsegm.createBackgroundSubtractorGMG()  (GMG lives in the opencv_contrib bgsegm module)

import cv2
import numpy as np

# detectShadows=True marks shadow pixels in the foreground mask
bs = cv2.createBackgroundSubtractorKNN(detectShadows=True)
camera = cv2.VideoCapture("movie.mpg")

while True:
    ret, frame = camera.read()
    if not ret:
        break
    fgmask = bs.apply(frame)  # foreground mask; shadows are gray (value 127)
    # drop the shadow pixels: keep only values above 244
    th = cv2.threshold(fgmask.copy(), 244, 255, cv2.THRESH_BINARY)[1]
    th = cv2.erode(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=2)
    dilated = cv2.dilate(th, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 3)), iterations=2)
    image, contours, hier = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) > 1000:
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 0), 2)

    cv2.imshow("mog", fgmask)
    cv2.imshow("thresh", th)
    cv2.imshow("diff", frame & cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))
    cv2.imshow("detection", frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

camera.release()
cv2.destroyAllWindows()

3. MeanShift

----------------------------------------------------------

cv2.inRange(src, lower, upper)

For each pixel of src, the output is 255 when every channel value lies within the corresponding [lower, upper] bounds, and 0 otherwise.

Example:

src = np.array([[[ 0,  1,  2], [ 3,  4,  5], [ 6,  7,  8]],
                [[ 9, 10, 11], [12, 13, 14], [15, 16, 17]],
                [[18, 19, 20], [ 1,  2, 23], [ 4,  5, 26]]], dtype=np.uint8)

lower = np.array([ 0,  1,  2])

upper = np.array([18, 19, 20])

cv2.inRange(src, lower, upper) returns:

array([[255, 255, 255],
       [255, 255, 255],
       [255,   0,   0]], dtype=uint8)

The last two pixels are 0 because their third channel (23 and 26) exceeds the upper bound of 20.

----------------------------------------------------------

----------------------------------------------------------

Color histograms

calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]])

images: the source image(s), wrapped in a list, e.g. [image]

channels: the channel indices to use, e.g. [0] for a grayscale image

mask: optional mask; only pixels whose mask value is non-zero are counted

histSize: the size of the histogram in each dimension (number of bins)

ranges: the lower and upper bounds of the pixel values, e.g. [0, 180]

accumulate: boolean; whether the histogram accumulates over successive calls instead of being cleared

----------------------------------------------------------
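A minimal sketch of these parameters: a 32-bin hue histogram of an HSV image, counting only the pixels selected by a mask. The synthetic image and the mask region below are placeholders for illustration.

import cv2
import numpy as np

img = np.random.randint(0, 256, (400, 400, 3), dtype=np.uint8)  # placeholder image
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = np.zeros(img.shape[:2], np.uint8)
mask[100:300, 100:300] = 255                 # count only this region
# channel 0 (hue), 32 bins, hue range [0, 180)
hist = cv2.calcHist([hsv], [0], mask, [32], [0, 180])
print(hist.shape)                            # (32, 1)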

----------------------------------------------------------

Histogram back projection: for every pixel of an image, it computes the probability that the pixel belongs to the model image (the image the original histogram was computed from).

cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)

----------------------------------------------------------
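Back projection on its own, before it is combined with MeanShift below: the hue histogram of a region of interest is projected back onto the whole frame, giving a grayscale map where bright pixels are likely to belong to the tracked object. A short sketch, assuming a webcam source and an arbitrary ROI:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()

roi = frame[10:210, 10:210]                       # assumed region of interest
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi_hist = cv2.calcHist([hsv_roi], [0], None, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
# each output pixel holds the (scaled) histogram value of that pixel's hue
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
cv2.imshow("backproject", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()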

import numpy as np
import cv2

cap = cv2.VideoCapture(0)
# capture the first frame
ret, frame = cap.read()
# mark the ROI
r, h, c, w = 10, 200, 10, 200
# wrap it in a tuple
track_window = (c, r, w, h)

# extract the ROI for tracking
roi = frame[r:r+h, c:c+w]
# switch to HSV
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
# create a mask with upper and lower boundaries of the colors you want to track
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))
# calculate the hue histogram of the ROI
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

# Setup the termination criteria: either 10 iterations or move by at least 1 pt
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()

    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        print(dst)
        # apply meanshift to get the new location; term_crit is the stop condition
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)

        # Draw it on the image
        x, y, w, h = track_window
        img2 = cv2.rectangle(frame, (x, y), (x+w, y+h), 255, 2)
        cv2.imshow('img2', img2)

        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break

    else:
        break

cv2.destroyAllWindows()
cap.release()

CAMShift

Continuously Adaptive Mean Shift: unlike MeanShift, it adapts the size (and orientation) of the tracking window.

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

# take the first frame of the video
ret, frame = cap.read()

# setup the initial location of the window
r, h, c, w = 300, 200, 400, 300  # simply hardcoded values
track_window = (c, r, w, h)

roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((100., 30., 32.)), np.array((180., 120., 255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

while True:
    ret, frame = cap.read()

    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # CamShift returns a rotated rectangle and the new track window
        ret, track_window = cv2.CamShift(dst, track_window, term_crit)
        pts = cv2.boxPoints(ret)    # the four corner points of the rotated rectangle
        pts = np.int32(pts)
        img2 = cv2.polylines(frame, [pts], True, 255, 2)  # draw the rotated box as a polyline
        cv2.imshow('img2', img2)
        k = cv2.waitKey(60) & 0xff
        if k == 27:
            break

    else:
        break

cv2.destroyAllWindows()
cap.release()

4. Kalman Filter

The Kalman filter works in two phases: in the prediction phase, it uses the covariance estimated so far to predict the target's new position; in the update (correction) phase, it records the measured target position and uses it to correct the covariance for the next cycle.

kalman = cv2.KalmanFilter(dynamParams, measureParams, controlParams = 0, type = CV_32F)

dynamParams: dimensionality of the state

measureParams: dimensionality of the measurement

controlParams: dimensionality of the control vector

type: type of the created matrices
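Before the full mouse-tracking example, a minimal sketch of the predict/correct cycle on synthetic data; the constant-velocity model and the noisy diagonal motion below are illustrative assumptions, not part of the original example.

import cv2
import numpy as np

# 4 state variables (x, y, vx, vy), 2 measured variables (x, y)
kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.eye(4, dtype=np.float32) * 0.03

# a point moving along the diagonal, measured with some noise
for t in range(20):
    prediction = kalman.predict()                       # prediction phase
    measurement = np.array([[np.float32(t + np.random.randn())],
                            [np.float32(t + np.random.randn())]])
    kalman.correct(measurement)                         # update/correction phase
    print(t, prediction[0][0], prediction[1][0])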

import cv2
import numpy as np

measurements = []
predictions = []
frame = np.zeros((800, 800, 3), np.uint8)
last_measurement = current_measurement = np.zeros((2, 1), np.float32)
last_prediction = current_prediction = np.zeros((2, 1), np.float32)

def mousemove(event, x, y, s, p):
    global frame, current_measurement, measurements, last_measurement, current_prediction, last_prediction
    last_prediction = current_prediction
    last_measurement = current_measurement
    current_measurement = np.array([[np.float32(x)], [np.float32(y)]])
    kalman.correct(current_measurement)      # update phase: feed the measured mouse position
    current_prediction = kalman.predict()    # prediction phase: estimate the next position
    lmx, lmy = last_measurement[0], last_measurement[1]
    cmx, cmy = current_measurement[0], current_measurement[1]
    lpx, lpy = last_prediction[0], last_prediction[1]
    cpx, cpy = current_prediction[0], current_prediction[1]
    # measured track in green, predicted track in red
    cv2.line(frame, (int(lmx), int(lmy)), (int(cmx), int(cmy)), (0, 100, 0))
    cv2.line(frame, (int(lpx), int(lpy)), (int(cpx), int(cpy)), (0, 0, 200))

cv2.namedWindow("kalman_tracker")
cv2.setMouseCallback("kalman_tracker", mousemove)

kalman = cv2.KalmanFilter(4, 2, 1)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)
kalman.processNoiseCov = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32) * 0.03

while True:
    cv2.imshow("kalman_tracker", frame)
    key = cv2.waitKey(30) & 0xFF
    if key == 27:
        break
    if key == ord('q'):
        cv2.imwrite('kalman.jpg', frame)
        break

cv2.destroyAllWindows()