opencv-084: Video Analysis (KLT Optical Flow Tracking of Moving Objects)

Key Points

Optical flow tracking methods fall into two categories: dense optical flow and sparse optical flow. KLT is a sparse optical flow algorithm; it was first proposed by Bruce D. Lucas and Takeo Kanade and later refined by Carlo Tomasi, which is where the name KLT (Kanade-Lucas-Tomasi) comes from. The sparse optical flow algorithm relies on three assumptions (these combine into the optical flow constraint sketched after the list):

  • Brightness constancy: a tracked point keeps the same intensity from frame to frame
  • Small motion: points move only a short distance between consecutive frames
  • Spatial consistency: neighboring pixels move in the same way
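The first two assumptions give the classical optical flow constraint: a point keeps its intensity as it moves a short distance, and a first-order Taylor expansion of that statement yields one linear equation per pixel:

    I(x, y, t) = I(x + dx, y + dy, t + dt)
    I_x u + I_y v + I_t = 0

Here (u, v) is the flow vector and I_x, I_y, I_t are the image derivatives. The spatial consistency assumption is what makes this solvable: all pixels inside the tracking window are assumed to share the same (u, v), so Lucas-Kanade stacks the per-pixel equations over the window and solves the over-determined system by least squares.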

API

void cv::calcOpticalFlowPyrLK(
    InputArray prevImg,           // previous frame (8-bit image or pyramid)
    InputArray nextImg,           // next frame
    InputArray prevPts,           // sparse feature points in the previous frame
    InputOutputArray nextPts,     // computed point positions in the next frame
    OutputArray status,           // per-point status: 1 means the flow was found and the point is kept, otherwise it is discarded
    OutputArray err,              // per-point tracking error
    Size winSize = Size(21, 21),  // search window size at each pyramid level
    int maxLevel = 3,             // number of pyramid levels; 0 means only the input image is used (no pyramid)
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, 0.01), // termination criteria of the iterative search
    int flags = 0,                // operation flags
    double minEigThreshold = 1e-4 // minimum eigenvalue response; points below this threshold are filtered out
)
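The Python binding exposes the same parameters as keyword arguments. A minimal call sketch of the typical pattern (the names track_points, prev_gray, next_gray and p0 are illustrative, not part of the original code):

import cv2 as cv

def track_points(prev_gray, next_gray, p0):
    # p0: N x 1 x 2 float32 array of corners from cv.goodFeaturesToTrack
    p1, status, err = cv.calcOpticalFlowPyrLK(
        prev_gray, next_gray, p0, None,
        winSize=(21, 21), maxLevel=3,
        criteria=(cv.TERM_CRITERIA_COUNT | cv.TERM_CRITERIA_EPS, 30, 0.01))
    # keep only the points whose status flag is 1 (flow was found)
    keep = status.ravel() == 1
    return p0[keep], p1[keep]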

Code (C++, Python)

#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

vector<Point2f> featurePoints;
RNG rng(12345);

/*
 * Video analysis: KLT optical flow tracking of moving objects
 */
int main() {
    VideoCapture capture("../images/vtest.avi");
    if (!capture.isOpened()) {
        cout << "could not open video..." << endl;
        return -1;
    }

    // corner detection parameters
    double qualityLevel = 0.01;
    int minDistance = 10;
    int maxCorners = 100;

    // KLT optical flow tracking data
    vector<Point2f> pts[2];
    vector<uchar> status;
    vector<float> err;

    // read the first frame and detect its corners
    Mat old_frame, old_gray;
    capture.read(old_frame);
    cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
    goodFeaturesToTrack(old_gray, featurePoints, maxCorners,
                        qualityLevel, minDistance, Mat());
    pts[0].insert(pts[0].end(), featurePoints.begin(), featurePoints.end());

    Mat gray, frame;

    while (true) {
        bool ret = capture.read(frame);
        if (!ret) break;
        imshow("frame", frame);
        cvtColor(frame, gray, COLOR_BGR2GRAY);

        // re-detect corners on the previous frame if every point has been lost
        if (pts[0].empty()) {
            goodFeaturesToTrack(old_gray, featurePoints, maxCorners,
                                qualityLevel, minDistance, Mat());
            pts[0] = featurePoints;
        }

        // compute optical flow
        calcOpticalFlowPyrLK(old_gray, gray, pts[0], pts[1],
                             status, err, Size(31, 31));
        size_t i, k;
        for (i = k = 0; i < pts[1].size(); ++i) {
            // keep only successfully tracked points
            if (status[i]) {
                pts[0][k] = pts[0][i];
                pts[1][k++] = pts[1][i];
                int b = rng.uniform(0, 256);
                int g = rng.uniform(0, 256);
                int r = rng.uniform(0, 256);
                Scalar color(b, g, r);
                // draw the tracked point and its motion segment
                circle(frame, pts[1][i], 4, color, -1);
                line(frame, pts[0][i], pts[1][i], color, 2);
            }
        }

        // drop the points that were lost
        pts[0].resize(k);
        pts[1].resize(k);
        imshow("result", frame);
        char c = waitKey(50);
        if (c == 27) break;

        // the current frame and points become the previous ones
        std::swap(pts[1], pts[0]);
        cv::swap(old_gray, gray);
    }

    return 0;
}
import numpy as np
import cv2 as cv

cap = cv.VideoCapture('D:/images/video/vtest.avi')

# corner detection parameters
feature_params = dict(maxCorners=100, qualityLevel=0.01, minDistance=10, blockSize=3)

# KLT optical flow parameters
lk_params = dict(winSize=(31, 31), maxLevel=3,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 30, 0.01))

# random colors, one per tracked point
color = np.random.randint(0, 255, (100, 3))

# read the first frame and detect its corners
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

# optical flow tracking loop
while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # compute optical flow
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # keep only successfully tracked points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    # draw the tracking segments
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel().astype(int)
        c, d = old.ravel().astype(int)
        frame = cv.line(frame, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)
    cv.imshow('frame', frame)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break

    # the current frame and points become the previous ones
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv.destroyAllWindows()
cap.release()
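A common refinement, used in the OpenCV Lucas-Kanade tutorial, is to draw the segments onto a persistent overlay image so that complete trajectories stay visible instead of only the latest displacement. A minimal sketch of that variation (same video path and parameters as the listing above; this is an illustrative sketch, not the original code):

import numpy as np
import cv2 as cv

cap = cv.VideoCapture('D:/images/video/vtest.avi')
feature_params = dict(maxCorners=100, qualityLevel=0.01, minDistance=10, blockSize=3)
lk_params = dict(winSize=(31, 31), maxLevel=3,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 30, 0.01))
color = np.random.randint(0, 255, (100, 3))

ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

# persistent overlay: trajectories accumulate here instead of being redrawn each frame
mask = np.zeros_like(old_frame)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel().astype(int)
        c, d = old.ravel().astype(int)
        mask = cv.line(mask, (a, b), (c, d), color[i].tolist(), 2)
        frame = cv.circle(frame, (a, b), 5, color[i].tolist(), -1)
    # overlay the accumulated trajectories on the current frame
    cv.imshow('tracks', cv.add(frame, mask))
    if cv.waitKey(30) & 0xff == 27:
        break
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)
    if len(p0) == 0:  # stop if every point has been lost
        break

cv.destroyAllWindows()
cap.release()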

Result

Code Link

github