opencv-085-视频分析(移动对象的KLT光流跟踪算法_删除静止点与绘制跟踪轨迹)

知识点

在84的知识点分享中我们已经可以跟踪到前后两帧之间的位置移动,但是这个还不足够,我们需要绘制移动对象从初始到最终的完整可以检测的运动轨迹,同时对一些静止的角点进行删除,所以我们需要对状态为1的角点,计算它们之间的距离,只有 dx+dy>2(dx=abs(p1.x-p2.x), dy=abs(p1.y-p2.y))的角点我们才对它进行保留跟踪。

流程

代码(c++,python)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Scratch buffer reused every time goodFeaturesToTrack re-detects corners
vector<Point2f> featurePoints;
// One persistent colour per tracked-point index, grown on demand by draw_lines
vector<Scalar> color_lut;
// Fixed seed so trajectory colours are reproducible between runs
RNG rng(12345);

// Draws one trajectory line per point from pt1[i] to pt2[i] (defined below main)
void draw_lines(Mat &image, vector<Point2f> pt1, vector<Point2f> pt2);

/*
* 视频分析(移动对象的KLT光流跟踪算法_删除静止点与绘制跟踪轨迹)
*/
int main() {
VideoCapture capture("../images/vtest.avi");
if (!capture.isOpened()) {
cout << "could not open video..." << endl;
return -1;
}

// 角点检测参数
double qualityLevel = 0.01;
int minDistance = 10;
int maxCorners = 100;

// KLT光流跟踪参数
vector<Point2f> pts[2];
vector<uchar> status;
vector<float> err;
vector<Point2f> initPoints;

// 读取第一帧及其角点
Mat old_frame, old_gray;
capture.read(old_frame);
cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
goodFeaturesToTrack(old_gray, featurePoints, maxCorners,
qualityLevel, minDistance, Mat());

pts[0].insert(pts[0].end(), featurePoints.begin(), featurePoints.end());
initPoints.insert(initPoints.end(), featurePoints.begin(), featurePoints.end());
int width = capture.get(CAP_PROP_FRAME_WIDTH);
int height = capture.get(CAP_PROP_FRAME_HEIGHT);

VideoWriter writer("D:/test.mp4", VideoWriter::fourcc('D', 'I', 'V', 'X'),
10, Size(width * 2, height), true);
Mat result = Mat::zeros(Size(width * 2, height), CV_8UC3);
Rect roi(0, 0, width, height);

Mat gray, frame;

while (true) {
bool ret = capture.read(frame);
if (!ret) break;
imshow("frame", frame);
roi.x = 0;
frame.copyTo(result(roi));
cvtColor(frame, gray, COLOR_BGR2GRAY);

// 计算光流
calcOpticalFlowPyrLK(old_gray, gray, pts[0], pts[1],
status, err, Size(31, 31));
size_t i, k;
for (int i = k = 0; i < pts[1].size(); ++i) {
// 距离与状态测量,删除静止点
double dist = abs(pts[0][i].x - pts[1][i].x) +
abs(pts[0][i].y - pts[1][i].y);
if (status[i] && dist > 2) {
pts[0][k] = pts[0][i];
initPoints[k] = initPoints[i];
pts[1][k++] = pts[1][i];
circle(frame, pts[1][i], 4, Scalar(0, 255, 0), -1);
}
}

// resize 有用特征点
pts[0].resize(k);
pts[1].resize(k);
initPoints.resize(k);

// 绘制跟踪轨迹
draw_lines(frame, initPoints, pts[1]);
imshow("result", frame);
roi.x = width;
frame.copyTo(result(roi));
char c = waitKey(50);
if (c == 27) break;

// 更新old
std::swap(pts[1], pts[0]);
cv::swap(old_gray, gray);

// 重新初始化角点
if (initPoints.size() < 40){
goodFeaturesToTrack(old_gray, featurePoints, maxCorners,
qualityLevel, minDistance, Mat());
pts[0].insert(pts[0].end(), featurePoints.begin(), featurePoints.end());
initPoints.insert(initPoints.end(), featurePoints.begin(), featurePoints.end());
}
writer.write(result);
}

return 0;
}

// Draws one line per tracked point from its initial position pt1[j] to its
// current position pt2[j], with a stable pseudo-random colour per index.
// NOTE: parameters stay by value to match the forward declaration at the top
// of the file; const references would avoid the copies.
void draw_lines(Mat &image, vector<Point2f> pt1, vector<Point2f> pt2) {
    // Grow the colour LUT only by the number of missing entries. The original
    // appended pt1.size() new colours whenever the LUT was short, over-growing
    // the table on every call; existing indices keep their colour either way.
    while (color_lut.size() < pt1.size()) {
        int b = rng.uniform(0, 255);
        int g = rng.uniform(0, 255);
        int r = rng.uniform(0, 255);
        color_lut.push_back(Scalar(b, g, r));
    }

    for (size_t j = 0; j < pt1.size(); ++j) {
        line(image, pt1[j], pt2[j], color_lut[j], 2);
    }
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
import cv2 as cv

# KLT sparse optical-flow tracking: culls static points (L1 displacement <= 2)
# and draws each surviving point's trajectory from its initial position.
# NOTE: the blog export stripped all indentation from this script; the loop
# structure below is restored.
cap = cv.VideoCapture('D:\\code-workspace\\Clion-workspace\\learnOpencv\\images\\vtest.avi')

# Shi-Tomasi corner detection parameters
feature_params = dict(maxCorners=100, qualityLevel=0.01, minDistance=10, blockSize=3)

# KLT optical-flow parameters
lk_params = dict(winSize=(31, 31), maxLevel=3,
                 criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 30, 0.01))

# One random colour per tracked-point index (maxCorners caps the index range)
color = np.random.randint(0, 255, (100, 3))

# Read the first frame and detect the initial corners to track
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
initPoints = p0.copy()  # original position of every tracked point

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv.imshow('frame', frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)

    # Compute sparse optical flow for the current point set
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, gray, p0, None, **lk_params)

    # Keep only points that were tracked AND moved more than 2px (L1 distance)
    k = 0
    for i, (new, old) in enumerate(zip(p1, p0)):
        a, b = new.ravel()
        c, d = old.ravel()
        dist = abs(a - c) + abs(b - d)
        if st[i] == 1 and dist > 2:
            p0[k] = p0[i]
            initPoints[k] = initPoints[i]
            p1[k] = p1[i]
            k = k + 1
            # cv.circle requires integer pixel coordinates; the float values
            # from ravel() raise a TypeError in recent OpenCV builds
            frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)

    # Drop the removed (static / lost) points
    p0 = p0[:k]
    p1 = p1[:k]
    initPoints = initPoints[:k]

    # Draw each point's trajectory from its initial to its current position
    for i, (old, new) in enumerate(zip(initPoints, p1)):
        a, b = old.ravel()
        c, d = new.ravel()
        frame = cv.line(frame, (int(a), int(b)), (int(c), int(d)), (0, 255, 0), 2)
    cv.imshow('result', frame)

    # Separate name: the original reused `k` (the keep-count) for the key code
    key = cv.waitKey(30) & 0xff
    if key == 27:  # ESC quits
        break

    # Current frame/points become the previous ones for the next iteration
    old_gray = gray.copy()
    p0 = p1

    # Too few survivors: re-detect fresh corners
    if len(initPoints) < 40:
        p0 = cv.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        initPoints = p0.copy()

cv.destroyAllWindows()
cap.release()

结果

代码地址

github