差值法跟踪移动物体

差值法原理

有够草率

计算差值绝对值函数

示例

#include <opencv2/opencv.hpp>
#include<iostream>

using namespace cv;
using namespace std;

int main() 
{
    // Open the video file and verify that it loaded successfully.
    VideoCapture capture("bike.avi");
    if (!capture.isOpened()) {
        cout<<"请确认视频文件是否正确"<<endl;
        return -1;
    }

    // Report basic stream properties (get() returns double; truncated to int here).
    int fps = static_cast<int>(capture.get(CAP_PROP_FPS));
    int width = static_cast<int>(capture.get(CAP_PROP_FRAME_WIDTH));
    int height = static_cast<int>(capture.get(CAP_PROP_FRAME_HEIGHT));
    int num_of_frames = static_cast<int>(capture.get(CAP_PROP_FRAME_COUNT));
    cout << "视频宽度:" << width << " 视频高度:" << height << " 视频帧率:" << fps << " 视频总帧数" << num_of_frames << endl;

    // Read the first frame as the fixed reference ("previous") frame.
    // Check the read: an empty stream would otherwise crash in cvtColor.
    Mat preFrame, preGray;
    if (!capture.read(preFrame))
    {
        cout << "请确认视频文件是否正确" << endl;
        return -1;
    }
    cvtColor(preFrame, preGray, COLOR_BGR2GRAY);
    // Heavy Gaussian blur (sigma = 15) suppresses pixel noise before differencing.
    GaussianBlur(preGray, preGray, Size(0, 0), 15);

    Mat binary;
    Mat frame, gray;
    // 7x7 rectangular structuring element for the morphological opening below.
    Mat k = getStructuringElement(MORPH_RECT, Size(7, 7), Point(-1, -1));

    while (true) 
    {
        // Exit the loop once every frame has been consumed.
        if (!capture.read(frame))
        {
            break;
        }

        // Grayscale + blur the current frame exactly like the reference frame.
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        GaussianBlur(gray, gray, Size(0, 0), 15);

        // Absolute per-pixel difference between the current and reference frames.
        absdiff(gray, preGray, binary);

        // Binarize the difference. NOTE: because THRESH_OTSU is set, the fixed
        // threshold value 10 is ignored and Otsu's method chooses it instead.
        threshold(binary, binary, 10, 255, THRESH_BINARY | THRESH_OTSU);
        // Opening removes small speckle noise from the motion mask.
        morphologyEx(binary, binary, MORPH_OPEN, k);

        // Show the input frame and the resulting motion mask.
        imshow("input", frame);
        imshow("result", binary);

        // Make the current frame the reference for the next iteration;
        // leaving this commented out keeps a fixed (first-frame) background.
        //gray.copyTo(preGray);

        // Wait 5 ms for a key press; ESC (27) quits.
        char c = waitKey(5);
        if (c == 27) 
        {
            break;
        }
    }

    waitKey(0);
    return 0;
}

稠密光流法跟踪

光流法原理

稠密光流法函数

光流图像中每个像素的值对应该点在 x 方向和 y 方向上的运动速度。

示例

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    VideoCapture capture("vtest.avi");
    Mat prevFrame, prevGray;
    if (!capture.read(prevFrame))
    {
        cout << "请确认视频文件名称是否正确" << endl;
        return -1;
    }

    //将彩色图像转换成灰度图像
    cvtColor(prevFrame, prevGray, COLOR_BGR2GRAY);

    while (true)
    {
        Mat nextFrame, nextGray;
        //所有图像处理完成后推出程序
        if (!capture.read(nextFrame))
        {
            break;
        }
        imshow("视频图像", nextFrame);

        //计算稠密光流
        cvtColor(nextFrame, nextGray, COLOR_BGR2GRAY);
        Mat_<Point2f> flow;  //两个方向的运动速度
        calcOpticalFlowFarneback(prevGray, nextGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

        Mat xV = Mat::zeros(prevFrame.size(), CV_32FC1);  //x方向移动速度
        Mat yV = Mat::zeros(prevFrame.size(), CV_32FC1);  //y方向移动速度
        //提取两个方向的速度
        for (int row = 0; row < flow.rows; row++)
        {
            for (int col = 0; col < flow.cols; col++)
            {
                const Point2f& flow_xy = flow.at<Point2f>(row, col);
                xV.at<float>(row, col) = flow_xy.x;
                yV.at<float>(row, col) = flow_xy.y;
            }
        }

        //计算向量角度和幅值
        Mat magnitude, angle;
        cartToPolar(xV, yV, magnitude, angle);

        //讲角度转换成角度制
        angle = angle * 180.0 / CV_PI / 2.0;

        //把幅值归一化到0-255区间便于显示结果
        normalize(magnitude, magnitude, 0, 255, NORM_MINMAX);

        //计算角度和幅值的绝对值
        convertScaleAbs(magnitude, magnitude);
        convertScaleAbs(angle, angle);

        //讲运动的幅值和角度生成HSV颜色空间的图像
        Mat HSV = Mat::zeros(prevFrame.size(), prevFrame.type());
        vector<Mat> result;
        split(HSV, result);
        result[0] = angle;  //决定颜色
        result[1] = Scalar(255);
        result[2] = magnitude;  //决定形态
        //将三个多通道图像合并成三通道图像
        merge(result, HSV);

        //讲HSV颜色空间图像转换到RGB颜色空间中
        Mat rgbImg;
        cvtColor(HSV, rgbImg, COLOR_HSV2BGR);

        //显示检测结果
        imshow("运动检测结果", rgbImg);
        int ch = waitKey(5);
        if (ch == 27)
        {
            break;
        }
    }
    waitKey(0);
    return 0;
}

稀疏光流法跟踪

稀疏光流法目标跟踪函数

通常情况下使用的是 Shi-Tomasi 角点。先在图像中提取若干个角点,将角点和下一帧图像输入给函数,去除掉没有移动的角点;多次迭代后,角点数目会越来越少,当其小于一定阈值时,则对图像重新检测角点,对角点集合进行扩充。

示例

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Draw one colored line per point pair, from each corner's initial position
// (pt1) to its currently tracked position (pt2).
void draw_lines(Mat& image, vector<Point2f> pt1, vector<Point2f> pt2);
vector<Scalar> color_lut;  // per-corner color lookup table, grown lazily by draw_lines

int main()
{
    VideoCapture capture("mulballs.mp4");
    Mat prevframe, prevImg;
    if (!capture.read(prevframe))
    {
        cout << "请确认输入视频文件是否正确" << endl;
        return -1;
    }
    cvtColor(prevframe, prevImg, COLOR_BGR2GRAY);

    // Shi-Tomasi corner-detection parameters.
    vector<Point2f> Points;
    double qualityLevel = 0.01;
    int minDistance = 10;
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;     // Harris free parameter (unused while useHarrisDetector is false)
    int Corners = 5000;  // maximum number of corners to detect
    // Initial corner detection.
    goodFeaturesToTrack(prevImg, Points, Corners, qualityLevel, minDistance, Mat(),
        blockSize, useHarrisDetector, k);

    // Sparse optical flow (pyramidal Lucas-Kanade) parameters.
    vector<Point2f> prevPts;  // corner coordinates in the previous frame
    vector<Point2f> nextPts;  // corner coordinates in the current frame
    vector<uchar> status;     // per-corner tracking status (1 = found)
    vector<float> err;
    TermCriteria criteria = TermCriteria(TermCriteria::COUNT
        + TermCriteria::EPS, 30, 0.01);
    int flags = 0;

    // Corners at their initial detection positions (origins of the trails).
    vector<Point2f> initPoints;
    initPoints.insert(initPoints.end(), Points.begin(), Points.end());

    // Corner coordinates of the previous frame.
    prevPts.insert(prevPts.end(), Points.begin(), Points.end());

    while (true)
    {
        Mat nextframe, nextImg;
        if (!capture.read(nextframe))
        {
            break;
        }
        imshow("nextframe", nextframe);

        // Track corners from the previous frame into the current one.
        // NOTE: the parameter after `criteria` is `flags`. The original code
        // passed an extra `derivlambda` value there, which silently became
        // flags = 0 and pushed `flags` into minEigThreshold (making it 0).
        cvtColor(nextframe, nextImg, COLOR_BGR2GRAY);
        calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err,
            Size(31, 31), 3, criteria, flags);

        // Compact the arrays, keeping only corners that were found AND moved.
        // The counter is named `keep` (not `k`) so it does not shadow the
        // Harris parameter `k` passed to goodFeaturesToTrack below.
        size_t i, keep;
        for (i = keep = 0; i < nextPts.size(); i++)
        {
            // L1 displacement between the two frames.
            double dist = abs(prevPts[i].x - nextPts[i].x) + abs(prevPts[i].y - nextPts[i].y);
            if (status[i] && dist > 2)
            {
                prevPts[keep] = prevPts[i];
                initPoints[keep] = initPoints[i];
                nextPts[keep++] = nextPts[i];
                circle(nextframe, nextPts[i], 3, Scalar(0, 255, 0), -1, 8);
            }
        }

        // Shrink all three arrays to the number of still-moving corners.
        nextPts.resize(keep);
        prevPts.resize(keep);
        initPoints.resize(keep);

        // Draw a trail from each corner's origin to its current position.
        draw_lines(nextframe, initPoints, nextPts);
        imshow("result", nextframe);

        char c = waitKey(50);
        if (c == 27)
        {
            break;
        }

        // The current frame/corners become the "previous" ones for the next pass.
        std::swap(nextPts, prevPts);
        nextImg.copyTo(prevImg);

        // When fewer than 30 corners remain, re-detect and append fresh ones.
        if (initPoints.size() < 30)
        {
            goodFeaturesToTrack(prevImg, Points, Corners, qualityLevel,
                minDistance, Mat(), blockSize, useHarrisDetector, k);
            initPoints.insert(initPoints.end(), Points.begin(), Points.end());
            prevPts.insert(prevPts.end(), Points.begin(), Points.end());
            // %zu matches size_t (the original %d was a format mismatch).
            printf("total feature points : %zu\n", prevPts.size());
        }

    }
    return 0;
}

// Draw one colored segment per point pair, from pt1[t] to pt2[t].
// Colors come from the global color_lut, which is topped up with a batch of
// random colors whenever it holds fewer entries than there are points.
void draw_lines(Mat& image, vector<Point2f> pt1, vector<Point2f> pt2)
{
    RNG rng(5000);
    const size_t count = pt1.size();

    // Grow the lookup table: append `count` fresh random colors when it is
    // too small (same batch-growth rule as before).
    if (color_lut.size() < count)
    {
        size_t added = 0;
        while (added < count)
        {
            Scalar color(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
            color_lut.push_back(color);
            ++added;
        }
    }

    // Connect each origin point to its current tracked position.
    for (size_t idx = 0; idx < count; ++idx)
    {
        line(image, pt1[idx], pt2[idx], color_lut[idx], 2, 8, 0);
    }
}