Supervised Learning

K-Nearest Neighbors (KNN)

Principle: a sample is assigned to the class that is most common among its k nearest neighbors, so the choice of k directly affects the result. In the classic illustration, the query point is classified as a triangle when k = 3 but as a square when k = 5. Beyond plain majority voting, additional refinements (such as distance weighting) are possible.
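
A minimal sketch of this behavior using cv::ml::KNearest (the toy points below are made up for illustration); findNearest() takes k directly, so the same trained model can be queried with k = 3 or k = 5:

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::ml;
using namespace std;

int main()
{
    // Toy 2D training points: class 0 near (1,1), class 1 near (5,5)
    Mat samples = (Mat_<float>(6, 2) << 1, 1, 1, 2, 2, 1, 5, 5, 5, 6, 6, 5);
    Mat labels = (Mat_<int>(6, 1) << 0, 0, 0, 1, 1, 1);

    Ptr<KNearest> knn = KNearest::create();
    knn->setIsClassifier(true);
    knn->train(TrainData::create(samples, ROW_SAMPLE, labels));

    // Query one point with different values of k
    Mat query = (Mat_<float>(1, 2) << 2.5f, 2.5f);
    Mat res3, res5;
    knn->findNearest(query, 3, res3);  // vote among the 3 nearest neighbors
    knn->findNearest(query, 5, res5);  // vote among the 5 nearest neighbors
    cout << "k=3 -> class " << res3.at<float>(0, 0)
         << ", k=5 -> class " << res5.at<float>(0, 0) << endl;
    return 0;
}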

SVM (Support Vector Machine)

An SVM separates the data with a hyperplane chosen so that two parallel boundary lines, each passing through the nearest samples of one class, are as far apart as possible; that is, the margin between the classes is maximized.
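
In standard SVM notation (not specific to OpenCV), the max-margin idea can be written as

\[
\min_{w,\,b}\ \tfrac{1}{2}\lVert w\rVert^2
\quad \text{s.t.}\quad y_i\,(w^\top x_i + b) \ge 1,\ i = 1,\dots,N,
\]

where the two parallel boundary lines are \(w^\top x + b = \pm 1\); their distance is \(2/\lVert w\rVert\), so minimizing \(\lVert w\rVert\) maximizes the margin.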

Overview of the ML Module

Functions defined in the ML module

The TrainData class

All ml classes derive from the Algorithm base class, which defines some very basic functions, such as load() for loading a saved model and save() for storing a trained model (see the function documentation).
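
A minimal sketch of save() and load() (assuming a KNearest model and the file name knn_model.yml, as in the training example below; the training data here are toy values):

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace cv::ml;

int main()
{
    // Train a tiny model so there is something to save
    Mat samples = (Mat_<float>(4, 2) << 0, 0, 0, 1, 1, 0, 1, 1);
    Mat labels = (Mat_<int>(4, 1) << 0, 0, 1, 1);
    Ptr<KNearest> knn = KNearest::create();
    knn->train(TrainData::create(samples, ROW_SAMPLE, labels));

    // save() is inherited from Algorithm and writes the trained model to disk
    knn->save("knn_model.yml");

    // The model can later be restored through the Algorithm interface
    Ptr<KNearest> loaded = Algorithm::load<KNearest>("knn_model.yml");
    return 0;
}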

Example (KNN training)

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::ml;
using namespace std;

int main(int argc, char** argv)
{
    Mat img = imread("digits.png");
    Mat gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Split the image into 5000 cells, one flattened 20×20 digit per row
    Mat images = Mat::zeros(5000, 400, CV_8UC1);
    Mat labels = Mat::zeros(5000, 1, CV_8UC1);

    int index = 0;
    Rect numberImg;
    numberImg.x = 0;
    numberImg.height = 1;
    numberImg.width = 400;
    for (int row = 0; row < 50; row++)
    {
        //Extract 20×20 patches from the image as individual digit images
        int label = row / 5;
        int datay = row * 20;
        for (int col = 0; col < 100; col++)
        {
            int datax = col * 20;
            Mat number = Mat::zeros(Size(20, 20), CV_8UC1);
            for (int x = 0; x < 20; x++)
            {
                for (int y = 0; y < 20; y++)
                {
                    number.at<uchar>(x, y) = gray.at<uchar>(x + datay, y + datax);
                }
            }
            //Flatten the 2D image data into a single row
            Mat rowData = number.reshape(1, 1);
            cout << "extracting sample " << index + 1 << endl;
            numberImg.y = index;
            //Append to the full data matrix
            rowData.copyTo(images(numberImg));
            //Record the digit label for each image
            labels.at<uchar>(index, 0) = label;
            index++;
        }
    }
    imwrite("所有数据按行排列结果.png", images);
    imwrite("标签.png", labels);

    //Build the training data set
    images.convertTo(images, CV_32FC1);
    labels.convertTo(labels, CV_32SC1);
    Ptr<ml::TrainData> tdata = ml::TrainData::create(images, ml::ROW_SAMPLE, labels);

    //Create the K-nearest-neighbors model
    Ptr<KNearest> knn = KNearest::create();
    knn->setDefaultK(5);  //每个类别拿出5个数据
    knn->setIsClassifier(true);  //进行分类

    //Train on the data
    knn->train(tdata);
    //Save the trained model
    knn->save("knn_model.yml");

    //Report completion
    cout << "KNN training finished and the model has been saved" << endl;

    waitKey(0);
    return 0;
}

Example (KNN testing)

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::ml;
using namespace std;

int main()
{
    system("color F0");
    // 加载KNN分类器
    Mat data = imread("所有数据按行排列结果.png", IMREAD_ANYDEPTH);
    Mat labels = imread("标签.png", IMREAD_ANYDEPTH);
    data.convertTo(data, CV_32FC1);
    labels.convertTo(labels, CV_32SC1);
    Ptr<KNearest> knn = Algorithm::load<KNearest>("knn_model.yml");

    //Classify the training data and check the result
    Mat result;
    knn->findNearest(data, 5, result);

    //Count how many predictions match the true labels
    int count = 0;
    for (int row = 0; row < result.rows; row++)
    {

        int predict = result.at<float>(row, 0);
        if (labels.at<int>(row, 0) == predict)
        {
            count = count + 1;
        }
    }
    float rate = 1.0 * count / result.rows;
    cout << "分类的正确性:" << rate << endl;

    //Test whether digits in new images can be recognized
    Mat testImg1 = imread("handWrite01.png", IMREAD_GRAYSCALE);
    Mat testImg2 = imread("handWrite02.png", IMREAD_GRAYSCALE);
    imshow("testImg1", testImg1);
    imshow("testImg2", testImg2);

    //Resize to 20×20
    resize(testImg1, testImg1, Size(20, 20));
    resize(testImg2, testImg2, Size(20, 20));
    Mat testdata = Mat::zeros(2, 400, CV_8UC1);
    Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.height = 1;
    rect.width = 400;
    Mat oneData = testImg1.reshape(1, 1);
    Mat twoData = testImg2.reshape(1, 1);
    oneData.copyTo(testdata(rect));
    rect.y = 1;
    twoData.copyTo(testdata(rect));
    //Convert the data type
    testdata.convertTo(testdata, CV_32F);

    //Run the prediction
    Mat result2;
    knn->findNearest(testdata, 5, result2);

    //Inspect the predicted results
    for (int i = 0; i < result2.rows; i++)
    {
        int predict = result2.at<float>(i, 0);
        cout << "第" << i + 1 << "图像预测结果:" << predict
            << "  真实结果:" << i + 1 << endl;
    }
    waitKey(0);
    return 0;
}

Example (SVM)

#include <opencv2/opencv.hpp>
#include <iostream>  

using namespace std;
using namespace cv;
using namespace cv::ml;

int main()
{
    //Training data
    Mat samples, labls;
    FileStorage fread("point.yml", FileStorage::READ);
    fread["data"] >> samples;
    fread["labls"] >> labls;
    fread.release();

    //Give each class of points its own color
    vector<Vec3b> colors;
    colors.push_back(Vec3b(0, 255, 0));
    colors.push_back(Vec3b(0, 0, 255));

    //Create blank images for displaying the points
    Mat img(480, 640, CV_8UC3, Scalar(255, 255, 255));
    Mat img2;
    img.copyTo(img2);

    //Draw the points on the blank images
    for (int i = 0; i < samples.rows; i++)
    {
        Point2f point;
        point.x = samples.at<float>(i, 0);
        point.y = samples.at<float>(i, 1);
        Scalar color = colors[labls.at<int>(i, 0)];
        circle(img, point, 3, color, -1);
        circle(img2, point, 3, color, -1);
    }
    imshow("两类像素点图像", img);

    //Create the model
    Ptr<SVM> model = SVM::create();

    //Parameter settings
    model->setKernel(SVM::INTER);  //kernel type (histogram intersection)
    model->setType(SVM::C_SVC);  //SVM type
    model->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.01));
    //model->setGamma(5.383);
    //model->setC(0.01);
    //model->setDegree(3);

    //Train the model
    model->train(TrainData::create(samples, ROW_SAMPLE, labls));

    //Use the model to classify every pixel in the image
    Mat imagePoint(1, 2, CV_32FC1);
    for (int y = 0; y < img2.rows; y = y + 2)
    {
        for (int x = 0; x < img2.cols; x = x + 2)
        {
            imagePoint.at<float>(0) = (float)x;
            imagePoint.at<float>(1) = (float)y;
            int color = (int)model->predict(imagePoint);
            img2.at<Vec3b>(y, x) = colors[color];
        }
    }

    imshow("图像所有像素点分类结果", img2);
    waitKey();
    return 0;
}

K-Means Clustering

Principle of k-means clustering

Related functions

The number of clusters K must be specified manually. The criteria parameter sets the termination condition using an accuracy (epsilon) and a maximum iteration count, and the output bestLabels is a one-dimensional matrix giving each sample's cluster index.
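
A minimal sketch of the kmeans() call with these parameters (the points below are toy values):

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    // Toy data: one 2D sample per row, CV_32F
    Mat points = (Mat_<float>(6, 2) << 1, 1, 1, 2, 2, 1, 8, 8, 8, 9, 9, 8);
    int K = 2;  // the number of clusters must be chosen manually
    // criteria combines an accuracy (epsilon) and a maximum iteration count
    TermCriteria criteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
    Mat bestLabels, centers;
    kmeans(points, K, bestLabels, criteria, 3, KMEANS_PP_CENTERS, centers);
    // bestLabels is an N x 1 CV_32S matrix holding each sample's cluster index
    cout << "labels: " << bestLabels.t() << endl;
    cout << "centers: " << centers << endl;
    return 0;
}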

Example (clustering points)

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    //Create a 500×500 image for displaying the points and the clustering result
    Mat img(500, 500, CV_8UC3, Scalar(255, 255, 255));
    RNG rng(10000);

    //Define three colors
    Scalar colorLut[3] =
    {
        Scalar(0, 0, 255),
        Scalar(0, 255, 0),
        Scalar(255, 0, 0),
    };

    //Create three point sets, each with a random number of points
    int number = 3;
    int Points1 = rng.uniform(20, 200);
    int Points2 = rng.uniform(20, 200);
    int Points3 = rng.uniform(20, 200);
    int Points_num = Points1 + Points2 + Points3;
    Mat Points(Points_num, 1, CV_32FC2);

    int i = 0;
    for (; i < Points1; i++)
    {
        Point2f pts;
        pts.x = rng.uniform(100, 200);
        pts.y = rng.uniform(100, 200);
        Points.at<Point2f>(i, 0) = pts;
    }

    for (; i < Points1 + Points2; i++)
    {
        Point2f pts;
        pts.x = rng.uniform(300, 400);
        pts.y = rng.uniform(100, 300);
        Points.at<Point2f>(i, 0) = pts;
    }

    for (; i < Points1 + Points2 + Points3; i++)
    {
        Point2f pts;
        pts.x = rng.uniform(100, 200);
        pts.y = rng.uniform(390, 490);
        Points.at<Point2f>(i, 0) = pts;
    }

    // Run k-means
    Mat labels;  //cluster index of each point
    Mat centers;  //center coordinates of each cluster
    kmeans(Points, number, labels, TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1), 3, KMEANS_PP_CENTERS, centers);

    // Color each point according to its cluster
    img = Scalar::all(255);
    for (int i = 0; i < Points_num; i++)
    {
        int index = labels.at<int>(i);
        Point point = Points.at<Point2f>(i);
        circle(img, point, 2, colorLut[index], -1, 4);
    }

    // Draw a circle around each cluster center
    for (int i = 0; i < centers.rows; i++)
    {
        int x = centers.at<float>(i, 0);
        int y = centers.at<float>(i, 1);
        cout << "第" << i + 1 << "类的中心坐标:x=" << x << "  y=" << y << endl;
        circle(img, Point(x, y), 50, colorLut[i], 1, LINE_AA);
    }

    imshow("K近邻点集分类结果", img);
    waitKey(0);
    return 0;
}

Example (image segmentation)

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat img = imread("people.jpg");
    if (!img.data)
    {
        printf("请确认图像文件是否输入正确");
        return -1;
    }

    Vec3b colorLut[5] = {
        Vec3b(0, 0, 255),
        Vec3b(0, 255, 0),
        Vec3b(255, 0, 0),
        Vec3b(0, 255, 255),
        Vec3b(255, 0, 255)
    };

    //Image dimensions, used to compute the number of pixels
    int width = img.cols;
    int height = img.rows;

    // Initialization
    int sampleCount = width * height;


    //Reshape the image so that each pixel becomes one row of data
    Mat sample_data = img.reshape(3, sampleCount);  //the first argument is the number of channels
    Mat data;
    sample_data.convertTo(data, CV_32F);

    //Use kmeans() to cluster the pixel values
    int number = 3;  //number of colors after segmentation
    Mat labels;
    TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
    kmeans(data, number, labels, criteria, number, KMEANS_PP_CENTERS);

    // Build and display the segmentation result
    Mat result = Mat::zeros(img.size(), img.type());
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            int index = row * width + col;
            int label = labels.at<int>(index, 0);
            result.at<Vec3b>(row, col) = colorLut[label];
        }
    }

    imshow("原图", img);
    imshow("分割后图像", result);
    waitKey(0);
    return 0;
}

Deep Neural Network Models

Function for reading a network model

The Net class

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main()
{
    system("color F0");
    string model = "bvlc_googlenet.caffemodel";
    string config = "bvlc_googlenet.prototxt";

    //Load the model
    Net net = dnn::readNet(model, config);
    if (net.empty())
    {
        cout << "请确认是否输入空的模型文件" << endl;
        return -1;
    }

    // Get information about each layer
    vector<String> layerNames = net.getLayerNames();
    for (int i = 0; i < layerNames.size(); i++)
    {
        //Get the ID of each layer
        int ID = net.getLayerId(layerNames[i]);
        //Get the information of each layer
        Ptr<Layer> layer = net.getLayer(ID);
        //Print the layer information
        cout << "layer ID: " << ID << "  layer name: " << layerNames[i] << endl
            << "layer type: " << layer->type.c_str() << endl;
    }
    return 0;
}

Applications of deep neural network models

Function for converting input data dimensions

Converts the input data to the size the model expects. Note that scalefactor is a scaling coefficient applied to the pixel values, and mean is a value subtracted from all pixel values (per channel).
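
A minimal sketch of blobFromImage() showing these parameters (the file name and mean values here are illustrative, not tied to a specific model):

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main()
{
    Mat img = imread("lena.png");
    if (img.empty())
        return -1;

    Mat blob = blobFromImage(img,
        1.0,                    // scalefactor: coefficient applied to the pixel values
        Size(224, 224),         // spatial size expected by the network
        Scalar(104, 117, 123),  // mean: subtracted from each channel before scaling
        false,                  // swapRB: whether to swap the B and R channels
        false);                 // crop: whether to center-crop after resizing

    // The result is a 4D blob in NCHW layout, here 1 x 3 x 224 x 224
    cout << blob.size[0] << " x " << blob.size[1] << " x "
         << blob.size[2] << " x " << blob.size[3] << endl;
    return 0;
}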

Example (style transfer)

#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main()
{
    Mat image = imread("lena.png");
    String models[5] = { "the_wave.t7",	"mosaic.t7", "feathers.t7", "candy.t7", "udnie.t7" };
    for (int i = 0; i < size(models); i++)
    {
        Net net = readNet(models[i]);
        imshow("原始图像", image);
        //计算图像每个通道的均值
        Scalar imgaeMean = mean(image);
        //调整图像尺寸和格式
        Mat blobImage = blobFromImage(image, 1.0, Size(256, 256), imgaeMean, false, false);

        //Run the network on the original image
        net.setInput(blobImage);
        Mat output = net.forward();

        //Channel count and size of the output
        int outputChannels = output.size[1];
        int outputRows = output.size[2];
        int outputCols = output.size[3];

        //Copy the output into an image
        Mat result = Mat::zeros(Size(outputCols, outputRows), CV_32FC3);
        float* data = output.ptr<float>();
        for (int channel = 0; channel < outputChannels; channel++)
        {
            for (int row = 0; row < outputRows; row++)
            {
                for (int col = 0; col < outputCols; col++)
                {
                    result.at<Vec3f>(row, col)[channel] = *data++;
                }
            }
        }

        //Post-process the transferred result
        //Add back the mean that was subtracted earlier
        result = result + imageMean;
        //Normalize the image so that it can be displayed
        result = result / 255.0;
        //Resize the result to match the original image
        resize(result, result, image.size());
        //Show the result
        imshow("Style transfer result " + to_string(i), result);
    }

    waitKey(0);
    return 0;
}

Example (image classification)

#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main() 
{
    Mat img = imread("airplane.jpg");
    if (img.empty())
    {
        printf("could not load image...\n");
        return -1;
    }

    //Read the class names
    String typeListFile = "imagenet_comp_graph_label_strings.txt";
    vector<String> typeList;
    ifstream file(typeListFile);
    if (!file.is_open())
    {
        printf("请确认分类种类名称是否正确");
        return -1;
    }
    
    std::string type;
    while (getline(file, type))
    {
        //Read one class name per line
        if (type.length())
            typeList.push_back(type);
    }
    file.close();

    // Load the network
    String tf_pb_file = "tensorflow_inception_graph.pb";
    Net net = readNet(tf_pb_file);
    if (net.empty()) 
    {
        printf("请确认模型文件是否为空文件");
        return -1;
    }

    //Preprocess the input image
    Mat blob = blobFromImage(img, 1.0f, Size(224, 224), Scalar(), true, false);

    //Predict the image class
    Mat prob;
    net.setInput(blob, "input");
    prob = net.forward("softmax2");

    // Find the most likely class
    Mat probMat = prob.reshape(1, 1);
    Point classNumber;  //location of the most likely class
    double classProb;  //highest probability
    minMaxLoc(probMat, NULL, &classProb, NULL, &classNumber);

    string typeName = typeList.at(classNumber.x);
    cout << "The object in the image is probably: " << typeName << "  probability: " << classProb << endl;
        
    //Text to draw on the image
    string str = typeName + " possibility:" + to_string(classProb);
    putText(img, str, Point(50, 50), FONT_HERSHEY_SIMPLEX, 1.0, Scalar(0, 0, 255), 2, 8);
    imshow("图像判断结果", img);
    waitKey(0);
    return 0;
}

Example (gender detection)

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main()
{
    Mat img = imread("dingzhen.jpg");
    if (img.empty())
    {
        cout << "请确定是否输入正确的图像文件" << endl;
        return -1;
    }

    //Load the face detection model
    String model_bin = "ch12_face_age/opencv_face_detector_uint8.pb";
    String config_text = "ch12_face_age/opencv_face_detector.pbtxt";
    Net faceNet = readNet(model_bin, config_text);

    //Load the gender classification model
    String genderProto = "ch12_face_age/gender_deploy.prototxt";
    String genderModel = "ch12_face_age/gender_net.caffemodel";
    String genderList[] = { "Male", "Female" };
    Net genderNet = readNet(genderModel, genderProto);
    if (faceNet.empty() && genderNet.empty())
    {
        cout << "请确定是否输入正确的模型文件" << endl;
        return -1;
    }

    //Run face detection on the whole image
    Mat blobImage = blobFromImage(img, 1.0, Size(300, 300), Scalar(), false, false);
    faceNet.setInput(blobImage, "data");
    Mat detect = faceNet.forward("detection_out");
    //Face confidence and the coordinates of each face rectangle
    Mat detectionMat(detect.size[2], detect.size[3], CV_32F, detect.ptr<float>());

    //Run gender classification on each detected face region
    int exBoundray = 25;  //padding added to the face region in each direction
    float confidenceThreshold = 0.5;  //confidence threshold for accepting a face; higher means stricter
    for (int i = 0; i < detectionMat.rows; i++)
    {
        float confidence = detectionMat.at<float>(i, 2);  //confidence that this region is a face
        //Only classify gender for regions whose confidence exceeds the threshold
        if (confidence > confidenceThreshold)
        {
            //Face region reported by the detection network
            int topLx = detectionMat.at<float>(i, 3) * img.cols;
            int topLy = detectionMat.at<float>(i, 4) * img.rows;
            int bottomRx = detectionMat.at<float>(i, 5) * img.cols;
            int bottomRy = detectionMat.at<float>(i, 6) * img.rows;
            Rect faceRect(topLx, topLy, bottomRx - topLx, bottomRy - topLy);

            //Expand the detected region, taking care to keep it inside the image
            Rect faceTextRect;
            faceTextRect.x = max(0, faceRect.x - exBoundray);
            faceTextRect.y = max(0, faceRect.y - exBoundray);
            faceTextRect.width = min(faceRect.width + 2 * exBoundray, img.cols - faceTextRect.x - 1);
            faceTextRect.height = min(faceRect.height + 2 * exBoundray, img.rows - faceTextRect.y - 1);
            Mat face = img(faceTextRect);  //the expanded face image

            //Resize the face image
            Mat faceblob = blobFromImage(face, 1.0, Size(227, 227), Scalar(), false, false);
            //Feed the resized face image into the gender network
            genderNet.setInput(faceblob);
            //Compute the result
            Mat genderPreds = genderNet.forward();  //scores for the two genders

            //Gender classification result
            float male, female;
            male = genderPreds.at<float>(0, 0);
            female = genderPreds.at<float>(0, 1);
            int classID = male > female ? 0 : 1;
            String gender = genderList[classID];

            //Draw the face rectangle and gender on the original image
            rectangle(img, faceRect, Scalar(0, 0, 255), 2, 8, 0);
            putText(img, gender.c_str(), faceRect.tl(), FONT_HERSHEY_SIMPLEX, 0.8, Scalar(0, 0, 255), 2, 8);
        }
    }
    imshow("性别检测结果", img);
    waitKey(0);
    return 0;
}

The results are excellent.