Classic OpenCV Cases in C++
1. Blade defect detection: key points
- Binary image analysis
- Template matching
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat tpl;
void sort_box(vector<Rect> &boxes);
void detect_defect(Mat &binary, vector<Rect> rects, vector<Rect> &defect);
int main(int argc, char** argv) {
Mat src = imread("D:/images/ce_01.jpg");
if (src.empty()) {
printf("could not load image file...");
return -1;
}
namedWindow("input", WINDOW_AUTOSIZE);
imshow("input", src);
//binarize the image
Mat gray, binary;
cvtColor(src, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 0, 255, THRESH_BINARY_INV | THRESH_OTSU); //global (Otsu) threshold
imshow("binary", binary);
//define a structuring element and apply an opening to remove small noise
Mat se = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
morphologyEx(binary, binary, MORPH_OPEN, se);
imshow("open-binary", binary);
//find contours
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
vector<Rect> rects;
findContours(binary, contours, hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE);
int height = src.rows;
for (size_t t = 0; t < contours.size(); t++) {
Rect rect = boundingRect(contours[t]);
double area = contourArea(contours[t]);
if (rect.height > (height / 2)) {
continue;
}
if (area < 150) {
continue;
}
rects.push_back(rect); //collect the rect (the vector grows as needed)
//rectangle(src, rect, Scalar(0, 255, 0), 2, 8, 0); //draw the rectangle
//drawContours(src, contours, t, Scalar(0, 0, 255), 2, 8); //draw the contour
}
sort_box(rects);
tpl = binary(rects[1]); //take the second (sorted) blade region as the template
//for (int i = 0; i < rects.size(); i++) {
// putText(src, format("%d", i), rects[i].tl(), FONT_HERSHEY_PLAIN, 1.0, Scalar(0, 255, 0), 1, 8);
//}
vector<Rect> defects;
detect_defect(binary, rects, defects);
for (int i = 0; i < defects.size(); i++) { //draw the detected defects
rectangle(src, defects[i], Scalar(0, 0, 255), 2, 8, 0);
putText(src, "bad", defects[i].tl(), FONT_HERSHEY_PLAIN, 1.0, Scalar(0, 255, 0), 1, 8);
}
imshow("result", src);
waitKey(0);
return 0;
}
void sort_box(vector<Rect> &boxes) {
int size = boxes.size();
for (int i = 0; i < size; i++) {
for (int j = i; j < size; j++) {
int x = boxes[j].x;
int y = boxes[j].y;
if (y < boxes[i].y) {
Rect temp = boxes[i];
boxes[i] = boxes[j];
boxes[j] = temp;
}
}
}
}
void detect_defect(Mat &binary, vector<Rect> rects, vector<Rect> &defect) {
int h = tpl.rows;
int w = tpl.cols;
int size = rects.size();
for (int i = 0; i < size; i++) {
//build the diff
Mat roi = binary(rects[i]);
resize(roi, roi, tpl.size()); //resize the ROI to the template size
Mat mask;
subtract(tpl, roi, mask);
Mat se = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1)); //opening removes tiny differences
morphologyEx(mask, mask, MORPH_OPEN, se);
threshold(mask, mask, 0, 255, THRESH_BINARY); //binarize the resulting mask
imshow("mask", mask);
waitKey(0);
//use the diff to look for defects (count thresholding)
int count = 0;
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
int pv = mask.at<uchar>(row, col); //read each pixel and count the ones equal to 255
if (pv == 255) {
count++;
}
}
}
//pad by one pixel on each side
int mh = mask.rows + 2;
int mw = mask.cols + 2;
Mat m1 = Mat::zeros(Size(mw, mh), mask.type());
Rect mroi; //copy mask into the mroi region of m1, leaving a 1-pixel zero border around it
mroi.x = 1;
mroi.y = 1;
mroi.height = mask.rows;
mroi.width = mask.cols;
mask.copyTo(m1(mroi));
//contour analysis: filter the differences inside each rectangle
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(m1, contours, hierarchy, RETR_LIST, CHAIN_APPROX_SIMPLE); //find the small difference contours inside each rectangle
bool find = false;
for (size_t t = 0; t < contours.size(); t++) { //check whether any difference region meets the criteria; if so, set find = true
Rect rect = boundingRect(contours[t]);
float ratio = (float)rect.width / ((float)rect.height); //aspect ratio of the bounding rect
//filter out difference regions with aspect ratio > 4 that sit at the top or bottom edge
if (ratio > 4.0 && (rect.y < 5 || (m1.rows - (rect.height + rect.y)) < 10)) { //discard white regions along the edges
continue;
}
double area = contourArea(contours[t]);
if (area > 10) {
printf("ratio:%.2f,area:%.2f \n", ratio, area);
find = true;
}
}
if (count > 50 && find) { //if more than 50 pixels equal 255 and the checks above pass, record this rectangle as defective
printf("count:%d \n", count);
defect.push_back(rects[i]);
}
}
//results are returned through the defect vector
}
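To make the core of detect_defect easier to see in isolation, here is a distilled sketch of the same template-diff idea: subtract the blade ROI from the template, clean the difference with an opening, and count what is left. The helper name is_defective and the 50-pixel budget are only illustrative; the full function above additionally filters the difference contours by aspect ratio and position.
#include <opencv2/opencv.hpp>
// Sketch only: does a binarized blade ROI differ from the template by more than a pixel budget?
// binary and tpl are CV_8UC1 images, as in the program above.
bool is_defective(const cv::Mat &binary, const cv::Rect &blade, const cv::Mat &tpl) {
cv::Mat roi, diff;
cv::resize(binary(blade), roi, tpl.size()); // bring the blade to the template size
cv::subtract(tpl, roi, diff); // pixels present in the template but missing in the blade
cv::morphologyEx(diff, diff, cv::MORPH_OPEN,
cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3))); // drop tiny differences
return cv::countNonZero(diff) > 50; // many remaining pixels suggest a defect
}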
Results: 1. Binarize the image and apply an opening operation
2. Extract each blade region and sort them
3. Flag defective blades by the number of pixels that differ from the template
4. Flag defective blades by the aspect ratio, position, and pixel count of the regions where they differ from the template
2. Custom object detection: solution approach
- Object detection problems in OpenCV
- Template matching
- Feature matching
- Features + machine learning
- Chosen here: HOG features + an SVM to build the model
- Sliding-window detection
- Grayscale conversion
- Gradient computation
- Gradient-orientation histograms over a grid of cells
- Block descriptors
- Block descriptor normalization
- Feature vector and detection window (see the descriptor-length sketch after this list)
- Matching method
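The steps just listed are what cv::HOGDescriptor computes internally. A minimal sketch, assuming the default parameters (64x128 window, 16x16 blocks, 8x8 block stride, 8x8 cells, 9 orientation bins), which is exactly the configuration that yields the 3780-dimensional descriptor used by the training code later in this section:
#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>
int main() {
cv::HOGDescriptor hog; // defaults: win 64x128, block 16x16, stride 8x8, cell 8x8, 9 bins
cv::Mat patch(128, 64, CV_8UC1, cv::Scalar(127)); // any 64x128 grayscale patch
std::vector<float> desc;
hog.compute(patch, desc, cv::Size(8, 8), cv::Size(0, 0));
// 7 x 15 = 105 block positions, each 4 cells x 9 bins = 36 values, so 3780 in total
std::printf("descriptor length: %zu\n", desc.size());
return 0;
}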
Depending on the block shape, HOG features are divided into C-HOG (circular blocks) and R-HOG (rectangular blocks).
Block descriptors are normalized with the L2 norm; the normalization factor is computed as follows:
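The normalization factor referred to here is the standard L2 scheme: with $v$ the unnormalized block descriptor and $\varepsilon$ a small constant that avoids division by zero,
$v \leftarrow \dfrac{v}{\sqrt{\lVert v \rVert_2^2 + \varepsilon^2}}$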
- Mapping linearly non-separable data into a space where it becomes linearly separable
- Kernel functions: linear, Gaussian (RBF), polynomial, and others
A quick intuition for the SVM: when the data distribution is messy and a separating boundary is hard to draw, lifting the samples into a higher dimension (say from 2-D to 3-D) often makes the problem tractable.
The way an SVM splits the data is also appealing: it computes the position of the separating boundary explicitly, and once that boundary is drawn the space is divided into regions, so classifying new samples becomes straightforward.
The lift into higher dimensions is carried out through a kernel function, explained below.
When samples are not linearly separable in the original space, they can be mapped into a higher-dimensional feature space in which they become linearly separable. With such a mapping in place, solving the SVM's dual problem never requires the mapping function itself; only its kernel is needed.
Definition of a kernel: K(x, y) = <φ(x), φ(y)>, that is, the inner product in the feature space equals the value computed directly in the original sample space through the kernel K. The data becomes linearly separable in the high-dimensional space, yet the explicit mapping never has to be evaluated; specifying a concrete kernel is enough, which makes the problem far easier to solve.
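For reference, the kernels listed above take the following standard forms (textbook definitions rather than anything specific to this article; $\gamma > 0$, $c \ge 0$ and the degree $d$ are hyperparameters):
- Linear: $K(x, y) = x^{\top} y$
- Polynomial: $K(x, y) = (x^{\top} y + c)^{d}$
- Gaussian (RBF): $K(x, y) = \exp(-\gamma \lVert x - y \rVert^{2})$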
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace cv::ml;
using namespace std;
string positive_dir = "D:/images/elec_watchzip/elec_watch/positive";
string negative_dir = "D:/images/elec_watchzip/elec_watch/negative";
void get_hog_descriptor(Mat &image, vector<float> &desc);
void generate_dataset(Mat &trainData, Mat &labels);
void svm_train(Mat &trainData, Mat &labels);
int main(int argc, char** argv) {
//read data and generate dataset
Mat trainData = Mat::zeros(Size(3780, 26), CV_32FC1); //3780 = HOG descriptor length for a 64x128 window; 26 = total number of training images (positive + negative)
Mat labels = Mat::zeros(Size(1, 26), CV_32SC1);
generate_dataset(trainData, labels);
//SVM train and save model
svm_train(trainData, labels);
//load model
Ptr<SVM> svm = SVM::load("D:/images/elec_watchzip/elec_watch/hog_elec.xml"); //load the trained model
//detect custom object
Mat test = imread("D:/images/elec_watchzip/elec_watch/test/scene_01.jpg");
resize(test, test, Size(0, 0), 0.2, 0.2); //resize; dsize and (fx, fy) must not both be zero
imshow("input", test);
Rect winRect;
winRect.width = 64;
winRect.height = 128;
int sum_x = 0;
int sum_y = 0;
int count = 0;
//sliding-window detection...
for (int row = 64; row < test.rows - 64; row += 4) {
for (int col = 32; col < test.cols - 32; col += 4) {
winRect.x = col - 32;
winRect.y = row - 64;
vector<float> fv;
Mat img = test(winRect);
get_hog_descriptor(img, fv);
Mat one_row = Mat::zeros(Size(fv.size(), 1), CV_32FC1);
for (int i = 0; i < fv.size(); i++) {
one_row.at<float>(0, i) = fv[i];
}
float result = svm->predict(one_row);
if (result > 0) {
//rectangle(test, winRect, Scalar(0, 0, 255), 1, 8, 0);
count += 1;
sum_x += winRect.x;
sum_y += winRect.y;
}
}
}
//show the averaged box (guard against the case where no window was classified as positive)
if (count > 0) {
winRect.x = sum_x / count;
winRect.y = sum_y / count;
rectangle(test, winRect, Scalar(255, 0, 0), 2, 8, 0);
}
imshow("object detection result", test);
waitKey(0);
return 0;
}
void get_hog_descriptor(Mat &image, vector<float> &desc) {
HOGDescriptor hog; //HOG descriptor (default 64x128 window)
int h = image.rows;
int w = image.cols;
float rate = 64.0 / w;
Mat img, gray;
resize(image, img, Size(64, int(rate*h))); //force the width to 64 while keeping the original aspect ratio
cvtColor(img, gray, COLOR_BGR2GRAY);
Mat result = Mat::zeros(Size(64, 128), CV_8UC1);
result = Scalar(127);
Rect roi;
roi.x = 0;
roi.width = 64;
roi.y = (128 - gray.rows) / 2;
roi.height = gray.rows;
gray.copyTo(result(roi));
hog.compute(result, desc, Size(8, 8), Size(0, 0));
printf("desc len:%d\n", desc.size());
}
void generate_dataset(Mat &trainData, Mat &labels) {
vector<String> images;
glob(positive_dir, images); //scan the directory for all positive samples
int pos_num = images.size();
for (int i = 0; i < images.size(); i++) {
Mat image = imread(images[i].c_str());
vector<float> fv;
get_hog_descriptor(image, fv);
for (int j = 0; j < fv.size(); j++) {
trainData.at<float>(i, j) = fv[j];
}
labels.at<int>(i, 0) = 1;
}
images.clear();
glob(negative_dir, images);
for (int i = 0; i < images.size(); i++) {
Mat image = imread(images[i].c_str());
vector<float> fv;
get_hog_descriptor(image, fv);
for (int j = 0; j < fv.size(); j++) {
trainData.at<float>(i + pos_num, j) = fv[j];
}
labels.at<int>(i + pos_num, 0) = -1;
}
}
void svm_train(Mat &trainData, Mat &labels) {
printf("\n start SVM training... \n");
Ptr<SVM> svm = SVM::create();
svm->setC(2.67); //a larger C gives a more complex classification model
svm->setType(SVM::C_SVC); //classifier type
svm->setKernel(SVM::LINEAR); //linear kernel, fast
svm->setGamma(5.383); //ignored by the linear kernel, needed by other kernels
svm->train(trainData, ROW_SAMPLE, labels); //one sample per row
clog << "....[Done]" << endl;
printf("end train...\n");
//save xml
svm->save("D:/images/elec_watchzip/elec_watch/hog_elec.xml"); //save path
}
Result:
3. QR code detection and localization: key points
- QR code features
- Image binarization
- Contour extraction
- Perspective transform
- Geometric analysis
- Global or local threshold selection
- Global threshold segmentation
- Outermost versus nested contours
- Filtering by area and geometric shape
- Perspective transform and the homography matrix
- Finding each candidate square
- Checking the 1:1:3:1:1 structure along the X direction (see the ratio-test sketch after this list)
- Checking the ratio structure along the Y direction
- Producing the output
- Rejecting regions whose area is too small to identify
- minAreaRect
- findHomography
- warpPerspective
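The 1:1:3:1:1 test can be stated on its own: take the five consecutive run lengths (black, white, black, white, black) across a candidate finder pattern, estimate the module size as one seventh of their sum, and require each run to stay close to its expected multiple. A small sketch of that check (matchesFinderRatio is a hypothetical helper; the half-module tolerance is a common choice, whereas isXCorner in the program below relies on integer rounding instead):
#include <cmath>
// Sketch only: do five scanline runs follow the 1:1:3:1:1 finder-pattern ratio?
// runs[0..4] are the widths of the black, white, black, white, black segments along one scanline.
bool matchesFinderRatio(const int runs[5]) {
int total = runs[0] + runs[1] + runs[2] + runs[3] + runs[4];
if (total < 7) return false; // too small to contain 7 modules
double module = total / 7.0; // estimated module size
double tol = module / 2.0; // allow half a module of deviation per run
return std::fabs(runs[0] - module) < tol &&
std::fabs(runs[1] - module) < tol &&
std::fabs(runs[2] - 3 * module) < 3 * tol &&
std::fabs(runs[3] - module) < tol &&
std::fabs(runs[4] - module) < tol;
}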
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
void scanAndDetectQRCode(Mat & image);
bool isXCorner(Mat &image);
bool isYCorner(Mat &image);
Mat transformCorner(Mat &image, RotatedRect &rect);
int main(int argc, char** argv) {
// Mat src = imread("D:/images/qrcode.png");
Mat src = imread("D:/images/qrcode_07.png");
if (src.empty()) {
printf("could not load image file...");
return -1;
}
namedWindow("input", WINDOW_AUTOSIZE);
imshow("input", src);
scanAndDetectQRCode(src);
waitKey(0);
return 0;
}
void scanAndDetectQRCode(Mat & image) {
Mat gray, binary;
cvtColor(image, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
imshow("binary", binary);
// detect rectangle now
vector<vector<Point>> contours;
vector<Vec4i> hireachy;
Moments moments; //declared but not used below
findContours(binary.clone(), contours, hireachy, RETR_LIST, CHAIN_APPROX_SIMPLE, Point());
Mat result = Mat::zeros(image.size(), CV_8UC1);
for (size_t t = 0; t < contours.size(); t++) {
double area = contourArea(contours[t]);
if (area < 100) continue; //drop contours with area < 100
RotatedRect rect = minAreaRect(contours[t]);
float w = rect.size.width;
float h = rect.size.height;
float rate = min(w, h) / max(w, h);
if (rate > 0.85 && w < image.cols / 4 && h < image.rows / 4) { //filter by aspect ratio and size
Mat qr_roi = transformCorner(image, rect);
// geometric analysis of the candidate rectangle
if (isXCorner(qr_roi)) {
drawContours(image, contours, static_cast<int>(t), Scalar(255, 0, 0), 2, 8);
drawContours(result, contours, static_cast<int>(t), Scalar(255), 2, 8);
}
}
}
// scan all key points
vector<Point> pts;
for (int row = 0; row < result.rows; row++) {
for (int col = 0; col < result.cols; col++) {
int pv = result.at<uchar>(row, col);
if (pv == 255) {
pts.push_back(Point(col, row)); //collect the coordinates of the white pixels
}
}
}
RotatedRect rrt = minAreaRect(pts); //minimum-area rectangle enclosing pts
Point2f vertices[4];
rrt.points(vertices);
pts.clear();
for (int i = 0; i < 4; i++) { //draw the four sides of the rotated rectangle
line(image, vertices[i], vertices[(i + 1) % 4], Scalar(0, 255, 0), 2);
pts.push_back(vertices[i]);
}
Mat mask = Mat::zeros(result.size(), result.type()); //mask covering the detected QR region
vector<vector<Point>> cpts;
cpts.push_back(pts);
drawContours(mask, cpts, 0, Scalar(255), -1, 8); //filled
Mat dst;
bitwise_and(image, image, dst, mask); //AND with the mask to extract the QR code region
imshow("detect result", image);
//imwrite("D:/case03.png", image);
imshow("result-mask", mask);
imshow("qrcode-roi", dst);
}
bool isXCorner(Mat &image) { //analyze a candidate finder-pattern region
Mat gray, binary;
cvtColor(image, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
int xb = 0, yb = 0;
int w1x = 0, w2x = 0;
int b1x = 0, b2x = 0;
int width = binary.cols;
int height = binary.rows;
int cy = height / 2;
int cx = width / 2;
int pv = binary.at<uchar>(cy, cx);
if (pv == 255) return false; //the center pixel must be black
// verify finder pattern
bool findleft = false, findright = false;
int start = 0, end = 0;
int offset = 0;
while (true) { //walk outward from the center pixel in both directions
offset++;
if ((cx - offset) <= width / 8 || (cx + offset) >= width - 1) {
start = -1;
end = -1;
break;
}
pv = binary.at<uchar>(cy, cx - offset);
if (pv == 255) {
start = cx - offset;
findleft = true;
}
pv = binary.at<uchar>(cy, cx + offset);
if (pv == 255) {
end = cx + offset;
findright = true;
}
if (findleft && findright) { //stop once a white pixel is found on both sides; start and end hold the run boundaries
break;
}
}
if (start <= 0 || end <= 0) {
return false;
}
xb = end - start;
for (int col = start; col > 0; col--) {
pv = binary.at<uchar>(cy, col);
if (pv == 0) {
w1x = start - col;
break;
}
}
for (int col = end; col < width - 1; col++) {
pv = binary.at<uchar>(cy, col);
if (pv == 0) {
w2x = col - end;
break;
}
}
for (int col = (end + w2x); col < width; col++) {
pv = binary.at<uchar>(cy, col);
if (pv == 255) {
b2x = col - end - w2x;
break;
}
else {
b2x++;
}
}
for (int col = (start - w1x); col > 0; col--) {
pv = binary.at<uchar>(cy, col);
if (pv == 255) {
b1x = start - col - w1x;
break;
}
else {
b1x++;
}
}
float sum = xb + b1x + b2x + w1x + w2x;
//printf("xb : %d, b1x = %d, b2x = %d, w1x = %d, w2x = %d\n", xb , b1x , b2x , w1x , w2x);
xb = static_cast<int>((xb / sum)*7.0 + 0.5); //+0.5 rounds to the nearest integer instead of truncating to 0
b1x = static_cast<int>((b1x / sum)*7.0 + 0.5);
b2x = static_cast<int>((b2x / sum)*7.0 + 0.5);
w1x = static_cast<int>((w1x / sum)*7.0 + 0.5);
w2x = static_cast<int>((w2x / sum)*7.0 + 0.5);
printf("xb : %d, b1x = %d, b2x = %d, w1x = %d, w2x = %d\n", xb, b1x, b2x, w1x, w2x);
if ((xb == 3 || xb == 4) && b1x == b2x && w1x == w2x && w1x == b1x && b1x == 1) { // 1:1:3:1:1
return true;
}
else {
return false;
}
}
bool isYCorner(Mat &image) { //walk upward from the center pixel, counting black pixels until the first white, then white pixels above that
Mat gray, binary;
cvtColor(image, gray, COLOR_BGR2GRAY);
threshold(gray, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
int width = binary.cols;
int height = binary.rows;
int cy = height / 2;
int cx = width / 2;
int pv = binary.at<uchar>(cy, cx);
int bc = 0, wc = 0;
bool found = true;
for (int row = cy; row > 0; row--) {
pv = binary.at<uchar>(row, cx);
if (pv == 0 && found) {
bc++;
}
else if (pv == 255) {
found = false;
wc++;
}
}
bc = bc * 2;
if (bc <= wc) { //for a valid finder pattern, twice the black half-run should exceed the white count above it; otherwise reject
return false;
}
return true;
}
Mat transformCorner(Mat &image, RotatedRect &rect) { //homography and perspective transform
int width = static_cast<int>(rect.size.width);
int height = static_cast<int>(rect.size.height);
Mat result = Mat::zeros(height, width, image.type());
Point2f vertices[4];
rect.points(vertices);
vector<Point> src_corners;
vector<Point> dst_corners;
dst_corners.push_back(Point(0, 0));
dst_corners.push_back(Point(width, 0));
dst_corners.push_back(Point(width, height)); // big trick
dst_corners.push_back(Point(0, height));
for (int i = 0; i < 4; i++) {
src_corners.push_back(vertices[i]);
}
Mat h = findHomography(src_corners, dst_corners);
warpPerspective(image, result, h, result.size());
return result;
}
Process analysis
Result:
4. KMeans applications
- Data clustering
- Image clustering (segmentation)
- Background replacement
- Dominant color extraction
- Cluster centers
- Assignment by distance
The biggest difference between clustering and classification is that in classification the target is known in advance, while in clustering it is not: there is no target variable, the categories are not predefined, and the algorithm is never told what the groups should look like, because we may not even know what we are looking for. Clustering is therefore used for knowledge discovery rather than prediction, which is why it is also called unsupervised learning.
KMeans is the most commonly used clustering algorithm. Its main idea: given K and K initial cluster centers, assign every point (i.e. data record) to the cluster whose center is nearest; once all points are assigned, recompute each cluster center as the mean of the points in that cluster; then keep alternating the assignment and center-update steps until the centers barely change or a specified number of iterations is reached.
K-means procedure:
- Choose K initial cluster centers
- For each sample, compute its distance to every center and assign it to the nearest one
- After all samples are assigned, recompute the center of each cluster
- Repeat steps 2 and 3 until the K centers stop moving or a maximum number of iterations is reached (the objective being minimized is formalized right after this list)
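Formally, these iterations perform a local search for centers $\mu_1, \dots, \mu_K$ that minimize the within-cluster sum of squared distances (the standard KMeans objective; the notation is ours, not the article's):
$\min_{\mu_1, \dots, \mu_K} \sum_{k=1}^{K} \sum_{x_i \in C_k} \lVert x_i - \mu_k \rVert^2$
where $C_k$ is the set of samples currently assigned to center $\mu_k$. OpenCV's cv::kmeans returns this value as its compactness result.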
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
void kmeans_data_demo();
void kmeans_image_demo();
void kmeans_background_replace();
void kmeans_color_card();
int main(int argc, char** argv) {
// kmeans_data_demo();
// kmeans_image_demo();
// kmeans_background_replace();
kmeans_color_card();
return 0;
}
void kmeans_data_demo() {
Mat img(500, 500, CV_8UC3);
RNG rng(12345);
Scalar colorTab[] = {
Scalar(0, 0, 255),
Scalar(255, 0, 0),
};
int numCluster = 2; //number of clusters
int sampleCount = rng.uniform(5, 500); //random number of data points, drawn uniformly
Mat points(sampleCount, 1, CV_32FC2); //sampleCount x 1 matrix, two coordinates per point
// generate random points
for (int k = 0; k < numCluster; k++) {
Point center;
center.x = rng.uniform(0, img.cols);
center.y = rng.uniform(0, img.rows);
//each pass fills a different row range of the points matrix
Mat pointChunk = points.rowRange(k*sampleCount / numCluster,
k == numCluster - 1 ? sampleCount : (k + 1)*sampleCount / numCluster);
//fill the chunk with 2-D random numbers around the cluster center (uniform or Gaussian fill is supported; Gaussian here)
rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
}
randShuffle(points, 1, &rng); //shuffle the points
// run KMeans
Mat labels;
Mat centers;
//split the points into numCluster (here 2) classes: one label per point, 3 attempts with different initial centers, kmeans++ center initialization
kmeans(points, numCluster, labels, TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1), 3, KMEANS_PP_CENTERS, centers);
// show the clusters in different colors
img = Scalar::all(255);
for (int i = 0; i < sampleCount; i++) {
int index = labels.at<int>(i);
Point p = points.at<Point2f>(i);
circle(img, p, 2, colorTab[index], -1, 8); //fill each point with its cluster color
}
// draw a circle around each cluster center
for (int i = 0; i < centers.rows; i++) {
int x = centers.at<float>(i, 0);
int y = centers.at<float>(i, 1);
printf("c.x= %d, c.y=%d\n", x, y);
circle(img, Point(x, y), 40, colorTab[i], 1, LINE_AA);
}
imshow("KMeans-Data-Demo", img);
waitKey(0);
}
void kmeans_image_demo() {
Mat src = imread("D:/images/toux.jpg");
if (src.empty()) {
printf("could not load image...\n");
return;
}
namedWindow("input image", WINDOW_AUTOSIZE);
imshow("input image", src);
Vec3b colorTab[] = {
Vec3b(0, 0, 255),
Vec3b(0, 255, 0),
Vec3b(255, 0, 0),
Vec3b(0, 255, 255),
Vec3b(255, 0, 255)
};
int width = src.cols;
int height = src.rows;
int dims = src.channels();
// initialization
int sampleCount = width * height;
int clusterCount = 3;
Mat labels;
Mat centers;
// convert the RGB data into sample data
Mat sample_data = src.reshape(3, sampleCount); //reshape the input image to sampleCount x 1, 3 channels
Mat data;
sample_data.convertTo(data, CV_32F);
// run K-Means
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1); //stop after 10 iterations or when accuracy reaches 0.1
kmeans(data, clusterCount, labels, criteria, clusterCount, KMEANS_PP_CENTERS, centers);
// show the segmentation result
int index = 0;
Mat result = Mat::zeros(src.size(), src.type());
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
index = row * width + col;
int label = labels.at<int>(index, 0);
result.at<Vec3b>(row, col) = colorTab[label]; //color each pixel by its cluster label
}
}
imshow("KMeans-image-Demo", result);
waitKey(0);
}
void kmeans_background_replace() {
Mat src = imread("D:/images/toux.jpg");
if (src.empty()) {
printf("could not load image...\n");
return;
}
namedWindow("input image", WINDOW_AUTOSIZE);
imshow("input image", src);
int width = src.cols;
int height = src.rows;
int dims = src.channels();
// initialization
int sampleCount = width * height;
int clusterCount = 3;
Mat labels;
Mat centers;
// convert the RGB data into sample data
Mat sample_data = src.reshape(3, sampleCount);
Mat data;
sample_data.convertTo(data, CV_32F);
// run K-Means
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
kmeans(data, clusterCount, labels, criteria, clusterCount, KMEANS_PP_CENTERS, centers);
// build the background mask
Mat mask = Mat::zeros(src.size(), CV_8UC1);
int index = labels.at<int>(0, 0); //label of pixel (0,0); pixels sharing this label are treated as background
labels = labels.reshape(1, height);
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
int c = labels.at<int>(row, col);
if (c == index) {
mask.at<uchar>(row, col) = 255; //set background pixels (same label as (0,0)) to 255
}
}
}
imshow("mask", mask);
Mat se = getStructuringElement(MORPH_RECT, Size(3, 3), Point(-1, -1));
dilate(mask, mask, se); //dilate the white background region
// generate Gaussian weights
GaussianBlur(mask, mask, Size(5, 5), 0); //Gaussian blur makes the transition along the contour edges smooth
imshow("mask-blur", mask);
// blend the image with the new background using the Gaussian weights
Mat result = Mat::zeros(src.size(), CV_8UC3);
for (int row = 0; row < height; row++) {
for (int col = 0; col < width; col++) {
float w1 = mask.at<uchar>(row, col) / 255.0;
Vec3b bgr = src.at<Vec3b>(row, col);
bgr[0] = w1 * 255.0 + bgr[0] * (1.0 - w1); //blend each BGR channel toward the new background color separately
bgr[1] = w1 * 0 + bgr[1] * (1.0 - w1);
bgr[2] = w1 * 255.0 + bgr[2] * (1.0 - w1);
result.at<Vec3b>(row, col) = bgr;
}
}
imshow("background-replacement-demo", result);
waitKey(0);
}
void kmeans_color_card() {
Mat src = imread("D:/images/test.png");
if (src.empty()) {
printf("could not load image...\n");
return;
}
namedWindow("input image", WINDOW_AUTOSIZE);
imshow("input image", src);
int width = src.cols;
int height = src.rows;
int dims = src.channels();
// initialization
int sampleCount = width * height;
int clusterCount = 4;
Mat labels;
Mat centers;
// convert the RGB data into sample data
Mat sample_data = src.reshape(3, sampleCount);
Mat data;
sample_data.convertTo(data, CV_32F);
// run K-Means
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 10, 0.1);
kmeans(data, clusterCount, labels, criteria, clusterCount, KMEANS_PP_CENTERS, centers);
Mat card = Mat::zeros(Size(width, 50), CV_8UC3); //color card sized input-image-width x 50
vector<float> clusters(clusterCount);
// compute the color-card ratios
for (int i = 0; i < labels.rows; i++) { //iterate over the labels
clusters[labels.at<int>(i, 0)]++;
}
for (int i = 0; i < clusters.size(); i++) { //convert the per-cluster counts into proportions
clusters[i] = clusters[i] / sampleCount;
}
int x_offset = 0;
// draw the color card
for (int x = 0; x < clusterCount; x++) {
Rect rect;
rect.x = x_offset;
rect.y = 0;
rect.height = 50;
rect.width = round(clusters[x] * width);
x_offset += rect.width;
int b = centers.at<float>(x, 0);
int g = centers.at<float>(x, 1);
int r = centers.at<float>(x, 2);
rectangle(card, rect, Scalar(b, g, r), -1, 8, 0);
}
imshow("Image Color Card", card);
waitKey(0);
}
Results: 1. KMeans clustering demo
2. Color-based image segmentation with KMeans
3. Smooth background replacement
4. Color card of the four most frequent colors in the image
Summary
These four cases cover binary-image defect detection with template matching, custom object detection with HOG features and an SVM, QR code detection and localization, and several applications of KMeans clustering.