图像分割之区域生长
2016-10-29 20:12
232 查看
在数字图像处理中,常常需要我们把目标物体提取出来,在这里我们简单地把图像分成了我们需要的目标图像和不关心的背景图像。
实现方法一:区域生长
区域生长,顾名思义就是让一颗种子在一个区域内按某种规则生长,待生长完全后,【这棵大植株】就是你要的目标区域了。所以我们在完成这个算法的时候就必须明确以下几点:
①区域:我们可以规定其在我们想要的图像区域内生长,一般情况我们的区域就是整张图像范围。
②种子:怎么定种子?种子可以是单个像素点也可以是某一个区域。可以是整幅图的第一个像素点,可以是图中灰度最大的点,也可以是某一片颜色的区域。即种子种在哪、种什么种子,都可以人为设定。
③生长规则:我们可以按照像素值,按照颜色,纹理等特征来规定生长规则。比如我们可以定 邻域内像素与种子的灰度值差小于阈值T则认为是同一类,即把它归为种子区域。一般都是在种子的四邻域或八邻域内进行判断。
④终止生长:如何使区域生长停止?要使其停下来,要么是遍历了图像中所有的点,即所有像素点都有了归属;要么是种子库(待生长队列)已经用完,此时生长即告停止。
按照上面的过程,我们不难发现,如果我们的生长规则定得不对很容易造成过度分割。
以下是代码展示:
// Region-growing segmentation on a single-channel (grayscale) image.
//
// Seed selection: the brightest pixel whose row lies in the middle half of
// the image (height/4 .. 3*height/4) is used as the single seed.
// Growth rule: an 8-connected neighbor joins the region when its gray value
// differs from the current seed pixel's gray value by less than t1.
//
// @param img  input single-channel image (must be 8-bit; not modified)
// @return     newly allocated binary mask image (255 = grown region,
//             0 = background); the caller owns it and must cvReleaseImage it.
IplImage *regionGrow(IplImage *img)
{
	const int t1 = 1;               // growth threshold on gray-level difference
	                                // BUG FIX: the original condition read
	                                // "< 4000 ; t1" — a syntax error; it is "< t1".
	std::stack<CvPoint> seeds;      // pixels whose neighborhood still must be examined

	IplImage *seedIplImage = cvCreateImage(cvGetSize(img), img->depth, 1); // output mask
	IplImage *img1 = cvCreateImage(cvGetSize(img), img->depth, 1);         // gray copy of the region
	cvZero(seedIplImage);
	cvZero(img1);

	//////////////////// Find the seed point (only one here) ////////////////////
	CvPoint A;
	A.x = 0;                        // BUG FIX: A was left uninitialized when no
	A.y = 0;                        // pixel satisfied the middle-half row filter
	int max_gray = 0;
	for (int i = 0; i < img->height; i++)
	{
		uchar* ptr = (uchar*)img->imageData + i * img->widthStep;
		for (int j = 0; j < img->width; j++)
		{
			// BUG FIX: removed the dead "if (ptr[j] < 0)" branch — a uchar is
			// never negative, and the branch would have mutated the input image.
			if (max_gray <= ptr[j]
				&& i > img->height / 4 && i < 3 * img->height / 4)
			{
				max_gray = ptr[j];
				A.x = j;
				A.y = i;
			}
		}
	}// A now holds the coordinate of the maximal gray value in the middle half

	// Mark the seed in the mask and push it for expansion.
	uchar *seedRow = (uchar*)seedIplImage->imageData + A.y * seedIplImage->widthStep;
	seedRow[A.x] = 255;
	seeds.push(A);

	uchar* seed_data = (uchar *)seedIplImage->imageData;
	uchar* img_data  = (uchar *)img->imageData;
	uchar* img1_data = (uchar *)img1->imageData;

	// 8-neighborhood offsets: up, right, left, down, then the four diagonals.
	// BUG FIX: the original bottom-right case indexed its row with
	// (seedpoint.x + 1)*widthStep instead of (seedpoint.y + 1)*widthStep,
	// reading/writing the wrong pixel (possibly out of bounds).
	static const int dx[8] = {  0, 1, -1, 0, -1,  1, -1, 1 };
	static const int dy[8] = { -1, 0,  0, 1, -1, -1,  1, 1 };

	// Grow while there are unexpanded seed pixels.
	while (!seeds.empty())
	{
		CvPoint seedpoint = seeds.top(); // current pixel to expand
		seeds.pop();
		const int seedGray = img_data[img->widthStep * seedpoint.y + seedpoint.x];

		// Expand only interior pixels so that all 8 neighbors are in bounds.
		if (seedpoint.x > 0 && seedpoint.x < img->width - 1 &&
			seedpoint.y > 0 && seedpoint.y < img->height - 1)
		{
			for (int k = 0; k < 8; k++)
			{
				const int nx  = seedpoint.x + dx[k];
				const int ny  = seedpoint.y + dy[k];
				const int off = ny * seedIplImage->widthStep + nx;
				// Neighbor not yet in the region, and its gray level is within
				// the threshold of the current seed pixel's gray level.
				if (seed_data[off] == 0 && abs(img_data[off] - seedGray) < t1)
				{
					seed_data[off] = 255;            // add to the mask
					img1_data[off] = img_data[off];  // keep its gray value
					CvPoint temppoint;
					temppoint.x = nx;
					temppoint.y = ny;
					seeds.push(temppoint);           // expand it later
				}
			}
		}
	}
	cvReleaseImage(&img1); // BUG FIX: img1 was leaked — it is never returned or saved
	return seedIplImage;
}
实现方法一:区域生长
区域生长,顾名思义就是让一颗种子在一个区域内按某种规则生长,待生长完全后,【这棵大植株】就是你要的目标区域了。所以我们在完成这个算法的时候就必须明确以下几点:
①区域:我们可以规定其在我们想要的图像区域内生长,一般情况我们的区域就是整张图像范围。
②种子:怎么定种子?种子可以是单个像素点也可以是某一个区域。可以是整幅图的第一个像素点,可以是图中灰度最大的点,也可以是某一片颜色的区域。即种子种在哪、种什么种子,都可以人为设定。
③生长规则:我们可以按照像素值,按照颜色,纹理等特征来规定生长规则。比如我们可以定 邻域内像素与种子的灰度值差小于阈值T则认为是同一类,即把它归为种子区域。一般都是在种子的四邻域或八邻域内进行判断。
④终止生长:如何使区域生长停止?要使其停下来,要么是遍历了图像中所有的点,即所有像素点都有了归属;要么是种子库(待生长队列)已经用完,此时生长即告停止。
按照上面的过程,我们不难发现,如果我们的生长规则定得不对很容易造成过度分割。
以下是代码展示:
// Region-growing segmentation on a single-channel (grayscale) image.
//
// Seed selection: the brightest pixel whose row lies in the middle half of
// the image (height/4 .. 3*height/4) is used as the single seed.
// Growth rule: an 8-connected neighbor joins the region when its gray value
// differs from the current seed pixel's gray value by less than t1.
//
// @param img  input single-channel image (must be 8-bit; not modified)
// @return     newly allocated binary mask image (255 = grown region,
//             0 = background); the caller owns it and must cvReleaseImage it.
IplImage *regionGrow(IplImage *img)
{
	const int t1 = 1;               // growth threshold on gray-level difference
	                                // BUG FIX: the original condition read
	                                // "< 4000 ; t1" — a syntax error; it is "< t1".
	std::stack<CvPoint> seeds;      // pixels whose neighborhood still must be examined

	IplImage *seedIplImage = cvCreateImage(cvGetSize(img), img->depth, 1); // output mask
	IplImage *img1 = cvCreateImage(cvGetSize(img), img->depth, 1);         // gray copy of the region
	cvZero(seedIplImage);
	cvZero(img1);

	//////////////////// Find the seed point (only one here) ////////////////////
	CvPoint A;
	A.x = 0;                        // BUG FIX: A was left uninitialized when no
	A.y = 0;                        // pixel satisfied the middle-half row filter
	int max_gray = 0;
	for (int i = 0; i < img->height; i++)
	{
		uchar* ptr = (uchar*)img->imageData + i * img->widthStep;
		for (int j = 0; j < img->width; j++)
		{
			// BUG FIX: removed the dead "if (ptr[j] < 0)" branch — a uchar is
			// never negative, and the branch would have mutated the input image.
			if (max_gray <= ptr[j]
				&& i > img->height / 4 && i < 3 * img->height / 4)
			{
				max_gray = ptr[j];
				A.x = j;
				A.y = i;
			}
		}
	}// A now holds the coordinate of the maximal gray value in the middle half

	// Mark the seed in the mask and push it for expansion.
	uchar *seedRow = (uchar*)seedIplImage->imageData + A.y * seedIplImage->widthStep;
	seedRow[A.x] = 255;
	seeds.push(A);

	uchar* seed_data = (uchar *)seedIplImage->imageData;
	uchar* img_data  = (uchar *)img->imageData;
	uchar* img1_data = (uchar *)img1->imageData;

	// 8-neighborhood offsets: up, right, left, down, then the four diagonals.
	// BUG FIX: the original bottom-right case indexed its row with
	// (seedpoint.x + 1)*widthStep instead of (seedpoint.y + 1)*widthStep,
	// reading/writing the wrong pixel (possibly out of bounds).
	static const int dx[8] = {  0, 1, -1, 0, -1,  1, -1, 1 };
	static const int dy[8] = { -1, 0,  0, 1, -1, -1,  1, 1 };

	// Grow while there are unexpanded seed pixels.
	while (!seeds.empty())
	{
		CvPoint seedpoint = seeds.top(); // current pixel to expand
		seeds.pop();
		const int seedGray = img_data[img->widthStep * seedpoint.y + seedpoint.x];

		// Expand only interior pixels so that all 8 neighbors are in bounds.
		if (seedpoint.x > 0 && seedpoint.x < img->width - 1 &&
			seedpoint.y > 0 && seedpoint.y < img->height - 1)
		{
			for (int k = 0; k < 8; k++)
			{
				const int nx  = seedpoint.x + dx[k];
				const int ny  = seedpoint.y + dy[k];
				const int off = ny * seedIplImage->widthStep + nx;
				// Neighbor not yet in the region, and its gray level is within
				// the threshold of the current seed pixel's gray level.
				if (seed_data[off] == 0 && abs(img_data[off] - seedGray) < t1)
				{
					seed_data[off] = 255;            // add to the mask
					img1_data[off] = img_data[off];  // keep its gray value
					CvPoint temppoint;
					temppoint.x = nx;
					temppoint.y = ny;
					seeds.push(temppoint);           // expand it later
				}
			}
		}
	}
	cvReleaseImage(&img1); // BUG FIX: img1 was leaked — it is never returned or saved
	return seedIplImage;
}
相关文章推荐
- 【转】基于区域的图像分割-----------区域生长
- 图像分割-区域生长
- 图像分割_区域生长
- 区域生长(基于区域的图像分割)
- 灰度图像--图像分割 区域分割之区域生长
- 区域生长和区域分离与合并的图像分割方法
- 【OpenCV学习笔记 016】图像分割-种子区域生长
- 基于区域的图像分割-----------区域生长
- 基于区域的图像分割-----------区域生长
- 图像分割_区域生长
- 基于区域生长的图像分割
- 数字图像处理—图像分割—串行区域(区域生长)(分裂合并)
- 区域生长法图像分割的实现方法
- 图像分割——区域生长
- 【图像算法】彩色图像分割专题三:边缘检测+区域生长 法
- 图像分割之区域生长
- 转载]图像分割——区域生长法
- 基于区域的图像分割-----------区域生长
- 图像处理算法1——区域生长法
- 实现二值图像连通区标记之区域生长法