Which image processing techniques could be used to implement an application that detects the Christmas trees displayed in the following images?
I'm looking for solutions that will work on all of these images, so approaches that require training Haar cascade classifiers or template matching are not very interesting.
I'm looking for something that can be written in any programming language, as long as it uses only open-source technologies. The solution must be tested with the images shared in this question. There are 6 input images, and the answer should display the results of processing each one of them. Finally, for each output image there must be red lines drawn to surround the detected tree.
How would you go about programmatically detecting the trees in these images?
Current answer
This is my simple and dumb solution. It is based on the assumption that the tree will be the brightest and biggest thing in the picture.
//g++ -Wall -pedantic -ansi -O2 -pipe -s -o christmas_tree christmas_tree.cpp `pkg-config --cflags --libs opencv`
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    Mat original, tmp, tmp1;
    vector<vector<Point> > contours;
    Moments m;
    Rect boundrect;
    Point2f center;
    double radius, max_area = 0, tmp_area = 0;
    unsigned int j, k;
    int i;

    for(i = 1; i < argc; ++i)
    {
        original = imread(argv[i]);
        if(original.empty())
        {
            cerr << "Error" << endl;
            return -1;
        }

        GaussianBlur(original, tmp, Size(3, 3), 0, 0, BORDER_DEFAULT);
        erode(tmp, tmp, Mat(), Point(-1, -1), 10);
        cvtColor(tmp, tmp, CV_BGR2HSV);
        inRange(tmp, Scalar(0, 0, 0), Scalar(180, 255, 200), tmp);

        dilate(original, tmp1, Mat(), Point(-1, -1), 15);
        cvtColor(tmp1, tmp1, CV_BGR2HLS);
        inRange(tmp1, Scalar(0, 185, 0), Scalar(180, 255, 255), tmp1);
        dilate(tmp1, tmp1, Mat(), Point(-1, -1), 10);

        bitwise_and(tmp, tmp1, tmp1);

        findContours(tmp1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        max_area = 0;
        j = 0;
        for(k = 0; k < contours.size(); k++)
        {
            tmp_area = contourArea(contours[k]);
            if(tmp_area > max_area)
            {
                max_area = tmp_area;
                j = k;
            }
        }
        tmp1 = Mat::zeros(original.size(), CV_8U);
        approxPolyDP(contours[j], contours[j], 30, true);
        drawContours(tmp1, contours, j, Scalar(255, 255, 255), CV_FILLED);

        m = moments(contours[j]);
        boundrect = boundingRect(contours[j]);
        center = Point2f(m.m10/m.m00, m.m01/m.m00);
        radius = (center.y - (boundrect.tl().y))/4.0*3.0;
        Rect heightrect(center.x-original.cols/5, boundrect.tl().y, original.cols/5*2, boundrect.size().height);

        tmp = Mat::zeros(original.size(), CV_8U);
        rectangle(tmp, heightrect, Scalar(255, 255, 255), -1);
        circle(tmp, center, radius, Scalar(255, 255, 255), -1);

        bitwise_and(tmp, tmp1, tmp1);

        findContours(tmp1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
        max_area = 0;
        j = 0;
        for(k = 0; k < contours.size(); k++)
        {
            tmp_area = contourArea(contours[k]);
            if(tmp_area > max_area)
            {
                max_area = tmp_area;
                j = k;
            }
        }

        approxPolyDP(contours[j], contours[j], 30, true);
        convexHull(contours[j], contours[j]);
        drawContours(original, contours, j, Scalar(0, 0, 255), 3);

        namedWindow(argv[i], CV_WINDOW_NORMAL|CV_WINDOW_KEEPRATIO|CV_GUI_EXPANDED);
        imshow(argv[i], original);
        waitKey(0);
        destroyWindow(argv[i]);
    }
    return 0;
}
The first step is to detect the brightest pixels in the picture, but we have to distinguish between the tree itself and the snow that reflects its light. Here we try to exclude the snow by applying a really simple filter on the color codes:
GaussianBlur(original, tmp, Size(3, 3), 0, 0, BORDER_DEFAULT);
erode(tmp, tmp, Mat(), Point(-1, -1), 10);
cvtColor(tmp, tmp, CV_BGR2HSV);
inRange(tmp, Scalar(0, 0, 0), Scalar(180, 255, 200), tmp);
Then we find every "bright" pixel:
dilate(original, tmp1, Mat(), Point(-1, -1), 15);
cvtColor(tmp1, tmp1, CV_BGR2HLS);
inRange(tmp1, Scalar(0, 185, 0), Scalar(180, 255, 255), tmp1);
dilate(tmp1, tmp1, Mat(), Point(-1, -1), 10);
Finally we combine the two results:
bitwise_and(tmp, tmp1, tmp1);
Now we look for the biggest bright object:
findContours(tmp1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
max_area = 0;
j = 0;
for(k = 0; k < contours.size(); k++)
{
    tmp_area = contourArea(contours[k]);
    if(tmp_area > max_area)
    {
        max_area = tmp_area;
        j = k;
    }
}
tmp1 = Mat::zeros(original.size(), CV_8U);
approxPolyDP(contours[j], contours[j], 30, true);
drawContours(tmp1, contours, j, Scalar(255, 255, 255), CV_FILLED);
Now we are almost done, but there are still some imperfections due to the snow. To cut them off, we build a mask using a circle and a rectangle to approximate the shape of a tree and delete the unwanted pieces:
m = moments(contours[j]);
boundrect = boundingRect(contours[j]);
center = Point2f(m.m10/m.m00, m.m01/m.m00);
radius = (center.y - (boundrect.tl().y))/4.0*3.0;
Rect heightrect(center.x-original.cols/5, boundrect.tl().y, original.cols/5*2, boundrect.size().height);
tmp = Mat::zeros(original.size(), CV_8U);
rectangle(tmp, heightrect, Scalar(255, 255, 255), -1);
circle(tmp, center, radius, Scalar(255, 255, 255), -1);
bitwise_and(tmp, tmp1, tmp1);
The last step is to find the contour of our tree and draw it on the original picture.
findContours(tmp1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
max_area = 0;
j = 0;
for(k = 0; k < contours.size(); k++)
{
    tmp_area = contourArea(contours[k]);
    if(tmp_area > max_area)
    {
        max_area = tmp_area;
        j = k;
    }
}
approxPolyDP(contours[j], contours[j], 30, true);
convexHull(contours[j], contours[j]);
drawContours(original, contours, j, Scalar(0, 0, 255), 3);
I'm sorry, but at the moment my internet connection is poor and I cannot upload the pictures. I'll try to do it later.
Merry Christmas.
EDIT:
Here are some pictures of the final output:
Other answers
...another old-fashioned solution - based purely on HSV processing:
1. Convert the image to the HSV colorspace
2. Create a mask according to heuristics in HSV (see below)
3. Apply morphological dilation to the mask to connect disconnected areas
4. Discard small areas and horizontal blocks (remember, the trees are vertical blocks)
5. Compute the bounding box
A word on the heuristics in the HSV processing:
- everything with hue (H) between 210 and 320 degrees is discarded as blue-magenta, which is supposed to be in the background or in non-relevant areas
- everything with value (V) lower than 40% is also discarded as being too dark to be relevant
Of course, one may experiment with many other possibilities to fine-tune this approach...
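As a side note (an illustration only, not part of the original answer), for readers who prefer OpenCV's Python bindings, a minimal sketch of this hue/value heuristic might look as follows; note that OpenCV stores hue in [0, 179] and value in [0, 255], so the 210-320 degree band maps to roughly 105-160:
import cv2
import numpy as np

def hsv_heuristic_mask(bgr_image):
    # OpenCV's HSV: H in [0, 179] (degrees / 2), S and V in [0, 255].
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    h, _, v = cv2.split(hsv)
    # Discard blue-magenta hues (210-320 degrees, i.e. roughly 105-160 here).
    blue_magenta = (h >= 105) & (h <= 160)
    # Discard pixels darker than 40% of full value.
    too_dark = v < 0.4 * 255
    # Everything else is kept as a candidate tree pixel.
    return ((~blue_magenta) & (~too_dark)).astype(np.uint8) * 255
The MATLAB code below implements the same idea on the 0-1 scale used by rgb2hsv and adds median filtering, dilation, and region filtering on top of it.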
Here is the MATLAB code to do the trick (warning: the code is far from optimized!!! I used techniques that are not recommended for MATLAB programming, just to be able to track anything in the process; this could be greatly optimized):
% clear everything
clear;
pack;
close all;
close all hidden;
drawnow;
clc;
% initialization
ims=dir('./*.jpg');
num=length(ims);
imgs={};
hsvs={};
masks={};
dilated_images={};
measurements={};
boxs={};
for i=1:num,
    % load original image
    imgs{end+1} = imread(ims(i).name);
    flt_x_size = round(size(imgs{i},2)*0.005);
    flt_y_size = round(size(imgs{i},1)*0.005);
    flt = fspecial( 'average', max( flt_y_size, flt_x_size));
    imgs{i} = imfilter( imgs{i}, flt, 'same');
    % convert to HSV colorspace
    hsvs{end+1} = rgb2hsv(imgs{i});
    % apply a hard thresholding and binary operation to construct the mask
    masks{end+1} = medfilt2( ~(hsvs{i}(:,:,1)>(210/360) & hsvs{i}(:,:,1)<(320/360))&hsvs{i}(:,:,3)>0.4);
    % apply morphological dilation to connect disconnected components
    strel_size = round(0.03*max(size(imgs{i}))); % structuring element for morphological dilation
    dilated_images{end+1} = imdilate( masks{i}, strel('disk',strel_size));
    % do some measurements to eliminate small objects
    measurements{i} = regionprops( dilated_images{i},'Perimeter','Area','BoundingBox');
    for m=1:length(measurements{i})
        if (measurements{i}(m).Area < 0.02*numel( dilated_images{i})) || (measurements{i}(m).BoundingBox(3)>1.2*measurements{i}(m).BoundingBox(4))
            dilated_images{i}( round(measurements{i}(m).BoundingBox(2):measurements{i}(m).BoundingBox(4)+measurements{i}(m).BoundingBox(2)),...
                round(measurements{i}(m).BoundingBox(1):measurements{i}(m).BoundingBox(3)+measurements{i}(m).BoundingBox(1))) = 0;
        end
    end
    % make sure the dilated image is the same size as the original
    dilated_images{i} = dilated_images{i}(1:size(imgs{i},1),1:size(imgs{i},2));
    % compute the bounding box
    [y,x] = find( dilated_images{i});
    if isempty( y)
        boxs{end+1}=[];
    else
        boxs{end+1} = [ min(x) min(y) max(x)-min(x)+1 max(y)-min(y)+1];
    end
end
%%% additional code to display things
for i=1:num,
    figure;
    subplot(121);
    colormap gray;
    imshow( imgs{i});
    if ~isempty(boxs{i})
        hold on;
        rr = rectangle( 'position', boxs{i});
        set( rr, 'EdgeColor', 'r');
        hold off;
    end
    subplot(122);
    imshow( imgs{i}.*uint8(repmat(dilated_images{i},[1 1 3])));
end
Results:
In the results I show the masked image and the bounding box.
Some old-fashioned image processing approach... The idea is based on the assumption that the images depict lighted trees on typically darker and smoother backgrounds (or, in some cases, foregrounds). The lighted tree area is more "active" and has higher intensity. The process is as follows:
1. Convert to grayscale
2. Apply LoG filtering to get the most "active" areas
3. Apply an intensity threshold to get the brightest areas
4. Combine the previous two to get a preliminary mask
5. Apply a morphological dilation to enlarge areas and connect neighboring components
6. Eliminate small candidate areas according to their area size
What you get is a binary mask and a bounding box for each image.
Here are the results using this naive technique:
The MATLAB code follows. The code runs on a folder with JPG images, loads all the images, and returns the detected results.
% clear everything
clear;
pack;
close all;
close all hidden;
drawnow;
clc;
% initialization
ims=dir('./*.jpg');
imgs={};
images={};
blur_images={};
log_image={};
dilated_image={};
int_image={};
bin_image={};
measurements={};
box={};
num=length(ims);
thres_div = 3;
for i=1:num,
    % load original image
    imgs{end+1}=imread(ims(i).name);
    % convert to grayscale
    images{end+1}=rgb2gray(imgs{i});
    % apply Laplacian-of-Gaussian filtering and heuristic hard thresholding
    val_thres = (max(max(images{i}))/thres_div);
    log_image{end+1} = imfilter( images{i},fspecial('log')) > val_thres;
    % get the brightest regions of the image
    int_thres = 0.26*max(max( images{i}));
    int_image{end+1} = images{i} > int_thres;
    % compute the final binary image by combining
    % high 'activity' with high intensity
    bin_image{end+1} = log_image{i} .* int_image{i};
    % apply morphological dilation to connect disconnected components
    strel_size = round(0.01*max(size(imgs{i}))); % structuring element for morphological dilation
    dilated_image{end+1} = imdilate( bin_image{i}, strel('disk',strel_size));
    % do some measurements to eliminate small objects
    measurements{i} = regionprops( logical( dilated_image{i}),'Area','BoundingBox');
    for m=1:length(measurements{i})
        if measurements{i}(m).Area < 0.05*numel( dilated_image{i})
            dilated_image{i}( round(measurements{i}(m).BoundingBox(2):measurements{i}(m).BoundingBox(4)+measurements{i}(m).BoundingBox(2)),...
                round(measurements{i}(m).BoundingBox(1):measurements{i}(m).BoundingBox(3)+measurements{i}(m).BoundingBox(1))) = 0;
        end
    end
    % make sure the dilated image is the same size as the original
    dilated_image{i} = dilated_image{i}(1:size(imgs{i},1),1:size(imgs{i},2));
    % compute the bounding box
    [y,x] = find( dilated_image{i});
    if isempty( y)
        box{end+1}=[];
    else
        box{end+1} = [ min(x) min(y) max(x)-min(x)+1 max(y)-min(y)+1];
    end
end
%%% additional code to display things
for i=1:num,
    figure;
    subplot(121);
    colormap gray;
    imshow( imgs{i});
    if ~isempty(box{i})
        hold on;
        rr = rectangle( 'position', box{i});
        set( rr, 'EdgeColor', 'r');
        hold off;
    end
    subplot(122);
    imshow( imgs{i}.*uint8(repmat(dilated_image{i},[1 1 3])));
end
My solution steps:
1. Get the R channel (from RGB) - all operations are performed on this channel
2. Create a Region of Interest (ROI):
   - Threshold the R channel with min value 149 (top-right image)
   - Dilate the resulting region (middle-left image)
3. Detect edges in the computed ROI. The tree has a lot of edges (middle-right image):
   - Dilate the result
   - Erode with a bigger radius (bottom-left image)
4. Select the biggest (by area) object - this is the resulting region
5. ConvexHull (the tree is a convex polygon) (bottom-right image)
6. Bounding box (bottom-right image - green box)
Step by step:
The first result - the simplest, but not open-source software - "Adaptive Vision Studio + Adaptive Vision Library": it is not open source, but it is really fast to prototype:
The whole Christmas tree detection algorithm (11 blocks):
Next step. We want an open-source solution, so the AVL filters are replaced with OpenCV filters. Here I made a few small changes, e.g. edge detection uses the cvCanny filter; to respect the ROI I multiply the region image with the edges image; to select the biggest element I use findContours + contourArea. But the idea is the same.
https://www.youtube.com/watch?v=sfjB3MigLH0&index=1&list=UUpSRrkMHNHiLDXgylwhWNQQ
I can't show images of the intermediate steps right now because I can only include 2 links.
OK, now we use open-source filters, but it is still not entirely open source. The last step is to port it to C++ code. I used OpenCV version 2.4.4.
The result of the final C++ code is:
The C++ code is also quite short:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include <algorithm>

using namespace cv;

int main()
{
    string images[6] = {"..\\1.png","..\\2.png","..\\3.png","..\\4.png","..\\5.png","..\\6.png"};
    for(int i = 0; i < 6; ++i)
    {
        Mat img, thresholded, tdilated, tmp, tmp1;
        vector<Mat> channels(3);

        img = imread(images[i]);
        split(img, channels);
        threshold( channels[2], thresholded, 149, 255, THRESH_BINARY);                      //prepare ROI - threshold
        dilate( thresholded, tdilated, getStructuringElement( MORPH_RECT, Size(22,22) ) );  //prepare ROI - dilate
        Canny( channels[2], tmp, 75, 125, 3, true );    //Canny edge detection
        multiply( tmp, tdilated, tmp1 );                // set ROI

        dilate( tmp1, tmp, getStructuringElement( MORPH_RECT, Size(20,16) ) ); // dilate
        erode( tmp, tmp1, getStructuringElement( MORPH_RECT, Size(36,36) ) );  // erode

        vector<vector<Point> > contours, contours1(1);
        vector<Point> convex;
        vector<Vec4i> hierarchy;
        findContours( tmp1, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

        //get element of maximum area
        //int bestID = std::max_element( contours.begin(), contours.end(),
        //  []( const vector<Point>& A, const vector<Point>& B ) { return contourArea(A) < contourArea(B); } ) - contours.begin();

        int bestID = 0;
        int bestArea = contourArea( contours[0] );
        for( int i = 1; i < contours.size(); ++i )
        {
            int area = contourArea( contours[i] );
            if( area > bestArea )
            {
                bestArea = area;
                bestID = i;
            }
        }

        convexHull( contours[bestID], contours1[0] );
        drawContours( img, contours1, 0, Scalar( 100, 100, 255 ), img.rows / 100, 8, hierarchy, 0, Point() );

        imshow("image", img );
        waitKey(0);
    }
    return 0;
}
I used Python with OpenCV.
My algorithm goes like this:
1. First it takes the red channel from the image
2. Applies a threshold (min value 200) to the red channel
3. Then applies a morphological gradient, followed by a "closing" (dilation, then erosion)
4. Then it finds the contours in the plane and picks the longest one.
The code:
import numpy as np
import cv2
import copy

def findTree(image,num):
    im = cv2.imread(image)
    im = cv2.resize(im, (400,250))
    gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
    imf = copy.deepcopy(im)
    b,g,r = cv2.split(im)
    minR = 200
    _,thresh = cv2.threshold(r,minR,255,0)
    kernel = np.ones((25,5))
    dst = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
    dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, kernel)
    contours = cv2.findContours(dst,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)[0]
    cv2.drawContours(im, contours,-1, (0,255,0), 1)
    maxI = 0
    for i in range(len(contours)):
        if len(contours[maxI]) < len(contours[i]):
            maxI = i
    img = copy.deepcopy(r)
    cv2.polylines(img,[contours[maxI]],True,(255,255,255),3)
    imf[:,:,2] = img
    cv2.imshow(str(num), imf)

def main():
    findTree('tree.jpg',1)
    findTree('tree2.jpg',2)
    findTree('tree3.jpg',3)
    findTree('tree4.jpg',4)
    findTree('tree5.jpg',5)
    findTree('tree6.jpg',6)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
If I change the kernel from (25,5) to (10,5) I get better results on all the trees except the bottom-left one; a sketch of that variant is included below.
My algorithm assumes that the tree has lights on it, and in the bottom-left tree the top has less light than the others.
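For anyone who wants to experiment with that trade-off, here is a minimal sketch (an illustration only, not part of the original answer) of the threshold-plus-morphology step with the kernel size exposed as a parameter; the helper name tree_mask and its defaults are assumptions:
import numpy as np
import cv2

def tree_mask(red_channel, kernel_size=(10, 5), min_r=200):
    # Threshold the red channel, then apply a morphological gradient
    # followed by a closing, as in the answer above.
    _, thresh = cv2.threshold(red_channel, min_r, 255, 0)
    kernel = np.ones(kernel_size, np.uint8)
    dst = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel)
    dst = cv2.morphologyEx(dst, cv2.MORPH_CLOSE, kernel)
    return dst

# Example: compare the (25, 5) kernel with the (10, 5) variant on one image.
# im = cv2.imread('tree.jpg')
# r = cv2.split(cv2.resize(im, (400, 250)))[2]
# mask_wide = tree_mask(r, (25, 5))
# mask_narrow = tree_mask(r, (10, 5))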