I'm trying to implement an Active Contour Models (ACM) algorithm with OpenCV 3.0 in C++.
The algorithm is based on a script I wrote in MATLAB, and it is not working as it is supposed to.
These two images show the results of the two algorithms.
The MATLAB script:
and the OpenCV one:
In both I used the same values for all the ACM parameters, so they should return the same thing: the contour of the white circle.
I suspect the problem is in my image energy function, since gradient operations in OpenCV and MATLAB are not the same. The MATLAB code for the image energy is:
function [Eext] = get_eext(wl, we, wt, image)
%External Energy
[row,col] = size(image);
eline = image; %eline is simply the image intensities
[grady,gradx] = gradient(image);
eedge = -1 *(gradx .* gradx + grady .* grady);
%masks for taking various derivatives
m1 = [-1 1];
m2 = [-1;1];
m3 = [1 -2 1];
m4 = [1;-2;1];
m5 = [1 -1;-1 1];
cx = conv2(image,m1,'same');
cy = conv2(image,m2,'same');
cxx = conv2(image,m3,'same');
cyy = conv2(image,m4,'same');
cxy = conv2(image,m5,'same');
eterm = zeros(row, col);
for i = 1:row
    for j = 1:col
        % eterm as defined in the Kass et al. Snakes paper
        eterm(i,j) = (cyy(i,j)*cx(i,j)*cx(i,j) - 2*cxy(i,j)*cx(i,j)...
            *cy(i,j) + cxx(i,j)*cy(i,j)*cy(i,j))/((1 + cx(i,j)*cx(i,j)...
            + cy(i,j)*cy(i,j))^1.5);
    end
end
Eext = (wl*eline + we*eedge + wt*eterm);
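One thing I already know differs between the two: conv2 performs true convolution (it flips the kernel and zero-pads the border for the 'same' output), while filter2D, which I use in the C++ version below, computes correlation with a reflected border by default. For the symmetric odd-sized kernels m3 and m4 the flip itself changes nothing, but for m1, m2 and m5 the orientation and the even kernel size can shift or negate the result, and the border handling differs in every case. Here is a rough sketch of how I think conv2(image, k, 'same') could be emulated; it follows the kernel-flip recipe from the filter2D documentation, it is only a guess, and the anchor for the even-sized kernels may still need adjusting:

#include <opencv2/opencv.hpp>

// Sketch only: emulate MATLAB's conv2(img, k, 'same') with cv::filter2D.
// filter2D computes correlation, so the kernel is flipped and the anchor
// mirrored (the recipe given in the filter2D documentation); conv2
// zero-pads at the border, hence BORDER_CONSTANT. The even-sized kernels
// (m1, m2, m5) may still end up shifted by one pixel versus conv2's
// 'same' cropping.
cv::Mat conv2_same(const cv::Mat& img, const cv::Mat& kernel)
{
    cv::Mat flipped, dst;
    cv::flip(kernel, flipped, -1); // flip around both axes
    cv::Point anchor(kernel.cols - kernel.cols / 2 - 1,
                     kernel.rows - kernel.rows / 2 - 1);
    cv::filter2D(img, dst, CV_32F, flipped, anchor, 0, cv::BORDER_CONSTANT);
    return dst;
}

Something like cx = conv2_same(img_gray, m1); would then replace the corresponding filter2D call, but I have not verified that this matches MATLAB exactly.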
And this is how my C++ function turned out:
#include <opencv2/opencv.hpp>

using namespace cv;

//bitdepth defined as CV_32F
const int bitdepth = CV_32F;

Mat get_eext(float wl, float we, float wt, Mat image){
    Mat eline, gradx, grady, img_gray, eedge;
    image.convertTo(img_gray, bitdepth);
    //Convolution kernels
    Mat m1, m2, m3, m4, m5;
    m1 = (Mat_<float>(1, 2) << -1, 1);
    m2 = (Mat_<float>(2, 1) << -1, 1);
    m3 = (Mat_<float>(1, 3) << 1, -2, 1);
    m4 = (Mat_<float>(3, 1) << 1, -2, 1);
    m5 = (Mat_<float>(2, 2) << 1, -1, -1, 1);
    //cvtColor(image, img_gray, CV_BGR2GRAY); <- not required since the image is already grayscale
    //Line energy: eline is simply the image intensities
    img_gray.copyTo(eline);
    //First-order gradient (central differences)
    Mat kernelx = (Mat_<float>(1, 3) << -0.5, 0, 0.5);
    Mat kernely = (Mat_<float>(3, 1) << -0.5, 0, 0.5);
    filter2D(img_gray, gradx, -1, kernelx);
    filter2D(img_gray, grady, -1, kernely);
    //Edge energy
    eedge = -1 * (gradx.mul(gradx) + grady.mul(grady));
    //Termination energy convolutions
    Mat cx, cy, cxx, cyy, cxy, eterm, cxm1, den, cxcx, cxcxm1, cxcxcy, cxcycxy, cycycxx;
    filter2D(img_gray, cx, bitdepth, m1);
    filter2D(img_gray, cy, bitdepth, m2);
    filter2D(img_gray, cxx, bitdepth, m3);
    filter2D(img_gray, cyy, bitdepth, m4);
    filter2D(img_gray, cxy, bitdepth, m5);
    //Element-wise operations to find eterm
    cxcx = cx.mul(cx);
    cxcx.convertTo(cxcxm1, -1, 1, 1); // cx*cx + 1
    den = cxcxm1 + cy.mul(cy);
    cv::pow(den, 1.5, den);
    cxcxcy = cxcx.mul(cy);
    cxcycxy = cx.mul(cy);
    cxcycxy = cxcycxy.mul(cxy);
    cycycxx = cy.mul(cy);
    cycycxx = cycycxx.mul(cxx);
    eterm = (cxcxcy - 2 * cxcycxy + cycycxx);
    cv::divide(eterm, den, eterm, -1);
    //Image energy
    Mat eext;
    eext = wl * eline + we * eedge + wt * eterm;
    return eext;
}
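Another difference I am aware of: MATLAB's gradient() uses central differences in the interior of the image but one-sided differences in the first and last row/column, whereas the centered [-0.5, 0, 0.5] kernel above, with filter2D's default border handling, gives different values along the border. A rough sketch of a closer match for the x direction (a hypothetical helper, not used in the function above; the y direction would be analogous):

#include <opencv2/opencv.hpp>

// Sketch only: approximate MATLAB's gradient() along x for a CV_32F image:
// central differences in the interior, one-sided differences on the
// first and last column.
cv::Mat matlab_gradient_x(const cv::Mat& img)
{
    cv::Mat gx;
    cv::Mat kx = (cv::Mat_<float>(1, 3) << -0.5f, 0.0f, 0.5f);
    cv::filter2D(img, gx, CV_32F, kx); // central differences inside
    // overwrite the border columns with one-sided differences
    cv::Mat left = img.col(1) - img.col(0);
    cv::Mat right = img.col(img.cols - 1) - img.col(img.cols - 2);
    left.copyTo(gx.col(0));
    right.copyTo(gx.col(gx.cols - 1));
    return gx;
}

I don't know whether these border effects are enough to explain the difference in the results, though.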
Does anyone know what might be wrong?