本文整理汇总了C++中cvGEMM函数的典型用法代码示例。如果您正苦于以下问题:C++ cvGEMM函数的具体用法?C++ cvGEMM怎么用?C++ cvGEMM使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cvGEMM函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: cvKalmanCorrect
/* Legacy (pre-C++ API) Kalman filter measurement-update ("correct") step.
 * Folds measurement z(k) into the predicted state: computes the Kalman gain
 * K(k) with an SVD-based solve (avoiding an explicit inverse of the
 * innovation covariance), then updates state_post and error_cov_post.
 * Returns kalman->state_post. */
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
    if( !kalman || !measurement )
        CV_Error( CV_StsNullPtr, "" );

    /* temp2 = H*P'(k) */
    cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );

    /* temp3 = temp2*Ht + R  (innovation covariance S) */
    cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
            kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );

    /* temp4 = inv(temp3)*temp2 = Kt(k) -- solved, not inverted, for stability */
    cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );

    /* K(k) */
    cvTranspose( kalman->temp4, kalman->gain );

    /* temp5 = z(k) - H*x'(k)  (the innovation/residual) */
    cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 );

    /* x(k) = x'(k) + K(k)*temp5 */
    cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );

    /* P(k) = P'(k) - K(k)*temp2 */
    cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
            kalman->error_cov_post, 0 );

    return kalman->state_post;
}
开发者ID:ArkaJU,项目名称:opencv,代码行数:30,代码来源:compat_video.cpp
示例2: __dgemv
// y := alpha * op(A) * X + beta * y   (BLAS dgemv-style wrapper over cvGEMM)
//
// trans == 'T': interprets the buffer A as an n x m matrix and computes
//               y(n) = alpha * A * X(m) + beta * y.
// trans == 'N': same n x m buffer, multiplied by its transpose:
//               y(m) = alpha * A^T * X(n) + beta * y.
// (The 'T'/'N' labels presumably follow the caller's column-major BLAS
// convention while OpenCV reads the buffer row-major -- TODO confirm
// against the call sites.)
//
// lda, incx and incy exist only for BLAS signature compatibility; lda is
// unused and only unit strides are supported (asserted below).
inline void __dgemv(
    char trans,
    int m,
    int n,
    double alpha,
    double *A, // n * m
    int lda,
    double *X, // m('T')
    int incx,
    double beta,
    double *y, // n('T')
    int incy
) {
    assert(incx==1 && incy==1);
    if(trans=='T') {
        CvMat A_mat= cvMat(n, m, CV_64FC1, A);
        CvMat X_mat= cvMat(m, 1, CV_64FC1, X);
        CvMat y_mat= cvMat(n, 1, CV_64FC1, y);
        cvGEMM(&A_mat, &X_mat, alpha, &y_mat, beta, &y_mat, 0);
    } else if(trans=='N') {
        CvMat A_mat= cvMat(n, m, CV_64FC1, A);
        CvMat X_mat= cvMat(n, 1, CV_64FC1, X);
        CvMat y_mat= cvMat(m, 1, CV_64FC1, y);
        cvGEMM(&A_mat, &X_mat, alpha, &y_mat, beta, &y_mat, CV_GEMM_A_T);
    } else {
        // FIX: diagnostics belong on stderr, not stdout (was printf).
        fprintf(stderr, "error in function __dgemv");
        exit(-1);
    }
}
开发者ID:409544320,项目名称:face_recog.src,代码行数:30,代码来源:blas_wrappers.hpp
示例3: cvKalmanPredict
/* Legacy (pre-C++ API) Kalman filter time-update ("predict") step.
 * Propagates the state and error covariance one step forward, optionally
 * applying the control input u(k).  Returns kalman->state_pre. */
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
    if( !kalman )
        CV_Error( CV_StsNullPtr, "" );

    /* update the state */
    /* x'(k) = A*x(k) */
    cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );

    if( control && kalman->CP > 0 )
        /* x'(k) = x'(k) + B*u(k) */
        cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );

    /* update error covariance matrices */
    /* temp1 = A*P(k) */
    cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );

    /* P'(k) = temp1*At + Q */
    cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
            kalman->error_cov_pre, CV_GEMM_B_T );

    /* handle the case when there will be measurement before the next predict */
    cvCopy(kalman->state_pre, kalman->state_post);

    return kalman->state_pre;
}
开发者ID:ArkaJU,项目名称:opencv,代码行数:27,代码来源:compat_video.cpp
示例4: interpolateStep
//! Performs one step of extremum interpolation.
/* Solves H * X = -dD for the subpixel/subscale offset of a detected
 * extremum, where dD and H are the 3D gradient and Hessian of the response
 * across layers (b, m, t) at (r, c).
 * Outputs: *xi = interval offset, *xr = row offset, *xc = column offset.
 * Note: CreateMat/InitMatHeader appear to be local wrappers for
 * cvCreateMat/cvInitMatHeader (per the porting notes in the original) --
 * TODO confirm their definitions. */
void interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b, double* xi, double* xr, double* xc )
{
    CvMat* dD, * H, * H_inv, X;
    double x[3] = { 0 };

    dD = deriv3D( r, c, t, m, b );
    H = hessian3D( r, c, t, m, b );
    H_inv = CreateMat( 3, 3, CV_64FC1 );
    cvInvert( H, H_inv, CV_SVD );
    InitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    /* X = -H^(-1) * dD */
    cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );

    /* BUG FIX: the previous code called free(&dD), free(&H), free(&H_inv),
     * i.e. free() on the ADDRESSES OF STACK VARIABLES -- undefined behavior,
     * and it leaked the matrix data.  Matrices created by cvCreateMat must
     * be released with cvReleaseMat (the correct calls were present but
     * commented out). */
    cvReleaseMat( &dD );
    cvReleaseMat( &H );
    cvReleaseMat( &H_inv );

    *xi = x[2];
    *xr = x[1];
    *xc = x[0];
}
开发者ID:ertanergun,项目名称:OpenSurfInC,代码行数:27,代码来源:fasthassian.c
示例5: printf
//============================================================================
void AAM_Basic::CalcGradientMatrix(const CvMat* CParams,
const CvMat* vCDisps,
const CvMat* vPoseDisps,
const std::vector<AAM_Shape>& AllShapes,
const std::vector<IplImage*>& AllImages)
{
int npixels = __cam.__texture.nPixels();
int np = __cam.nModes();
// do model parameter experiments
{
printf("Calculating parameter gradient matrix...\n");
CvMat* GParam = cvCreateMat(np, npixels, CV_64FC1);cvZero(GParam);
CvMat* GtG = cvCreateMat(np, np, CV_64FC1);
CvMat* GtGInv = cvCreateMat(np, np, CV_64FC1);
// estimate Rc
EstCParamGradientMatrix(GParam, CParams, AllShapes, AllImages, vCDisps);
__Rc = cvCreateMat(np, npixels, CV_64FC1);
cvGEMM(GParam, GParam, 1, NULL, 0, GtG, CV_GEMM_B_T);
cvInvert(GtG, GtGInv, CV_SVD );
cvMatMul(GtGInv, GParam, __Rc);
cvReleaseMat(&GtG);
cvReleaseMat(&GtGInv);
cvReleaseMat(&GParam);
}
// do pose experiments, this is for global shape normalization
{
printf("Calculating pose gradient matrix...\n");
CvMat* GtG = cvCreateMat(4, 4, CV_64FC1);
CvMat* GtGInv = cvCreateMat(4, 4, CV_64FC1);
CvMat* GPose = cvCreateMat(4, npixels, CV_64FC1); cvZero(GPose);
// estimate Rt
EstPoseGradientMatrix(GPose, CParams, AllShapes, AllImages, vPoseDisps);
__Rq = cvCreateMat(4, npixels, CV_64FC1);
cvGEMM(GPose, GPose, 1, NULL, 0, GtG, CV_GEMM_B_T);
cvInvert(GtG, GtGInv, CV_SVD);
cvMatMul(GtGInv, GPose, __Rq);
cvReleaseMat(&GtG);
cvReleaseMat(&GtGInv);
cvReleaseMat(&GPose);
}
}
开发者ID:aodkrisda,项目名称:face-gesture-api,代码行数:48,代码来源:AAM_Basic.cpp
示例6: get_Stitched_Size
/* Computes the canvas size needed to stitch image 2 (warped by the
 * homography Homo_Mat) next to image 1.  On return, XDATA/YDATA hold the
 * min/max x and y of image 2's warped corners. */
CvSize get_Stitched_Size(CvSize im1_size, CvSize im2_size, CvMat* Homo_Mat, double XDATA[], double YDATA[] )/*{{{*/
{
    int Width = 0;
    int Height = 0;
    int i;
    double cx[4], cy[4];
    /* The four corners of image 2 in homogeneous coordinates:
     * top-left, top-right, bottom-left, bottom-right. */
    double corner[4][3] = {
        { 0,                 0,                  1 },
        { im2_size.width-1,  0,                  1 },
        { 0,                 im2_size.height-1,  1 },
        { im2_size.width-1,  im2_size.height-1,  1 }
    };

    /* Warp each corner through the homography (in place, as the original
     * did), then de-homogenize: (x, y, w) -> (x/w, y/w). */
    for( i = 0; i < 4; i++ )
    {
        CvMat pt = cvMat( 3, 1, CV_64FC1, corner[i] );
        cvGEMM( Homo_Mat, &pt, 1, NULL, 0, &pt, 0 );
        cx[i] = corner[i][0] / corner[i][2];
        cy[i] = corner[i][1] / corner[i][2];
    }

    /* Axis-aligned bounding box of the warped corners. */
    XDATA[0] = min( min( min( cx[0], cx[1] ), cx[2] ), cx[3] );
    YDATA[0] = min( min( min( cy[0], cy[1] ), cy[2] ), cy[3] );
    XDATA[1] = max( max( max( cx[0], cx[1] ), cx[2] ), cx[3] );
    YDATA[1] = max( max( max( cy[0], cy[1] ), cy[2] ), cy[3] );

    /* The stitched canvas must cover image 1 plus the warped image 2. */
    Width  = max( max( max( im1_size.width,  XDATA[1] ), im1_size.width-XDATA[0] ),  XDATA[1]-XDATA[0] );
    Height = max( max( max( im1_size.height, YDATA[1] ), im1_size.height-YDATA[0] ), YDATA[1]-YDATA[0] );
    Width = cvRound( Width );
    printf("New image's Width is %d\n", Width );
    Height = cvRound( Height );
    printf("New image's Height is %d\n", Height );
    return cvSize( Width, Height );
}/*}}}*/
开发者ID:huaijin-chen,项目名称:SIFT.huaijin,代码行数:45,代码来源:stitcher.c
示例7: cvCopy
/* Converts BazAR's projection matrix into an ARToolKit-style camera pose
 * (stored in matCameraRT4_4) and, when debug mode is on, draws the
 * projected coordinate-system axes onto the output image *dst. */
void BazARTracker::show_result(CamAugmentation &augment, IplImage *video, IplImage **dst)
{
    if (getDebugMode()){
        // Lazily clone the frame the first time; afterwards just copy into it.
        if (*dst==0) *dst=cvCloneImage(video);
        else cvCopy(video, *dst);
    }
    CvMat *m = augment.GetProjectionMatrix(0);

    // Flip the y axis: the OpenGL image origin differs from the camera origin.
    CvMat *coordinateTrans = cvCreateMat(3, 3, CV_64F);
    cvmSetIdentity(coordinateTrans);
    cvmSet(coordinateTrans, 1, 1, -1);
    cvmSet(coordinateTrans, 1, 2, m_cparam->cparam.ysize);
    // NOTE(review): m is used as both source and destination here; cvMatMul
    // with aliased arguments may be unsafe in some OpenCV versions -- confirm.
    cvMatMul(coordinateTrans, m, m);

    // extract intrinsic camera parameters from bazar's projection matrix..
    GetARToolKitRTfromBAZARProjMat(g_matIntrinsic, m, matCameraRT4_4);
    cvTranspose(matCameraRT4_4, matCameraRT4_4);
    cvReleaseMat(&coordinateTrans);

    // Debug: overlay the projected coordinate axes.
    if (getDebugMode()) {
        // draw the coordinate system axes
        double w =video->width/2.0;
        double h =video->height/2.0;

        // 3D homogeneous coordinates of the axes' endpoints (origin at the
        // image center).
        double pts[4][4] = {
            {w,h,0, 1}, // 0,0,0,1
            {w*2,h,0, 1}, // w, 0
            {w,h*2,0, 1}, // 0, h
            {w,h,-w-h, 1} // 0, 0, -
        };

        CvMat ptsMat, projectedMat;
        cvInitMatHeader(&ptsMat, 4, 4, CV_64FC1, pts);
        // `projected` is presumably a 3x4 class/global buffer -- it is not
        // declared in this scope; TODO confirm.
        cvInitMatHeader(&projectedMat, 3, 4, CV_64FC1, projected);
        // projected = m * pts^T  (project all four points at once)
        cvGEMM(m, &ptsMat, 1, 0, 0, &projectedMat, CV_GEMM_B_T );
        // De-homogenize each projected point.
        for (int i=0; i<4; i++)
        {
            projected[0][i] /= projected[2][i];
            projected[1][i] /= projected[2][i];
        }

        // draw the projected lines: x axis red, y axis green, z axis blue
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][1], (int)projected[1][1]), CV_RGB(255,0,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][2], (int)projected[1][2]), CV_RGB(0,255,0), 2);
        cvLine(*dst, cvPoint((int)projected[0][0], (int)projected[1][0]),
               cvPoint((int)projected[0][3], (int)projected[1][3]), CV_RGB(0,0,255), 2);
    }
}
开发者ID:soulsheng,项目名称:osgART,代码行数:56,代码来源:BazARTracker.cpp
示例8: cvMat
// Computes where the four corners of image 2 land after applying the
// homography H, storing the de-homogenized results in the leftTop,
// leftBottom, rightTop and rightBottom members.
void SiftMatch::CalcFourCorner()
{
    // Map the four corners of image 2 through the homography H.
    double v2[]={0,0,1};  // top-left corner, homogeneous coordinates
    double v1[3];         // transformed coordinates
    CvMat V2 = cvMat(3,1,CV_64FC1,v2);
    CvMat V1 = cvMat(3,1,CV_64FC1,v1);

    cvGEMM(H,&V2,1,0,1,&V1);  // matrix product: V1 = H * V2
    leftTop.x = cvRound(v1[0]/v1[2]);
    leftTop.y = cvRound(v1[1]/v1[2]);
    //cvCircle(xformed,leftTop,7,CV_RGB(255,0,0),2);

    // Reuse v2 for the bottom-left corner.
    v2[0] = 0;
    v2[1] = img2->height;
    V2 = cvMat(3,1,CV_64FC1,v2);
    V1 = cvMat(3,1,CV_64FC1,v1);
    cvGEMM(H,&V2,1,0,1,&V1);
    leftBottom.x = cvRound(v1[0]/v1[2]);
    leftBottom.y = cvRound(v1[1]/v1[2]);
    //cvCircle(xformed,leftBottom,7,CV_RGB(255,0,0),2);

    // Reuse v2 for the top-right corner.
    v2[0] = img2->width;
    v2[1] = 0;
    V2 = cvMat(3,1,CV_64FC1,v2);
    V1 = cvMat(3,1,CV_64FC1,v1);
    cvGEMM(H,&V2,1,0,1,&V1);
    rightTop.x = cvRound(v1[0]/v1[2]);
    rightTop.y = cvRound(v1[1]/v1[2]);
    //cvCircle(xformed,rightTop,7,CV_RGB(255,0,0),2);

    // Reuse v2 for the bottom-right corner.
    v2[0] = img2->width;
    v2[1] = img2->height;
    V2 = cvMat(3,1,CV_64FC1,v2);
    V1 = cvMat(3,1,CV_64FC1,v1);
    cvGEMM(H,&V2,1,0,1,&V1);
    rightBottom.x = cvRound(v1[0]/v1[2]);
    rightBottom.y = cvRound(v1[1]/v1[2]);
    //cvCircle(xformed,rightBottom,7,CV_RGB(255,0,0),2);
}
开发者ID:githubcjl,项目名称:uVision_cjl,代码行数:44,代码来源:siftmatch.cpp
示例9: cvGEMM
// Maps the averaged pupil center through the calibration homography to
// obtain the gaze point in scene-image coordinates (stored in scenePoint).
void EyeTracker::calculateScenePoint()
{
    // Pupil center as a homogeneous column vector.
    centerMatrix->data.db[0] = aver_center.x;
    centerMatrix->data.db[1] = aver_center.y;
    centerMatrix->data.db[2] = 1;

    // hedefp = homography * center
    cvGEMM(homography, centerMatrix, 1, 0 , 0, hedefp);

    // De-homogenize and round to integer pixel coordinates.
    scenePoint = cvPoint(cvRound(hedefp->data.db[0] / hedefp->data.db[2]),
                         cvRound(hedefp->data.db[1] / hedefp->data.db[2]));
}
开发者ID:burakkoray,项目名称:EyeTracker,代码行数:11,代码来源:EyeTracker.cpp
示例10: MT_CVQuadraticMul
void MT_CVQuadraticMul(const CvMat* X,
const CvMat* W,
CvMat* dst,
bool transpose_X,
CvMat* tmp_prod)
{
bool own_prod = (tmp_prod == NULL);
if(own_prod)
{
tmp_prod = cvCreateMat(W->rows, X->cols, cvGetElemType(X));
}
cvGEMM(W, X, 1.0, NULL, 1.0, tmp_prod, transpose_X ? CV_GEMM_B_T : 0);
cvGEMM(X, tmp_prod, 1.0, NULL, 1.0, dst, transpose_X ? 0 : CV_GEMM_A_T);
if(own_prod)
{
cvReleaseMat(&tmp_prod);
}
}
开发者ID:davidmandle,项目名称:MADTraC,代码行数:21,代码来源:OpenCVmath.cpp
示例11: cvL1QCSolve
/* L1 minimization with a quadratic constraint (l1-magic style "l1qc"):
 *   min ||x||_1   s.t.   ||Ax - B||_2 <= epsilon
 * Starts from the minimum-energy solution x0 = A^T (A A^T)^(-1) B obtained
 * with conjugate gradients, then runs an outer log-barrier loop whose inner
 * problems are solved by Newton steps (icvL1QCNewton).  mu is the barrier
 * growth factor per outer iteration.
 * Returns 0 on success, -1 if the initial CG solve fails to converge. */
int cvL1QCSolve( CvMat* A, CvMat* B, CvMat* X, double epsilon, double mu, CvTermCriteria lb_term_crit, CvTermCriteria cg_term_crit )
{
    /* Initial point: X = A^T (A A^T)^(-1) B. */
    CvMat* AAt = cvCreateMat( A->rows, A->rows, CV_MAT_TYPE(A->type) );
    cvGEMM( A, A, 1, NULL, 0, AAt, CV_GEMM_B_T );
    CvMat* W = cvCreateMat( A->rows, 1, CV_MAT_TYPE(X->type) );
    if ( cvCGSolve( AAt, B, W, cg_term_crit ) > .5 )
    {
        /* CG did not reach the requested tolerance -- give up. */
        cvReleaseMat( &W );
        cvReleaseMat( &AAt );
        return -1;
    }
    cvGEMM( A, W, 1, NULL, 0, X, CV_GEMM_A_T );
    cvReleaseMat( &W );
    cvReleaseMat( &AAt );

    /* U holds the barrier slack variables, initialized slightly above |X|. */
    CvMat* U = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
    cvAbsDiffS( X, U, cvScalar(0) );   /* U = |X - 0| = |X| */
    CvScalar sumAbsX = cvSum( U );
    double minAbsX, maxAbsX;
    cvMinMaxLoc( U, &minAbsX, &maxAbsX );
    cvConvertScale( U, U, .95, maxAbsX * .1 );

    /* Initial barrier parameter; if the caller gave no iteration count,
     * derive one so the final duality gap falls below lb_term_crit.epsilon. */
    double tau = MAX( (2 * X->rows + 1) / sumAbsX.val[0], 1 );
    if ( !(lb_term_crit.type & CV_TERMCRIT_ITER) )
        lb_term_crit.max_iter = ceil( (log(2 * X->rows + 1) - log(lb_term_crit.epsilon) - log(tau)) / log(mu) );
    CvTermCriteria nt_term_crit = cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 50, lb_term_crit.epsilon );

    /* Outer log-barrier loop: tighten the barrier by mu each pass. */
    for ( int i = 0; i < lb_term_crit.max_iter; ++i )
    {
        icvL1QCNewton( A, B, X, U, epsilon, tau, nt_term_crit, cg_term_crit );
        tau *= mu;
    }
    cvReleaseMat( &U );
    return 0;
}
开发者ID:caomw,项目名称:l1cs,代码行数:39,代码来源:cvl1qc.cpp
示例12: interp_contr
/*
Calculates interpolated pixel contrast. Based on Eqn. (3) in Lowe's paper.

@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col

@param Returns interpolated contrast.
*/
double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
                     int c, double xi, double xr, double xc )
{
    CvMat* dD, X, T;
    double t[1], x[3] = { xc, xr, xi };

    /* X = (xc, xr, xi)^T : the interpolated offset as a column vector. */
    cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
    /* T receives the scalar result of the 1x3 * 3x1 product. */
    cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP );
    /* dD: gradient of DoG in x, y and scale at (r, c). */
    dD = deriv_3D( dog_pyr, octv, intvl, r, c );
    /* T = dD^T * X */
    cvGEMM( dD, &X, 1, NULL, 0, &T, CV_GEMM_A_T );
    cvReleaseMat( &dD );

    /* Second-order Taylor estimate: D(X) ~= D + 0.5 * dD^T * X. */
    return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
开发者ID:cherubjywh,项目名称:opencv,代码行数:28,代码来源:sift.cpp
示例13: meanX
/* Builds the constraint matrix M (2n x 2n, n = unit.n points) for one unit.
 * The columns of A span the similarity transforms of the (mean-centered)
 * points -- scale/rotation via rows (x,-y) / (y,x) plus per-axis
 * translation -- and M = A (A^T A)^(-1) A^T - I, so M*v vanishes exactly
 * when v is a pure similarity transform of the unit.  The caller owns the
 * newly-allocated M. */
void ConformalResizing::Constrian(const ConstrainUnits& unit, CvMat*& M)
{
    // Preprocess unit to make Matrix M less singular
    // (subtracting the centroid improves the conditioning of A^T A).
    double meanX(0), meanY(0);
    for (int i = 0; i < unit.n; i++)
    {
        meanX += unit.pnts[i].x;
        meanY += unit.pnts[i].y;
    }
    meanX /= unit.n;
    meanY /= unit.n;

    int n = unit.n * 2;
    M = cvCreateMat(n, n, CV_64F);
    CvMat* A = cvCreateMat(n, 4, CV_64F);
    CvMat* Q = cvCreateMat(n, 4, CV_64F);
    CvMat* P = cvCreateMat(4, 4, CV_64F);

    // Initial A: two rows per point, (x, -y, 1, 0) and (y, x, 0, 1).
    cvZero(A);
    for (int i = 0; i < unit.n; i++)
    {
        double x = unit.pnts[i].x - meanX;
        double y = unit.pnts[i].y - meanY;
        CV_MAT_ELEM(*A, double, 2*i, 0) = x;
        CV_MAT_ELEM(*A, double, 2*i, 1) = -y;
        CV_MAT_ELEM(*A, double, 2*i, 2) = 1;

        CV_MAT_ELEM(*A, double, 2*i+1, 0) = y;
        CV_MAT_ELEM(*A, double, 2*i+1, 1) = x;
        CV_MAT_ELEM(*A, double, 2*i+1, 3) = 1;
    }

    cvMulTransposed(A, P, 1); // P = (A^T * A)
    cvInvert(P, P, CV_SVD_SYM); // P = (A^T * A)^(-1)
    cvMatMul(A, P, Q);          // Q = A * (A^T A)^(-1)
    cvGEMM(Q, A, 1, NULL, 0, M, CV_GEMM_B_T);  // M = A (A^T A)^(-1) A^T

    // M = M - I  (walk the diagonal in place)
    double* d = M->data.db;
    for (int i = 0; i < n; i++, d += n+1)
    {
        *d -= 1;
    }

    cvReleaseMat(&A);
    cvReleaseMat(&Q);
    cvReleaseMat(&P);
}
开发者ID:LyqSpace,项目名称:ImageRetarget-CMM,代码行数:48,代码来源:ConformalResizing.cpp
示例14: if
//============================================================================
/* Draws the fitting result onto image.
 * type 0: landmark points;  type 1: triangulation mesh;
 * type 2: the reconstructed appearance (texture) warped into the shape.
 * Any other type reports an error on stderr. */
void AAM_IC::Draw(IplImage* image, const AAM_Shape& Shape, int type)
{
    if(type == 0) AAM_Common::DrawPoints(image, Shape);
    else if(type == 1) AAM_Common::DrawTriangles(image, Shape, __paw.__tri);
    else if(type == 2)
    {
        // Project the fitting residual onto the texture bases:
        // lamda = error_t * B^T.
        cvGEMM(__error_t, __texture.GetBases(), 1, NULL, 1, __lamda, CV_GEMM_B_T);
        __texture.CalcTexture(__lamda, __warp_t);
        AAM_PAW paw;
        double minV, maxV;
        // Rescale the texture to the displayable 0..255 range.
        cvMinMaxLoc(__warp_t, &minV, &maxV);
        cvConvertScale(__warp_t, __warp_t, 255/(maxV-minV), -minV*255/(maxV-minV));
        paw.Train(Shape, __Points, __Storage, __paw.GetTri(), false);
        AAM_Common::DrawAppearance(image, Shape, __warp_t, paw, __paw);
    }
    else fprintf(stderr, "ERROR(%s, %d): Unsupported drawing type\n",
        __FILE__, __LINE__);
}
开发者ID:HVisionSensing,项目名称:aamlibrary,代码行数:19,代码来源:AAM_IC.cpp
示例15: cvmSet
//============================================================================
/* Computes the modified steepest-descent images SD for the inverse
 * compositional fitting algorithm: each row i of SD is the template
 * gradient (dTx, dTy) dotted with column i of the warp Jacobians (Jx, Jy),
 * after which the component spanned by the texture bases B is projected
 * out (the "(63),(64)" equations the original comments reference --
 * presumably Baker & Matthews' formulation; confirm against the paper).
 * Pixels are 3-channel, hence the 3*j indexing. */
void AAM_IC::CalcModifiedSD(CvMat* SD, const CvMat* dTx, const CvMat* dTy,
                            const CvMat* Jx, const CvMat* Jy)
{
    int i, j;

    //create steepest descent images
    double* _x = dTx->data.db;
    double* _y = dTy->data.db;
    double temp;
    for(i = 0; i < __shape.nModes()+4; i++)
    {
        for(j = 0; j < __paw.nPix(); j++)
        {
            /* One SD entry per color channel of pixel j. */
            temp = _x[3*j ]*cvmGet(Jx,j,i) +_y[3*j ]*cvmGet(Jy,j,i);
            cvmSet(SD,i,3*j,temp);

            temp = _x[3*j+1]*cvmGet(Jx,j,i) +_y[3*j+1]*cvmGet(Jy,j,i);
            cvmSet(SD,i,3*j+1,temp);

            temp = _x[3*j+2]*cvmGet(Jx,j,i) +_y[3*j+2]*cvmGet(Jy,j,i);
            cvmSet(SD,i,3*j+2,temp);
        }
    }

    //project out appearance variation (and linear lighting parameters)
    const CvMat* B = __texture.GetBases();
    CvMat* V = cvCreateMat(4+__shape.nModes(), __texture.nModes(), CV_64FC1);
    CvMat SDMat, BMat;

    /* V = SD * B^T : projection coefficients of each SD row onto each basis. */
    cvGEMM(SD, B, 1., NULL, 1., V, CV_GEMM_B_T);

    // Equation (63),(64)
    for(i = 0; i < __shape.nModes()+4; i++)
    {
        for(j = 0; j < __texture.nModes(); j++)
        {
            /* SD_i -= V(i,j) * B_j */
            cvGetRow(SD, &SDMat, i);
            cvGetRow(B, &BMat, j);
            cvScaleAdd(&BMat, cvScalar(-cvmGet(V,i,j)), &SDMat, &SDMat);
        }
    }

    cvReleaseMat(&V);
}
开发者ID:HVisionSensing,项目名称:aamlibrary,代码行数:44,代码来源:AAM_IC.cpp
示例16: interp_contr
/*
Calculates interpolated pixel contrast. Based on Eqn. (3) in Lowe's paper.

@param dog_pyr difference of Gaussians scale space pyramid
@param octv octave of scale space
@param intvl within-octave interval
@param r pixel row
@param c pixel column
@param xi interpolated subpixel increment to interval
@param xr interpolated subpixel increment to row
@param xc interpolated subpixel increment to col

@param Returns interpolated contrast.
*/
static double interp_contr( IplImage*** dog_pyr, int octv, int intvl, int r,
                            int c, double xi, double xr, double xc )
{
	CvMat* dD, X, T;
	double t[1], x[3] = { xc, xr, xi };

	/* Column vector X of the subpixel offsets in x, y and scale. */
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
	/* T holds the scalar result of the matrix product. */
	cvInitMatHeader( &T, 1, 1, CV_64FC1, t, CV_AUTOSTEP );
	/* dD: partial derivatives in x, y and scale of the DoG pyramid at (r, c). */
	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
	/* Matrix product: T = dD^T * X */
	cvGEMM( dD, &X, 1, NULL, 0, &T, CV_GEMM_A_T );
	cvReleaseMat( &dD );

	/* Interpolated contrast: D + 0.5 * dD^T * X (see the SIFT paper). */
	return pixval32f( dog_pyr[octv][intvl], r, c ) + t[0] * 0.5;
}
开发者ID:githubcjl,项目名称:uVision_cjl,代码行数:31,代码来源:sift.c
示例17: cvCreateMat
/* Maps a pixel of the depth image to surface coordinates by applying the
 * _img2surface homography to the point in homogeneous form and then
 * de-homogenizing the result. */
CvPoint2D32f BackgroundModel::convertToSurfaceCoordinates(CvPoint pointInDepthImage) const
{
    // Lift the pixel into homogeneous coordinates.
    CvMat *pixel  = cvCreateMat(3, 1, CV_32FC1);
    CvMat *mapped = cvCreateMat(3, 1, CV_32FC1);
    CV_MAT_ELEM(*pixel, float, 0, 0) = pointInDepthImage.x;
    CV_MAT_ELEM(*pixel, float, 1, 0) = pointInDepthImage.y;
    CV_MAT_ELEM(*pixel, float, 2, 0) = 1.0f;

    // apply the homography: mapped = _img2surface * pixel
    cvGEMM(_img2surface, pixel, 1.0f, NULL, 0.0f, mapped, 0);

    // De-homogenize: divide through by the third coordinate.
    CvPoint2D32f result;
    float w = CV_MAT_ELEM(*mapped, float, 2, 0);
    result.x = CV_MAT_ELEM(*mapped, float, 0, 0) / w;
    result.y = CV_MAT_ELEM(*mapped, float, 1, 0) / w;

    cvReleaseMat(&pixel);
    cvReleaseMat(&mapped);
    return result;
}
开发者ID:alemart,项目名称:mestrado,代码行数:20,代码来源:BackgroundModel.cpp
示例18: cvFitPlane
// Fit a hyperplane to a set of ND points.
// Note: Input points must be in the form of an NxM matrix, where M is the
// dimensionality (the element access below assumes CV_32F data).
// This function finds the best-fit plane P, in the least-squares sense,
// through the points.  For 3-D input the plane is described by the
// coefficient vector W, where W(1)*X + W(2)*Y + W(3)*Z = W(4) for (X,Y,Z)
// on the plane.  (FIX: the original comment said "= W(3)"; the offset is
// stored in plane[ncols], the (M+1)-th coefficient.)
// `plane` must have room for ncols+1 floats.
void cvFitPlane(const CvMat* points, float* plane){
	// Estimate geometric centroid.
	int nrows = points->rows;
	int ncols = points->cols;
	int type = points->type;
	CvMat* centroid = cvCreateMat(1, ncols, type);
	cvSet(centroid, cvScalar(0));
	for(int c=0; c<ncols; c++){
		for(int r=0; r<nrows; r++)
			centroid->data.fl[c] += points->data.fl[ncols*r+c];
		centroid->data.fl[c] /= nrows;
	}
	// Subtract geometric centroid from each point.
	CvMat* points2 = cvCreateMat(nrows, ncols, type);
	for(int r=0; r<nrows; r++)
		for(int c=0; c<ncols; c++)
			points2->data.fl[ncols*r+c] = points->data.fl[ncols*r+c] - centroid->data.fl[c];
	// Evaluate SVD of the scatter matrix A = points2^T * points.
	// (Because the columns of points2 sum to zero after centering, this
	// equals points2^T * points2, the centered covariance up to scale.)
	CvMat* A = cvCreateMat(ncols, ncols, type);
	CvMat* W = cvCreateMat(ncols, ncols, type);
	CvMat* V = cvCreateMat(ncols, ncols, type);
	cvGEMM(points2, points, 1, NULL, 0, A, CV_GEMM_A_T);
	cvSVD(A, W, NULL, V, CV_SVD_V_T);
	// The plane normal is the singular vector of the smallest singular
	// value; with CV_SVD_V_T that is the last *row* of V.
	plane[ncols] = 0;
	for(int c=0; c<ncols; c++){
		plane[c] = V->data.fl[ncols*(ncols-1)+c];
		plane[ncols] += plane[c]*centroid->data.fl[c];
	}
	// Release allocated resources.
	// BUG FIX: the previous source read "cvReleaseMat(¢roid)" -- a
	// mojibake corruption of "&centroid" ("&cent" -> the cent sign) that
	// does not compile.
	cvReleaseMat(&centroid);
	cvReleaseMat(&points2);
	cvReleaseMat(&A);
	cvReleaseMat(&W);
	cvReleaseMat(&V);
}
开发者ID:Pacmanfan,项目名称:MultiScan,代码行数:47,代码来源:cvUtilProCam.cpp
示例19: interp_step
/* Performs one step of extremum interpolation (Lowe, Section 4): solves
 * for the subpixel offset X = -H^(-1) * dD, where dD and H are the 3D
 * gradient and Hessian of DoG at (octv, intvl, r, c).
 * Outputs: *xi = interval offset, *xr = row offset, *xc = column offset. */
void interp_step( IplImage*** dog_pyr, int octv, int intvl, int r, int c,
                  double* xi, double* xr, double* xc )
{
	CvMat* dD, * H, * H_inv, X;
	double x[3] = { 0 };

	dD = deriv_3D( dog_pyr, octv, intvl, r, c );
	H = hessian_3D( dog_pyr, octv, intvl, r, c );
	/* SVD-based inversion tolerates a (near-)singular Hessian. */
	H_inv = cvCreateMat( 3, 3, CV_64FC1 );
	cvInvert( H, H_inv, CV_SVD );
	cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
	/* X = -H^(-1) * dD */
	cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );
	cvReleaseMat( &dD );
	cvReleaseMat( &H );
	cvReleaseMat( &H_inv );

	*xi = x[2];  /* interval (scale) offset */
	*xr = x[1];  /* row offset */
	*xc = x[0];  /* column offset */
}
开发者ID:cherubjywh,项目名称:opencv,代码行数:21,代码来源:sift.cpp
示例20: deriv3D
//! Performs one step of extremum interpolation.
void FastHessian::interpolateStep(int r, int c, ResponseLayer *t, ResponseLayer *m, ResponseLayer *b,
double* xi, double* xr, double* xc )
{
CvMat* dD, * H, * H_inv, X;
double x[3] = { 0 };
dD = deriv3D( r, c, t, m, b );
H = hessian3D( r, c, t, m, b );
H_inv = cvCreateMat( 3, 3, CV_64FC1 );
cvInvert( H, H_inv, CV_SVD );
cvInitMatHeader( &X, 3, 1, CV_64FC1, x, CV_AUTOSTEP );
cvGEMM( H_inv, dD, -1, NULL, 0, &X, 0 );
cvReleaseMat( &dD );
cvReleaseMat( &H );
cvReleaseMat( &H_inv );
*xi = x[2];
*xr = x[1];
*xc = x[0];
}
开发者ID:amitahire,项目名称:sim,代码行数:22,代码来源:fasthessian.cpp
注:本文中的cvGEMM函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论