
C++ cvConvertScale Function Code Examples


This article collects typical usage examples of the C++ cvConvertScale function. If you are wondering how to call cvConvertScale, what its arguments do, or what real-world uses look like, the hand-picked code examples below should help.



Twenty cvConvertScale code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
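For orientation before the collected examples: cvConvertScale(src, dst, scale, shift) computes dst = src*scale + shift element-wise, converting to the destination image's depth with saturation for integer outputs. The following minimal sketch is an illustration rather than code from any of the projects below; the file names input.png and output.png are placeholders, and it assumes OpenCV's legacy C headers (2.x era) are available. It shows the two patterns that recur throughout the examples: normalizing an 8-bit image into a 32-bit float image in [0,1], and scaling a float image back to 8 bits for saving or display.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
	/* placeholder input file; any 8-bit BGR image will do */
	IplImage *img8u = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR);
	if (!img8u)
		return 1;

	/* 8-bit [0,255] -> 32-bit float [0,1]: dst = src * (1/255) + 0 */
	IplImage *img32f = cvCreateImage(cvGetSize(img8u), IPL_DEPTH_32F, 3);
	cvConvertScale(img8u, img32f, 1.0 / 255.0, 0);

	/* 32-bit float [0,1] -> 8-bit [0,255] for saving or display: dst = src * 255 + 0 */
	IplImage *back8u = cvCreateImage(cvGetSize(img32f), IPL_DEPTH_8U, 3);
	cvConvertScale(img32f, back8u, 255.0, 0);
	cvSaveImage("output.png", back8u, NULL);

	cvReleaseImage(&img8u);
	cvReleaseImage(&img32f);
	cvReleaseImage(&back8u);
	return 0;
}

Because integer destinations are saturated, the common histogram idiom seen in several examples below, cvConvertScale(hist->bins, hist->bins, max_val ? 255.0/max_val : 0, 0), maps the peak bin exactly to 255 and everything else proportionally.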

Example 1: cvWaitKey

string MCRenderer::response(const IplImage* currentImage)
{
	int key = cvWaitKey(10);
	
	switch(key)
	{
	case 's':
		if(currentImage)
		{
			IplImage *temp = cvCreateImage(cvSize(currentImage->width, currentImage->height), IPL_DEPTH_32F, 3);
			cvConvertScale(currentImage, temp, 1);
			saveImagePFM(savePath, temp);
			cvReleaseImage(&temp);
		}
		break;
	case 'q':
		return "quit";
	}
	return "";
}
Developer: winmad, Project: RenderX, Lines: 20, Source: MCRenderer.cpp


Example 2: optimizeDepthMap

bool optimizeDepthMap()
{	
	cvErode(uImage,uImage,0,2);		//Smoothen the User Map as well
	cvDilate(uImage,uImage,0,2);
	CvScalar depthMean=cvAvg(dImage,uImage);							//Get the Average Depth Value of the User Pixels
	cvNot(uImage,uImage);												//Invert the user pixels to paint the rest of the image with average user depth									 
	//viewImage(dImage);
	cvSet(dImage,depthMean,uImage);										 
	IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
	cvConvertScale(dImage,tempImage,1.0/256);
	cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian Smoothing, depth map is optimized.
	cvConvert(tempImage,dImage);
	cvScale(dImage,dImage,256);
	cvSet(dImage,cvScalar(0),uImage);	
	//viewImage(dImage);
	//cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
	cvNot(uImage,uImage);
	cvReleaseImage(&tempImage);
	return true;
}
Developer: umutgultepe, Project: Thesis, Lines: 20, Source: ClothResizer.cpp


Example 3: cvCreateImage

IplImage *get_gray(const IplImage *img) {
    if (!img) {
        return NULL;    
    }

    IplImage *gray8, *gray32;

    gray32 = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);
    if (img->nChannels == 1) {
        gray8 = (IplImage *)cvClone(img);
    } else {
        gray8 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        cvCvtColor(img, gray8, CV_BGR2GRAY);
    }

    cvConvertScale(gray8, gray32, 1.0 / 255.0, 0);
    cvReleaseImage(&gray8);

    return gray32;
}
Developer: cherip, Project: dct, Lines: 20, Source: utils.cpp


Example 4: cvCreateImage

/* Create a camshift tracked object from a region in image. */
TrackedObj* FaceBl0r::create_tracked_object (IplImage* image, CvRect* region) {
  TrackedObj* obj;
  
  //allocate memory for tracked object struct
  if((obj = (TrackedObj *) malloc(sizeof *obj)) != NULL) {
    //create-image: size(w,h), bit depth, channels
    obj->hsv  = cvCreateImage(cvGetSize(image), 8, 3);
    obj->mask = cvCreateImage(cvGetSize(image), 8, 1);
    obj->hue  = cvCreateImage(cvGetSize(image), 8, 1);
    obj->prob = cvCreateImage(cvGetSize(image), 8, 1);

    int hist_bins = 30;           //number of histogram bins
    float hist_range[] = {0,180}; //histogram range
    float* range = hist_range;
    obj->hist = cvCreateHist(1,             //number of hist dimensions
                             &hist_bins,    //array of dimension sizes
                             CV_HIST_ARRAY, //representation format
                             &range,        //array of ranges for bins
                             1);            //uniformity flag
  }
  
  //create a new hue image
  update_hue_image(image, obj);

  float max_val = 0.f;
  
  //create a histogram representation for the face
  cvSetImageROI(obj->hue, *region);
  cvSetImageROI(obj->mask, *region);
  cvCalcHist(&obj->hue, obj->hist, 0, obj->mask);
  cvGetMinMaxHistValue(obj->hist, 0, &max_val, 0, 0 );
  cvConvertScale(obj->hist->bins, obj->hist->bins,
                 max_val ? 255.0/max_val : 0, 0);
  cvResetImageROI(obj->hue);
  cvResetImageROI(obj->mask);
  
  //store the previous face location
  obj->prev_rect = *region;

  return obj;
}
Developer: Distrotech, Project: frei0r, Lines: 42, Source: facebl0r.cpp


Example 5: bgr2hsv

/**
 * @brief Converts an RGB image to HSV
 * @param bgr The original RGB image
 * @param objFRAME The OpenCL memory object associated with the RGB image, used by the kernel
 * @param context The OpenCL device context
 * @param kernelHSV The OpenCL kernel to run to compute the HSV image
 * @param command_queue The command queue of the OpenCL device
 * @param work_items The number of compute units to use for the computation
 * @return A new 32-bit HSV image with S and V in the range [0,1] and H in [0,360]
 */
IplImage* bgr2hsv( IplImage *bgr, cl_mem *objFRAME, cl_context *context, cl_kernel *kernelHSV, cl_command_queue *command_queue, size_t work_items ) {
	
	cl_int ret;
	IplImage *bgr32f;
	bgr32f = cvCreateImage( cvGetSize(bgr), IPL_DEPTH_32F, 3 );
	cvConvertScale( bgr, bgr32f, 1.0 / 255.0, 0 );
	ret = clEnqueueWriteBuffer(command_queue[0], objFRAME[0], CL_FALSE, 0, bgr32f->imageSize, bgr32f->imageData, 0, NULL, NULL);

	// Set the kernel arguments
	ret = clSetKernelArg(kernelHSV[0], 0, sizeof(cl_mem), (void *)objFRAME);
	ret = clSetKernelArg(kernelHSV[0], 1, sizeof(int), &bgr32f->widthStep);
	ret = clSetKernelArg(kernelHSV[0], 2, sizeof(int), &bgr32f->height);
	ret = clSetKernelArg(kernelHSV[0], 3, sizeof(int), &bgr32f->width);
	
	size_t local = 128;
	size_t global = work_items * local;

	// Execute the kernel with data parallelism
	ret = clEnqueueNDRangeKernel(command_queue[0], kernelHSV[0], 1, NULL, &global, &local, 0, NULL, NULL);
	return bgr32f;
}
Developer: rotty11, Project: Pfc, Lines: 31, Source: observacion.cpp


Example 6: BOCV_Mask_attach

/**
 * Create Mask from comp buf node
 * @param cbuf
 * @return IplImage of Mask
 */
IplImage* BOCV_Mask_attach(CompBuf* cbuf)
{
    IplImage *mask;
    IplImage *img;
    if(cbuf == NULL)
        return NULL;
    if(cbuf->x>0 && cbuf->y>0 ){
        //Create image from comp buf
        img = cvCreateImageHeader(cvSize(cbuf->x,cbuf->y),IPL_DEPTH_32F,cbuf->type);
        cvSetData(img,cbuf->rect,cbuf->x * cbuf->type * sizeof(float)); // always 4 byte align.
        
        mask= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, cbuf->type);
        //Convert to 8 bit unsigned
        cvConvertScale(img, mask,1,0);
            
        return mask;
    }else{
        return NULL;
    }
	
}
Developer: jamesguo, Project: blendocv, Lines: 26, Source: BOCV_util.c


Example 7: gst_motiondetect_log_image

static void
gst_motiondetect_log_image (const IplImage * image,
    const char * debugDirectory, int index, const char * filename)
{
  if (image && debugDirectory) {
    char *filepath;
    asprintf (&filepath, "%s/%05d_%s", debugDirectory, index, filename);

    if (image->depth == IPL_DEPTH_32F) {
      IplImage *scaledImageToLog = cvCreateImage (
          cvSize (image->width, image->height), IPL_DEPTH_8U, 1);
      cvConvertScale (image, scaledImageToLog, 255.0, 0);
      cvSaveImage (filepath, scaledImageToLog, NULL);
      cvReleaseImage (&scaledImageToLog);
    } else {
      cvSaveImage (filepath, image, NULL);
    }

    free (filepath);
  }
}
Developer: ekelly30, Project: stb-tester, Lines: 21, Source: gstmotiondetect.c


Example 8: cvCopy

void CamShift::CalcHistogram(const ImgBgr& img, const CRect& sel)
{
  selection.x = sel.left;
  selection.y = img.Height()-sel.bottom-1;
  selection.width = sel.Width();
  selection.height = sel.Height();

  cvCopy(ImgIplImage(img), image, 0 );
  cvCvtColor( image, hsv, CV_BGR2HSV );
  cvFlip(hsv,hsv,0);
  //cvSaveImage("hsv.bmp", hsv);
  //cvSaveImage("img.bmp", image);
  int _vmin = vmin, _vmax = vmax;
  cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
  cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
  cvSplit( hsv, hue, 0, 0, 0 );
  float max_val = 0.f;
  cvSetImageROI(hue, selection );
  cvSetImageROI( mask, selection );
  cvCalcHist( &hue, hist, 0, mask );
  cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
  cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
  cvResetImageROI( hue );
  cvResetImageROI( mask );
  track_window = selection;
//  cvZero( histimg );
//  int bin_w = histimg->width / hdims;
//  for(int i = 0; i < hdims; i++ )
//  {
//    int a = cvGetReal1D(hist->bins,i);
//    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
//    CvScalar color = hsv2rgb(i*180.f/hdims);
//    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
//      cvPoint((i+1)*bin_w,histimg->height - val),
//      color, -1, 8, 0 );
//  }
//  cvNamedWindow( "Histogram", 1 );
//  
//  cvShowImage( "Histogram", histimg );
}
Developer: gouthampacha, Project: blepo_opencv, Lines: 40, Source: CamShift.cpp


Example 9: startTracking

//////////////////////////////////
// startTracking()
//
void startTracking(IplImage * pImg, CvRect * pFaceRect)
{
	float maxVal = 0.f;

	// Make sure internal data structures have been allocated
	if( !pHist ) createTracker(pImg);

	// Create a new hue image
	updateHueImage(pImg);

	// Create a histogram representation for the face
    cvSetImageROI( pHueImg, *pFaceRect );
    cvSetImageROI( pMask,   *pFaceRect );
    cvCalcHist( &pHueImg, pHist, 0, pMask );
    cvGetMinMaxHistValue( pHist, 0, &maxVal, 0, 0 );
    cvConvertScale( pHist->bins, pHist->bins, maxVal? 255.0/maxVal : 0, 0 );
    cvResetImageROI( pHueImg );
    cvResetImageROI( pMask );

	// Store the previous face location
	prevFaceRect = *pFaceRect;
}
Developer: friggasaint, Project: Mirror, Lines: 25, Source: camshift_wrapper.c


Example 10: cvL1QCSolve

int cvL1QCSolve( CvMat* A, CvMat* B, CvMat* X, double epsilon, double mu, CvTermCriteria lb_term_crit, CvTermCriteria cg_term_crit )
{
	CvMat* AAt = cvCreateMat( A->rows, A->rows, CV_MAT_TYPE(A->type) );
	cvGEMM( A, A, 1, NULL, 0, AAt, CV_GEMM_B_T );
	CvMat* W = cvCreateMat( A->rows, 1, CV_MAT_TYPE(X->type) );
	if ( cvCGSolve( AAt, B, W, cg_term_crit ) > .5 )
	{
		cvReleaseMat( &W );
		cvReleaseMat( &AAt );
		return -1;
	}
	cvGEMM( A, W, 1, NULL, 0, X, CV_GEMM_A_T );
	cvReleaseMat( &W );
	cvReleaseMat( &AAt );

	CvMat* U = cvCreateMat( X->rows, X->cols, CV_MAT_TYPE(X->type) );
	cvAbsDiffS( X, U, cvScalar(0) );
	CvScalar sumAbsX = cvSum( U );
	double minAbsX, maxAbsX;
	cvMinMaxLoc( U, &minAbsX, &maxAbsX );
	cvConvertScale( U, U, .95, maxAbsX * .1 );
	
	double tau = MAX( (2 * X->rows + 1) / sumAbsX.val[0], 1 );

	if ( !(lb_term_crit.type & CV_TERMCRIT_ITER) )
		lb_term_crit.max_iter = ceil( (log(2 * X->rows + 1) - log(lb_term_crit.epsilon) - log(tau)) / log(mu) );

	CvTermCriteria nt_term_crit = cvTermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 50, lb_term_crit.epsilon );
	
	for ( int i = 0; i < lb_term_crit.max_iter; ++i )
	{
		icvL1QCNewton( A, B, X, U, epsilon, tau, nt_term_crit, cg_term_crit );
		tau *= mu;
	}

	cvReleaseMat( &U );

	return 0;
}
Developer: caomw, Project: l1cs, Lines: 39, Source: cvl1qc.cpp


Example 11: cvInRangeS

void BoatDetecting::startTrackObject(){
	cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), mask);
	// 10,256,30

	cvSplit(hsv, hue, 0, 0, 0);
	if (!isTrackingInitialized){ // if the tracking window has not been initialized yet
		float max_val = 0.f;		
		cvSetImageROI(hue, selection);
		cvSetImageROI(mask, selection);		
		cvCalcHist(&hue, hist, 0, mask);
		cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
		cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
		cvResetImageROI(hue);
		cvResetImageROI(mask);
		trackWindow = selection;
		isTrackingInitialized = true;

	}

	cvCalcBackProject(&hue, backproject, hist);
	//cvShowImage("Hue Channel",backproject);
	
	cvAnd(backproject, mask, backproject, 0);
	
	//if (trackWindow.x + trackWindow.width/2< allfWidth &&trackWindow.y + trackWindow.height/2< allfHeight &&trackWindow.x>0)
	if (trackWindow.x + trackWindow.width< allfWidth &&trackWindow.y + trackWindow.height< allfHeight &&trackWindow.x>0)
		cvCamShift(backproject, trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 1), &trackComp, 0); // once the tracking window is initialized, trackWindow is used directly for tracking and is updated every frame
	

	//if (trackComp.rect.width<90 && trackComp.rect.y<200){
	//	trackWindow = trackComp.rect;
	//}
	//if (trackComp.rect.y>200)
	//{
	//	trackWindow = trackComp.rect;
	//}
	trackWindow = trackComp.rect;
	
}
Developer: IvelynHsu, Project: BridgeWarningSystem, Lines: 39, Source: BoatDetecting.cpp


Example 12: color(

/*
Converts an image to 32-bit grayscale
@param img a 3-channel 8-bit color (BGR) or 8-bit gray image
@return Returns a 32-bit grayscale image
*/
static IplImage* convert_to_gray32( IplImage* img )
{
	IplImage* gray8, * gray32;

    gray32 = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 ); // create a 32-bit single-channel image

    // first convert the source image to an 8-bit single-channel image
    if( img->nChannels == 1 ) // if the source is already single-channel, just clone it
		gray8 = cvClone( img );
    else // if the source is a 3-channel image
	{
        gray8 = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 ); // create an 8-bit single-channel image
        cvCvtColor( img, gray8, CV_BGR2GRAY ); // convert the source to an 8-bit single-channel image
	}

    // then convert the 8-bit single-channel gray8 to a 32-bit single-channel image, normalizing by 255
	cvConvertScale( gray8, gray32, 1.0 / 255.0, 0 );

    cvReleaseImage( &gray8 ); // release the temporary image

    return gray32; // return the 32-bit single-channel image
}
Developer: githubcjl, Project: uVision_cjl, Lines: 27, Source: sift.c


Example 13: renderChainsWithBoxes

void renderChainsWithBoxes(IplImage * SWTImage,
		std::vector<std::vector<Point2d> > & components,
		std::vector<Chain> & chains,
		std::vector<std::pair<Point2d, Point2d> > & compBB,
		std::vector<std::pair<CvPoint, CvPoint> > & bb,
		IplImage * output) {
	// keep track of included components
	std::vector<bool> included;
	included.reserve(components.size());
	for (unsigned int i = 0; i != components.size(); i++) {
		included.push_back(false);
	}
	for (std::vector<Chain>::iterator it = chains.begin(); it != chains.end();
			it++) {
		for (std::vector<int>::iterator cit = it->components.begin();
				cit != it->components.end(); cit++) {
			included[*cit] = true;
		}
	}
	std::vector<std::vector<Point2d> > componentsRed;
	for (unsigned int i = 0; i != components.size(); i++) {
		if (included[i]) {
			componentsRed.push_back(components[i]);
		}
	}
	IplImage * outTemp = cvCreateImage(cvGetSize(output), IPL_DEPTH_32F, 1);

	LOGL(LOG_CHAINS, componentsRed.size() << " components after chaining");

	renderComponents(SWTImage, componentsRed, outTemp);

	bb = findBoundingBoxes(chains, compBB, outTemp);

	IplImage * out = cvCreateImage(cvGetSize(output), IPL_DEPTH_8U, 1);
	cvConvertScale(outTemp, out, 255, 0);
	cvCvtColor(out, output, CV_GRAY2RGB);
	cvReleaseImage(&out);
	cvReleaseImage(&outTemp);
}
Developer: yarec, Project: bibnumber, Lines: 39, Source: textdetection.cpp


Example 14: cvSetReal2D

//------------------------------------------------------------------------------
// Color Similarity Matrix Calculation
//------------------------------------------------------------------------------
CvMat *colorsim(int nbins, double sigma) {

	CvMat *xc=cvCreateMat(1,nbins, CV_32FC1);
	CvMat *yr=cvCreateMat(nbins,1, CV_32FC1);

	CvMat *x=cvCreateMat(nbins,nbins, CV_32FC1);
	CvMat *y=cvCreateMat(nbins,nbins, CV_32FC1);
	CvMat *m=cvCreateMat(x->rows,x->rows, CV_32FC1);


	// Set x,y directions 
	for (int j=0;j<nbins;j++) {
		cvSetReal2D(xc,0,j,(j+1-0.5)/nbins);
		cvSetReal2D(yr,j,0,(j+1-0.5)/nbins);
	}

	// Set u,v, meshgrids
	for (int i=0;i<x->rows;i++) {
		cvRepeat(xc,x);
		cvRepeat(yr,y);
	}

	CvMat *sub = cvCreateMat(x->rows,y->cols,CV_32FC1);
	cvSub(x,y,sub);
	cvAbs(sub,sub);
	cvMul(sub,sub,sub);
	cvConvertScale(sub,sub,-1.0/(2*sigma*sigma));
	cvExp(sub,sub);
	cvSubRS(sub,cvScalar(1.0),m);

	cvReleaseMat(&xc);
	cvReleaseMat(&yr);
	cvReleaseMat(&x);
	cvReleaseMat(&y);
	cvReleaseMat(&sub);

	return m;
}
Developer: xufango, Project: contrib_bk, Lines: 41, Source: savgol.cpp


Example 15: cvConvertScale

//============================================================================
void AAM_IC::InverseCompose(const CvMat* dpq, const CvMat* s, CvMat* NewS)
{
	// Firstly: Estimate the corresponding changes to the base mesh
	cvConvertScale(dpq, __inv_pq, -1);
	__shape.CalcShape(__inv_pq, __update_s0);	// __update_s0 = N.W(s0, -delta_p, -delta_q)

	//Secondly: Composing the Incremental Warp with the Current Warp Estimate.
	double *S0 = __update_s0->data.db;
	double *S = s->data.db;
	double *SEst = NewS->data.db;
	double x, y, xw, yw;
	int k, tri_idx;
	int v1, v2, v3;
	const std::vector<std::vector<int> >& tri = __paw.__tri;
	const std::vector<std::vector<int> >& vtri = __paw.__vtri;

	for(int i = 0; i < __shape.nPoints(); i++)
	{
		x = 0.0;	y = 0.0;
		k = 0;
		//The only problem with this approach is which triangle do we use?
		//In general there will be several triangles that share the i-th vertex.
		for(k = 0; k < vtri[i].size(); k++)// see Figure (11)
		{
			tri_idx = vtri[i][k];
			v1 = tri[tri_idx][0];
			v2 = tri[tri_idx][1];
			v3 = tri[tri_idx][2];

			AAM_PAW::Warp(S0[2*i],S0[2*i+1],
				__sMean[v1].x, __sMean[v1].y,__sMean[v2].x, __sMean[v2].y,__sMean[v3].x, __sMean[v3].y,
					xw, yw,	S[2*v1], S[2*v1+1], S[2*v2], S[2*v2+1], S[2*v3], S[2*v3+1]);
			x += xw;		y += yw;
		}
		// average the result so as to smooth the warp at each vertex
		SEst[2*i] = x/k;		SEst[2*i+1] = y/k;
	}
}
Developer: HVisionSensing, Project: aamlibrary, Lines: 39, Source: AAM_IC.cpp


Example 16: splat

static IplImage* splat(int *coeffs, CvSize size, int *plane_coeffs)
{
    IplImage *g = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *b = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *r = cvCreateImage(size, IPL_DEPTH_16S, 1);
    IplImage *rgb = cvCreateImage(size, IPL_DEPTH_16S, 3);
    IplImage *img = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage *trans = cvCreateImage(size, IPL_DEPTH_16S, 1);
    int dim = plane_coeffs[0] + plane_coeffs[1] + plane_coeffs[2];
    unsigned *order_p0 = build_path(plane_coeffs[0], KERNS);
    unsigned *order_p1 = build_path(plane_coeffs[1], KERNS);
    unsigned *order_p2 = build_path(plane_coeffs[2], KERNS);

    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[0], order_p0, KERNS, coeffs, dim);
    iwht2d(trans, g);
    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[1], order_p1, KERNS,
        coeffs+plane_coeffs[0], dim);
    iwht2d(trans, b);
    memset(trans->imageData, 0, trans->imageSize);
    dequantize(trans, plane_coeffs[2], order_p2, KERNS,
        coeffs+plane_coeffs[0]+plane_coeffs[1], dim);
    iwht2d(trans, r);

    cvMerge(g, b, r, NULL, rgb);
    cvConvertScale(rgb, img, 1, 0);

    cvReleaseImage(&g);
    cvReleaseImage(&b);
    cvReleaseImage(&r);
    cvReleaseImage(&rgb);
    cvReleaseImage(&trans);
    free(order_p0);
    free(order_p1);
    free(order_p2);
    return img;
}
Developer: j0sh, Project: thesis, Lines: 38, Source: cd.c


Example 17: prepare_image_for_cat

DMZ_INTERNAL void prepare_image_for_cat(IplImage *image, IplImage *as_float, CharacterRectListIterator rect) {
  // Input image: IPL_DEPTH_8U [0 - 255]
  // Data for models: IPL_DEPTH_32F [0.0 - 1.0]
  
  cvSetImageROI(image, cvRect(rect->left, rect->top, kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight));
  
  // TODO: optimize this a lot!
  
  // Gradient
  IplImage *filtered_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  //llcv_morph_grad3_2d_cross_u8(image, filtered_image);
  IplConvKernel *kernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
  cvMorphologyEx(image, filtered_image, NULL, kernel, CV_MOP_GRADIENT, 1);
  cvReleaseStructuringElement(&kernel);
  
  // Equalize
  llcv_equalize_hist(filtered_image, filtered_image);
  
  // Bilateral filter
  int aperture = 3;
  double space_sigma = (aperture / 2.0 - 1) * 0.3 + 0.8;
  double color_sigma = (aperture - 1) / 3.0;
  IplImage *smoothed_image = cvCreateImage(cvSize(kTrimmedCharacterImageWidth, kTrimmedCharacterImageHeight), IPL_DEPTH_8U, 1);
  cvSmooth(filtered_image, smoothed_image, CV_BILATERAL, aperture, aperture, space_sigma, color_sigma);
  
  // Convert to float
  cvConvertScale(smoothed_image, as_float, 1.0f / 255.0f, 0);
  
  cvReleaseImage(&smoothed_image);
  cvReleaseImage(&filtered_image);
  
  cvResetImageROI(image);

#if DEBUG_EXPIRY_CATEGORIZATION_PERFORMANCE
  dmz_debug_timer_print("prepare image", 2);
#endif
}
Developer: COCUS-NEXT, Project: card.io-dmz, Lines: 37, Source: expiry_categorize.cpp


Example 18: main

int main( int argc, char** argv )
{
  IplImage* hsv_img;
  IplImage** hsv_ref_imgs;
  IplImage* l32f, * l;
  histogram* ref_histo;
  double max;
  int i;

  arg_parse( argc, argv );

  /* compute HSV histogram over all reference image */
  hsv_img = bgr2hsv( in_img );
  hsv_ref_imgs = (IplImage**)malloc( num_ref_imgs * sizeof( IplImage* ) );
  for( i = 0; i < num_ref_imgs; i++ )
    hsv_ref_imgs[i] = bgr2hsv( ref_imgs[i] );
  ref_histo = calc_histogram( hsv_ref_imgs, num_ref_imgs );
  normalize_histogram( ref_histo );

  /* compute likelihood at every pixel in input image */
  fprintf( stderr, "Computing likelihood... " );
  fflush( stderr );
  l32f = likelihood_image( hsv_img, ref_imgs[0]->width,
			   ref_imgs[0]->height, ref_histo );
  fprintf( stderr, "done\n");

  /* convert likelihood image to uchar and display */
  cvMinMaxLoc( l32f, NULL, &max, NULL, NULL, NULL );
  l = cvCreateImage( cvGetSize( l32f ), IPL_DEPTH_8U, 1 );
  cvConvertScale( l32f, l, 255.0 / max, 0 );
  cvNamedWindow( "likelihood", 1 );
  cvShowImage( "likelihood", l );
  cvNamedWindow( "image", 1 );
  cvShowImage( "image", in_img );
  cvWaitKey(0);
}
Developer: sangwook236, Project: general-development-and-testing, Lines: 36, Source: observe.cpp


Example 19: cvCvtColor

void CamShift::Track(IplImage *frame, CvRect &selection, bool calc_hist)
{
	int i, bin_w, c;

	cvCvtColor( frame, _hsv, CV_BGR2HSV );

	cvInRangeS( _hsv, cvScalar(0,_smin,MIN(_vmin,_vmax),0),
		cvScalar(180,256,MAX(_vmin,_vmax),0), _mask );
	cvSplit( _hsv, _hue, 0, 0, 0 );

	if(calc_hist)
	{
		float max_val = 0.f;
		cvSetImageROI( _hue, selection );
		cvSetImageROI( _mask, selection );
		cvCalcHist( &_hue, _hist, 0, _mask );
		cvGetMinMaxHistValue( _hist, 0, &max_val, 0, 0 );
		cvConvertScale( _hist->bins, _hist->bins, max_val ? 255. / max_val : 0., 0 );
		cvResetImageROI( _hue );
		cvResetImageROI( _mask );
		_track_window = selection; 
	}

	cvCalcBackProject( &_hue, _backproject, _hist );
	cvAnd( _backproject, _mask, _backproject, 0 );
	cvCamShift( _backproject, _track_window,
		cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
		&_track_comp, &_track_box );
	_track_window = _track_comp.rect;

	if( frame->origin )
		_track_box.angle = -_track_box.angle;

	selection = cvRect(_track_box.center.x-_track_box.size.width/2, _track_box.center.y-_track_box.size.height/2,
		selection.width, selection.height);
}
Developer: ayushpurohit, Project: human-action-recognition, Lines: 36, Source: CamShift.cpp


Example 20: get_convolution

static IplImage*
get_convolution (const IplImage *image,
                 const IplImage *filter)
{
  CvSize dft_size;
  IplImage *reversed_image, *reversed_filter;
  IplImage *dft_image, *dft_filter, *dft_res;
  IplImage *res;

  dft_size.height = cvGetOptimalDFTSize(image->height + filter->height - 1);
  dft_size.width = cvGetOptimalDFTSize(image->width + filter->width - 1);

  res = cvCreateImage(cvSize(image->width,
                             image->height),
                      IPL_DEPTH_32F,
                      N_CHANNELS_GRAY);
  reversed_image = cvCreateImage(cvGetSize(image),
                                 IPL_DEPTH_8U,
                                 N_CHANNELS_GRAY);
  reversed_filter = cvCreateImage(cvGetSize(filter),
                                  IPL_DEPTH_8U,
                                  N_CHANNELS_GRAY);

  cvNot(image, reversed_image);
  cvNot(filter, reversed_filter);

  dft_image = cvCreateImage(dft_size,
                            IPL_DEPTH_32F,
                            N_CHANNELS_GRAY);
  cvSet(dft_image, cvScalar(0, 0, 0, 0), NULL);
  dft_filter = cvCreateImage(dft_size,
                             IPL_DEPTH_32F,
                             N_CHANNELS_GRAY);
  cvSet(dft_filter, cvScalar(0, 0, 0, 0), NULL);

  cvSetImageROI(dft_image, cvRect(0, 0,
                                  reversed_image->width,
                                  reversed_image->height));
  cvSetImageROI(dft_filter, cvRect(0, 0,
                                   reversed_filter->width,
                                   reversed_filter->height));
  double scaling_factor = 1.0/255;
  cvConvertScale(reversed_image, dft_image, scaling_factor, 0);
  cvConvertScale(reversed_filter, dft_filter, scaling_factor, 0);
  cvResetImageROI(dft_image);
  cvResetImageROI(dft_filter);


  cvDFT(dft_image, dft_image, CV_DXT_FORWARD, image->height);
  cvDFT(dft_filter, dft_filter, CV_DXT_FORWARD, filter->height);

  dft_res = cvCreateImage(dft_size,
                          IPL_DEPTH_32F,
                          N_CHANNELS_GRAY);

  cvMulSpectrums(dft_image, dft_filter, dft_res, 0);

  cvDFT(dft_res, dft_res, CV_DXT_INVERSE, res->height);
  cvSetImageROI(dft_res, cvRect(0, 0, res->width, res->height));
  cvCopy(dft_res, res, NULL);
  cvResetImageROI(dft_res);

  cvReleaseImage(&reversed_filter);
  cvReleaseImage(&reversed_image);
  cvReleaseImage(&dft_image);
  cvReleaseImage(&dft_filter);
  cvReleaseImage(&dft_res);

  return res;
}
Developer: voyagerok, Project: objects-detector, Lines: 70, Source: convolution.c



Note: The cvConvertScale examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.

