
C++ cvErode Function Code Examples


This article collects typical usage examples of the C++ cvErode function. If you are wondering how cvErode is used in C++, how to call it, or what real uses of it look like, the curated code examples below should help.



The following presents 20 code examples of the cvErode function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
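For orientation before the collected examples, here is a minimal standalone sketch of the call itself, assuming the OpenCV 1.x C API used throughout this page. The signature is cvErode(src, dst, element, iterations): a NULL (or 0) element selects the default 3x3 rectangular structuring element, and iterations repeats the erosion. The file names are placeholders.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(int argc, char** argv)
{
	// load a single-channel grayscale image (placeholder path)
	IplImage* src = cvLoadImage(argc > 1 ? argv[1] : "input.png", CV_LOAD_IMAGE_GRAYSCALE);
	if(!src) return 1;
	IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// NULL element = default 3x3 rectangle; erode twice
	cvErode(src, dst, NULL, 2);

	cvSaveImage("eroded.png", dst, 0);
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	return 0;
}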

Example 1: cvCvtColor

void Hand_recognition::Detect_Skin(IplImage *src, IplImage *dst){

	cvCvtColor(src, img_YCrCb, CV_BGR2YCrCb);
	cvCvtColor(src, img_HSV, CV_BGR2HSV);

	cvZero(dst);

	for(int i = 0; i < dst->height; i++){
		for(int j = 0; j < dst->width; j++){

			B = (unsigned char)src->imageData[(j * 3) + i * src->widthStep];
			G = (unsigned char)src->imageData[(j * 3) + i * src->widthStep + 1];
			R = (unsigned char)src->imageData[(j * 3) + i * src->widthStep + 2];

			bool a = R1(R,G,B);

			if(a){
				H = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep];
				S = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep + 1];
				V = (unsigned char)img_HSV->imageData[(j * 3) + i * img_HSV->widthStep + 2];

				bool c = R3(H,S,V);

				if(c){
					Y = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep];
					Cr = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep + 1];
					Cb = (unsigned char)img_YCrCb->imageData[(j * 3) + i * img_YCrCb->widthStep + 2];

					bool b = R2(Y,Cr,Cb);

					if(b)
						dst->imageData[j + i * dst->widthStep] = (unsigned char) 255;
				}

			}
		}
	}

	cvErode(dst, dst, 0, MOP_NUM);
	cvDilate(dst, dst, 0, MOP_NUM);
}
Developer ID: sp9103, Project: OpenCVE, Lines of code: 41, Source: Hand_recognition.cpp
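A note on the pattern above: cvErode followed by cvDilate with the same iteration count is a morphological opening, which removes isolated noise pixels while roughly preserving the size of the surviving blobs. A hedged one-call equivalent (the 3x3 element is illustrative; MOP_NUM is the iteration constant from this example):

	IplConvKernel* element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
	cvMorphologyEx(dst, dst, NULL, element, CV_MOP_OPEN, MOP_NUM);  // erode, then dilate
	cvReleaseStructuringElement(&element);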


Example 2: cvResizeDsize

void GripPipeline::Process(cv::Mat & source0)
{
	cv::Mat cvResizeSrc = source0;
	cv::Size cvResizeDsize(0, 0);
	double cvResizeFx = 0.75;  // default Double
	double cvResizeFy = 0.75;  // default Double
	int cvResizeInterpolation = cv::INTER_LINEAR;
	cvResize(cvResizeSrc, cvResizeDsize, cvResizeFx, cvResizeFy, cvResizeInterpolation, this->cvResizeOutput);

	cv::Mat hsvThresholdInput = cvResizeOutput;

	double hsvThresholdHue[] = {69,180};
	double hsvThresholdSaturation[] = {172,255};
	double hsvThresholdValue[] = {112,255};
	hsvThreshold(hsvThresholdInput, hsvThresholdHue, hsvThresholdSaturation, hsvThresholdValue, this->hsvThresholdOutput);

	cv::Mat findContoursInput = hsvThresholdOutput;
	source0= hsvThresholdOutput;

	std::vector<std::vector<cv::Point> > filterContoursContours = findContoursOutput;

	cv::Mat cvErodeSrc = hsvThresholdOutput;
	cv::Mat cvErodeKernel;
	cv::Point cvErodeAnchor(-1, -1);
	double cvErodeIterations = 1;
	int cvErodeBordertype = cv::BORDER_CONSTANT;
	cv::Scalar cvErodeBordervalue(-1);
	cvErode(cvErodeSrc, cvErodeKernel, cvErodeAnchor, cvErodeIterations, cvErodeBordertype, cvErodeBordervalue, this->cvErodeOutput);

	cv::Mat findLinesInput = cvErodeOutput;
	findLines(findLinesInput, this->findLinesOutput);

//	print the lines
//	GripPipeline::printLines(source0, findLinesOutput);

	// find center
	GripPipeline::findCenter(source0, findLinesOutput);

	// find distance
	GripPipeline::findDistance(source0, findLinesOutput, difference);
}
Developer ID: FRCTeam1967, Project: FRCTeam1967, Lines of code: 41, Source: GripPipeline.cpp


Example 3: givedepth

void givedepth(IplImage *localimagergb)
{
	IplImage *localimage = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
	cvCvtColor(localimagergb, localimage, CV_BGR2HSV);
	IplImage *blobbedscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 3);
	uchar *itemp = (uchar *)(localimage->imageData);
	IplImage *binaryscaling = cvCreateImage(cvGetSize(localimagergb), IPL_DEPTH_8U, 1);
	uchar *itemp1 = (uchar *)(binaryscaling->imageData);
	for(int i = 0; i < hi2->height; i++){
		for(int j = 0; j < hi2->width; j++){
			uchar h = itemp[i*localimage->widthStep + j*localimage->nChannels];
			uchar s = itemp[i*localimage->widthStep + j*localimage->nChannels + 1];
			uchar v = itemp[i*localimage->widthStep + j*localimage->nChannels + 2];
			// pixels inside the HSV range become black, the rest white
			if(h > hl && h < hh && s > sl && s < sh && v > vl && v < vh)
				itemp1[i*binaryscaling->widthStep + j] = 0;
			else
				itemp1[i*binaryscaling->widthStep + j] = 255;
		}
	}
	cvErode(binaryscaling, binaryscaling, NULL, 4);
	cvDilate(binaryscaling, binaryscaling, NULL, 4);
	CBlobResult  blob;				
	CBlob *currentBlob=NULL;
	blob=CBlobResult(binaryscaling,NULL,255);
	blob.Filter(blob,B_EXCLUDE,CBlobGetArea(),B_LESS,500);
	cvMerge(binaryscaling,binaryscaling,binaryscaling,NULL,blobbedscaling);
	CBlob hand1,hand2;																//two blobs,one for each hand
	blob.GetNthBlob( CBlobGetArea(), 0, (hand2));	
	blob.GetNthBlob( CBlobGetArea(), 1, (hand1 ));
	hand1.FillBlob(blobbedscaling,CV_RGB(0,0,255));											//fill the color of blob of hand one with blue
	hand2.FillBlob(blobbedscaling,CV_RGB(0,255,0));											//fill the color of blob of hand two with green 
	coordinates (blobbedscaling,0);
}
Developer ID: harshitm26, Project: Virtual-Boxing, Lines of code: 41, Source: boxing+final.cpp


Example 4: cvCreateImage

void clsTracking2D::calcBGsubtraction(IplImage *image, IplImage *foreground, IplImage *debugImage)
{
        if(!BGcolorstarted)
        {
                bgcolorImage = cvCreateImage( cvSize( image->width, image->height ),image->depth, image->nChannels );
                bgsGray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
                cvCvtColor(image, bgsGray, CV_BGR2GRAY);
                readyForParticles = true;
                BGcolorstarted = true;
        }
        
        cvCopy(image,bgcolorImage);
        
        if(paramsInt["blur"] > 0)
                cvSmooth(image, bgcolorImage, CV_BLUR, paramsInt["blur"], paramsInt["blur"], 0, 0);
        
        //converting to HSV
        // cvCvtColor(bgcolorImage, bgcolorImage, CV_BGR2HSV);
        
        //filtrating by color... good on this video!
        cvInRangeS(bgcolorImage, cvScalar(paramsInt["minH"], paramsInt["minS"], paramsInt["minL"]), cvScalar(paramsInt["maxH"], paramsInt["maxS"], paramsInt["maxL"]), foreground);
        // cv::invert(bgcolorImage,bgcolorImage);
        // cvCvtColor(bgcolorImage, bgsGray, CV_BGR2GRAY);
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY );
        
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY);
        // cvThreshold(bgsGray, foreground, paramsInt["minThreshold"], paramsInt["maxThreshold"], CV_THRESH_BINARY_INV);
        
        // cvThreshold(foreground, foreground, 5,255, 1 );
        //removing loose points
        cvErode(foreground, foreground, NULL,1);
        
        //augmenting neighbour points
        cvDilate(foreground, foreground, NULL,1);
        
        //calculating edges
        // cvCanny(foreground,foreground,10,100,3);
        
        
}
Developer ID: eemponet, Project: flywalkreloaded, Lines of code: 40, Source: tracking2d.cpp


Example 5: cvResizeDsize

/**
* Runs an iteration of the Pipeline and updates outputs.
*
* Sources need to be set before calling this method. 
*
*/
void GripPipeline::process(cv::Mat source0){
	//Step CV_resize0:
	//input
	cv::Mat cvResizeSrc = source0;
	cv::Size cvResizeDsize(0, 0);
	double cvResizeFx = 0.25;  // default Double
	double cvResizeFy = 0.25;  // default Double
    int cvResizeInterpolation = cv::INTER_LINEAR;
	cvResize(cvResizeSrc, cvResizeDsize, cvResizeFx, cvResizeFy, cvResizeInterpolation, this->cvResizeOutput);
	//Step HSV_Threshold0:
	//input
	cv::Mat hsvThresholdInput = cvResizeOutput;
	double hsvThresholdHue[] = {42.086330935251794, 86.7911714770798};
	double hsvThresholdSaturation[] = {32.10431654676259, 207.37691001697794};
	double hsvThresholdValue[] = {91.72661870503596, 255.0};
	hsvThreshold(hsvThresholdInput, hsvThresholdHue, hsvThresholdSaturation, hsvThresholdValue, this->hsvThresholdOutput);
	//Step CV_erode0:
	//input
	cv::Mat cvErodeSrc = hsvThresholdOutput;
	cv::Mat cvErodeKernel;
	cv::Point cvErodeAnchor(-1, -1);
	double cvErodeIterations = 1;  // default Double
    int cvErodeBordertype = cv::BORDER_CONSTANT;
	cv::Scalar cvErodeBordervalue(-1);
	cvErode(cvErodeSrc, cvErodeKernel, cvErodeAnchor, cvErodeIterations, cvErodeBordertype, cvErodeBordervalue, this->cvErodeOutput);
	//Step Mask0:
	//input
	cv::Mat maskInput = cvResizeOutput;
	cv::Mat maskMask = cvErodeOutput;
	mask(maskInput, maskMask, this->maskOutput);
	//Step Find_Blobs0:
	//input
	cv::Mat findBlobsInput = maskOutput;
	double findBlobsMinArea = 0.0;  // default Double
	double findBlobsCircularity[] = {0.0, 1.0};
	bool findBlobsDarkBlobs = true;  // default Boolean
	findBlobs(findBlobsInput, findBlobsMinArea, findBlobsCircularity, findBlobsDarkBlobs, this->findBlobsOutput);
}
Developer ID: FRCTeam1967, Project: FRCTeam1967, Lines of code: 44, Source: GripPipeline.cpp
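As the header comment notes, sources must be set before process() runs. A hypothetical call site, where everything except process() itself is an assumption made for illustration:

	GripPipeline pipeline;
	cv::Mat frame = cv::imread("frame.png");  // placeholder input
	pipeline.process(frame);  // resize -> HSV threshold -> erode -> mask -> find blobs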


Example 6: main

int main( int argc, char** argv )
{
	//Check that the user specified the input and output images
	if(argc >= 3)
	{
		char* filename = argv[1];
		//load image in gray level
		imagen = cvLoadImage(filename, 0);
	}
	else
	{
		printf("Use:\n\t%s input_image output_image\n", argv[0]);
		return 0;
	}
//------------------------------------------------------------------------------------
//NUMBER ISOLATION

	//Create needed images
	smooth= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	threshold= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	open_morf= cvCreateImage(cvSize(imagen->width, imagen->height), IPL_DEPTH_8U, 1);
	cvSmooth(imagen, smooth, CV_GAUSSIAN, 3, 0, 0, 0);
	
	CvScalar avg;
	CvScalar avgStd;
	cvAvgSdv(smooth, &avg, &avgStd, NULL);
	//printf("Avg: %f\nStd: %f\n", avg.val[0], avgStd.val[0]);
	//threshold image
	cvThreshold(smooth, threshold, (int)avg.val[0]+4*(int)(avgStd.val[0]/8), 255, CV_THRESH_BINARY);
	//Morfologic filters
	cvErode(threshold, open_morf, NULL,1); 
	cvDilate(open_morf, open_morf, NULL,1); 
	cvSaveImage(argv[2], open_morf, 0);
	//Duplicate image for countour
	cvReleaseImage(&imagen);
	cvReleaseImage(&open_morf);
	return 0;
}
Developer ID: amnosuperman, Project: LPRS, Lines of code: 38, Source: dataproc.c


Example 7: cutterDetect

IplImage* cutterDetect(IplImage *img) {

    // Convert the image into an HSV image
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    // Create an image for the output
    IplImage* out = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 3 );
    IplImage *temp = cvCreateImage(cvSize(img->width,img->height),8,1);
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);

    // Perform a Gaussian blur
    cvSmooth( img, out, CV_GAUSSIAN,15, 15);
    cvCvtColor(out, imgHSV, CV_BGR2HSV);
    
    cvInRangeS(imgHSV, BLUE_LOW, BLUE_HIGH, imgThreshed);
   
    cvErode(imgThreshed,temp,NULL,1);
    cvDilate(temp,imgThreshed,NULL,1);
    
    cvReleaseImage(&imgHSV);
    cvReleaseImage(&temp);	
    cvReleaseImage( &out );	
    return imgThreshed;
}
Developer ID: devbhave, Project: bot-harvest, Lines of code: 23, Source: cutterController.cpp


Example 8: node_composit_exec_cvErode

static void node_composit_exec_cvErode(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
	int iterations;	
	IplImage *src, *dst;
	CompBuf *dst_buf;

	if(in[0]->hasinput==0) return;
	if(out[0]->hasoutput==0) return;

	src= BOCV_IplImage_attach(in[0]->data);
	dst_buf = dupalloc_compbuf(in[0]->data);
	iterations=(int)in[1]->vec[0];
	dst = BOCV_IplImage_attach(dst_buf);

	cvErode(src,dst,0,iterations);

	out[0]->data = dst_buf;

	BOCV_IplImage_detach(src);
	BOCV_IplImage_detach(dst);


}
Developer ID: gmaclair, Project: blendocv, Lines of code: 23, Source: CMP_CvErode.c


Example 9: x

  int regionTracker::calcCentroidAndArea()
  {
    // input: none
    // output: set centroid and area, CvPoint and int
    // return: 0
    //
    // Calculate centroid and area of the region

    int areaCount = 0;         // count total pixel of region
    int xSum = 0, ySum = 0;    // sum of x (or y) coordinate of each pixel in the region
    CvScalar current;
    int i, j;
    int iteration = 5;

    // contract 'result' to reduce the arm's influence when calculating the centroid
    cvErode (result, contractedResult, element, iteration);

    for(i=0; i<result->width; i++)
      for(j=0; j<result->height; j++)
	{
	  current = cvGet2D(contractedResult, j, i);
	  if(current.val[0] != 0)
	    {
	      areaCount++;
	      xSum += i;
	      ySum += j;
	    }
	}

    // set result
    if(areaCount == 0) return -1;
    area = areaCount;
    centroid.x = xSum / areaCount;
    centroid.y = ySum / areaCount;

    return 0;
  }
Developer ID: edison2301, Project: FaceTrack, Lines of code: 37, Source: regionTracker.cpp
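The explicit pixel loop above can also be expressed with image moments (as Example 14 below does); a sketch, assuming contractedResult is a single-channel binary image:

	CvMoments m;
	cvMoments(contractedResult, &m, 1);  // binary = 1: every non-zero pixel counts as 1
	if(m.m00 > 0){
		area       = (int)m.m00;            // pixel count of the region
		centroid.x = (int)(m.m10 / m.m00);
		centroid.y = (int)(m.m01 / m.m00);
	}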


Example 10: if

int regionDetector::getRegion(IplImage *src, int x, int y, IplImage *dst)
{
  // input: image (depth, intensity and so on), IplImage, 1 channel, (recommend 8 bit depth)
  //        a coordinate which is contained in region you want to get, int
  // return: region image, IplImage, binary image
  //         or 0 if the coordinate is invalid (outside the image, or there is no region)
  //
  // Take (depth, intensity, binary and so on) image, classify the image by the region.
  // The region is pixels which has almost same value between that pixel and around pixels.
  // And return the region which contain pixel (x, y).

  int bitDepth;
  int iteration;

  // prepare images
  original = src;
  result->imageData = dst->imageData;

  // set threshold value.
  bitDepth = src->depth;
  if(bitDepth == IPL_DEPTH_8U)
    threshold = 20;
  else if(bitDepth == IPL_DEPTH_16U)
    threshold = 4000;
  else
    return 0;

  // get region
  traverse(x, y, NONE);

  // noise reduction
  iteration = 2;
  cvErode (dst, dst, element, iteration);
  cvDilate (dst, dst, element, iteration);

  return 0;
}
Developer ID: edison2301, Project: FaceTrack, Lines of code: 37, Source: regionDetector.cpp


Example 11: cvCalcMotionGradient

CV_IMPL void
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
                      CvArr* orientation,
                      double delta1, double delta2,
                      int aperture_size )
{
    cv::Ptr<CvMat> dX_min, dY_max;

    CvMat  mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat  maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat  orientstub, *orient = cvGetMat(orientation, &orientstub);
    CvMat  dX_min_row, dY_max_row, orient_row, mask_row;
    CvSize size;
    int x, y;

    float  gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float  min_delta, max_delta;

    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    if( delta1 > delta2 )
    {
        double t;
        CV_SWAP( delta1, delta2, t );
    }

    size = cvGetMatSize( mhi );
    min_delta = (float)delta1;
    max_delta = (float)delta2;
    dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F );
    dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F );

    // calc Dx and Dy
    cvSobel( mhi, dX_min, 1, 0, aperture_size );
    cvSobel( mhi, dY_max, 0, 1, aperture_size );
    cvGetRow( dX_min, &dX_min_row, 0 );
    cvGetRow( dY_max, &dY_max_row, 0 );
    cvGetRow( orient, &orient_row, 0 );
    cvGetRow( mask, &mask_row, 0 );

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row.data.fl[x];
            float dX = dX_min_row.data.fl[x];

            if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
            else
                mask_row.data.ptr[x] = 1;
        }
    }

    cvErode( mhi, dX_min, 0, (aperture_size-1)/2);
    cvDilate( mhi, dY_max, 0, (aperture_size-1)/2);

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        
        for( x = 0; x < size.width; x++ )
        {
            float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];

            if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row.data.ptr[x] = 0;
//......... part of the code omitted .........
Developer ID: SCS-B3C, Project: OpenCV2-2, Lines of code: 101, Source: motempl.cpp
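For context, cvCalcMotionGradient belongs to OpenCV's motion-template workflow; a hedged caller sketch, where the delta values are illustrative and mhi is assumed to be maintained elsewhere by cvUpdateMotionHistory:

	// mhi: CV_32FC1 motion-history image updated by cvUpdateMotionHistory()
	IplImage* mask   = cvCreateImage(cvGetSize(mhi), IPL_DEPTH_8U, 1);
	IplImage* orient = cvCreateImage(cvGetSize(mhi), IPL_DEPTH_32F, 1);
	cvCalcMotionGradient(mhi, mask, orient, 0.05, 0.5, 3);  // the deltas are swapped internally if needed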


Example 12: erode_end_frame_filter

static void erode_end_frame_filter(AVFilterContext *ctx, IplImage *inimg, IplImage *outimg)
{
    OCVContext *ocv = ctx->priv;
    DilateContext *dilate = ocv->priv;
    cvErode(inimg, outimg, dilate->kernel, dilate->nb_iterations);
}
Developer ID: richardpl, Project: libav, Lines of code: 6, Source: vf_libopencv.c


Example 13: findSquares4

// returns sequence of squares detected on the image.  
// the sequence is stored in the specified memory storage  
CvSeq* findSquares4( IplImage* img, CvMemStorage* &storage,bool isForDay)  
{  

	CvSeq* contours;  

	int i, l, N = 50;
	int thresh2,thresh1=15;
	if(isForDay)
		thresh2 = 240;
	else
		thresh2 = 180;
	CvSize sz = cvSize( img->width & -2, img->height & -2 );  

	IplImage* timg = cvCloneImage( img ); // make a copy of input image  
	IplImage* gray = cvCreateImage( cvGetSize(timg), 8, 1 );   
	//IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );  
	IplImage* tgray;  
	CvSeq* result;  
	double s, t;  
	//	storage=cvCreateMemStorage(0);  
	// create empty sequence that will contain points -  
	// 4 points per square (the square's vertices)  
	CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );  
	// select the maximum ROI in the image  
	// with the width and height divisible by 2  
	cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));  

	// down-scale and upscale the image to filter out the noise  
	//	cvPyrDown( timg, pyr, 7 );  
	//	cvPyrUp( pyr, timg, 7 );  
	tgray = cvCreateImage( sz, 8, 1 );  
	//	cvCvtColor(timg,timg,CV_BGR2HSV);
	//	cvCvtColor(timg,tgray,CV_HSV2GRAY);
	//	cvShowImage("tg",tgray);
	// find squares in every color plane of the image  

	for(int c = 0; c < 3; c++ )  
	{  
	//	cout<<c<<endl;
		// extract the c-th color plane  
		cvSetImageCOI( timg, c+1 );  
		cvCopy( timg, tgray, 0 );  
		//	cvEqualizeHist(tgray,tgray);
		// try several threshold levels  
		for( l = 0; l < N; l++ )  
		{  
			// hack: use Canny instead of zero threshold level.  
			// Canny helps to catch squares with gradient shading     
			if( l == 0 )  
			{  
				// apply Canny. Take the upper threshold from slider  
				// and set the lower to 0 (which forces edges merging)   
				cvCanny( tgray, gray, thresh1, thresh2, 3 );     // daytime thresholds
				//		cvCanny( tgray, gray,15, 240, 3 );  
				//		ImagePreprocess::colorEdgeDetect1(img,gray,15,240);
				// dilate canny output to remove potential  
				// holes between edge segments   
				cvDilate( gray, gray, 0, 1 );  
				cvErode( gray, gray, 0, 1 );  

			}  
			else 
			{  
				//apply threshold if l!=0:  
				cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );  
				//		cvShowImage("gray",gray);
			}

			// find contours and store them all as a list  
			cvFindContours( gray, storage, &contours, sizeof(CvContour),  
				CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );  


			// test each contour  
			while( contours )  
			{  
				// approximate contour with accuracy proportional  
				// to the contour perimeter  
				result = cvApproxPoly( contours, sizeof(CvContour), storage,  
					CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );  
				// square contours should have 4 vertices after approximation  
				// relatively large area (to filter out noisy contours)  
				// and be convex.  
				// Note: absolute value of an area is used because  
				// area may be positive or negative - in accordance with the  
				// contour orientation  
				if( result->total == 4 &&  
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) > 4000 &&   
					fabs(cvContourArea(result,CV_WHOLE_SEQ)) < 10000 &&
					cvCheckContourConvexity(result) )  
				{  
					s = 0;  
					for( i = 0; i < 5; i++ )  
					{  
						// find minimum angle between joint  
						// edges (maximum of cosine)  
						if( i >= 2 )
						{
							t = fabs(angle(
//......... part of the code omitted .........
Developer ID: dalinhuang, Project: my-anquandai, Lines of code: 101, Source: license_plate_locate.cpp


Example 14: get_fish_pos

void get_fish_pos(){
	mod++;
	if(g_msPrm.isDrawing){
		cvCopy(frame,cl_frame,NULL);
		switch(g_mouse){
		case AVG_POS:
			set_avg_pos(cl_frame);
		break;
		case RANGE_POS:
			set_range_pos(cl_frame);
		break;

		default:
		break;
		}
	}
	else{

		frame = cvQueryFrame( g_capture );
//			if(!(++mod&0x3))
//			cvShowImage("Camera",frame);
#if 1
//		if(g_msPrm.box.width != 0){
//			draw_box(frame,g_msPrm.box);
//		}
//		else{
//		}
		cvCircle(frame,
			 fishPos,
			 sqrt(mom.m00)/1,
			 cvScalar(0x00,0x00,0x00,0),1,8,0
			 );
		//if(!(++mod&0x3))
		cvShowImage("Camera",frame);
		if(g_is_range){
			cvSetImageROI(frame,g_range);
			cvSetZero(gr_frame);
			cvSetImageROI(gr_frame,g_range);
		}

		cvSmooth( frame, frame, CV_GAUSSIAN, 3, 3 ,0,0);
		//cvCvtColor(frame,frame,CV_RGB2HSV);
		cvInRangeS(frame,g_hsv_min,g_hsv_max,gr_frame);

		if(g_is_range){
			cvSetImageROI(frame,g_range);
		}
		cvErode(gr_frame,gr_frame,NULL,2);
		cvDilate(gr_frame,gr_frame,NULL,2);
		if(g_is_range){
			cvResetImageROI(frame);
			cvResetImageROI(gr_frame);
		}
		cvMoments(gr_frame,&mom,1);
		fishPos.x = (mom.m10/mom.m00);
		fishPos.y = (mom.m01/mom.m00);

		cvShowImage( "set_HSV", gr_frame );
//			cvErode(gr_frame,gr_frame,NULL,10);
#endif
	}

}
Developer ID: gomord, Project: opencv2, Lines of code: 63, Source: fish.c


Example 15: gst_gcs_transform_ip

static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstGcs *gcs = GST_GCS (btrans);

  GST_GCS_LOCK (gcs);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvSplit(gcs->pImageRGBA,   gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX );
  cvCvtColor(gcs->pImageRGBA,  gcs->pImgRGB, CV_BGRA2BGR);


  //////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////MOTION CUES INTEGR////
  //////////////////////////////////////////////////////////////////////////////

  //////////////////////////////////////////////////////////////////////////////
  // apply step 1. filtering using bilateral filter. Cannot happen in-place => scratch
  cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
  // create GRAY image
  cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

  // Frame difference the GRAY and the previous one
  // not intuitive: first smooth the frames, then difference them
  cvCopy( gcs->pImgGRAY,   gcs->pImgGRAY_copy,  NULL);
  cvCopy( gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
  get_frame_difference( gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
  cvDilate( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);


  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // ghost mapping
  gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2 ;
  gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
  gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
  gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
  gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

  if( gcs->ghostfilename){
    cvGetAffineTransform( gcs->srcTri, gcs->dstTri, gcs->warp_mat );
    cvWarpAffine( gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat );
  }




  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // GrabCut algorithm preparation and running

  gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

  // create an IplImage  with the skin colour pixels as 255
  compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
  // And the skin pixels with the movement mask
  cvAnd( gcs->pImg_skin,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);
  //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 2);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 2);

  // if there is alpha==all 1's coming in, then we ignore it: prevents from no vibe before us
  if((0.75*(gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
    cvZero(gcs->pImgChX);
  // OR the input Alpha
  cvOr( gcs->pImgChX,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);


  //////////////////////////////////////////////////////////////////////////////
  // try to consolidate a single mask from all the sub-patches
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 3);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 4);

  //////////////////////////////////////////////////////////////////////////////
  // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
  if( gcs->ghostfilename)
    compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff  );
  else{
    // toss it all to the bbox creation function, together with the face position and size
    compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound );
  }


  //////////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
  gcs->num_clusters = 18; // keep it even to simplify integer arithmetics
  cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
  posterize_image(gcs->pImgRGB_kmeans);
  create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, 
                         gcs->num_clusters, gcs->num_samples);
  adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS


  //////////////////////////////////////////////////////////////////////////////
  if( gcs->debug < 70)
//......... part of the code omitted .........
Developer ID: miguelao, Project: gst_plugins_tsunami, Lines of code: 101, Source: gstgcs.c


Example 16: CheckForHand


//......... part of the code omitted .........
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
			
				}				
			}
			for (x=-i; x <= i; x++)
			{
				y = i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
			for (y=-i; y <= i; y++)
			{
				x = -i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
			for (y=-i; y <= i; y++)
			{
				x = i;
				currMatch = CheckForHand(image, x, y, &splitType);
				if (currMatch > highMatch)
				{
					highMatch = currMatch;
					highValX = x;
					highValY = y;
					highSplitType = splitType;
				}				
			}
		}
	}

	if (highMatch > 0)
	{
		int x1, y1, x2, y2;
		cvCopy(backupImage.getIplImage(), image->getIplImage(), NULL);
		computeBlob(&backupImage, &backupImage, m_center.x+highValX, m_center.y+highValY, 100, &x1, &y1, &x2, &y2);

		CPianoHand tempHand; 
		
		
		if (highSplitType == 0)	//Center Reference
			tempHand = *(new CPianoHand(0, x1, y1, x2, y2));
		else if (highSplitType == 1)//Top-left reference
			tempHand = *(new CPianoHand(0, x1, y1, x1+m_boundingBox.width, y1+m_boundingBox.height));
		else if (highSplitType == 2)	//bottom-right reference
			tempHand = *(new CPianoHand(0, x2-m_boundingBox.width, y2-m_boundingBox.height, x2, y2));
		else	//Center reference, without much width change
			tempHand = *(new CPianoHand(0, x1, y1, x1+m_boundingBox.width, y2));
		UpdateWithHand(&tempHand);


		//Create Image Hands Mask Image from Bounding Box
		for (x=0; x < IMAGE_WIDTH; x++)
		{
			for (y=0; y < IMAGE_HEIGHT; y++)
			{
				m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x]=0;
				m_traceImage.getIplImage()->imageData[y*IMAGE_WIDTH+x]=0;

				if (x >= tempHand.m_boundingBox.x && x < (tempHand.m_boundingBox.x+tempHand.m_boundingBox.width))
				{
					if (y >= tempHand.m_boundingBox.y && y < (tempHand.m_boundingBox.y+tempHand.m_boundingBox.height))
					{
							m_handsImage.getIplImage()->imageData[y*IMAGE_WIDTH+x] = 
								(unsigned char)image->getIplImage()->imageData[y*IMAGE_WIDTH+x];
						
					}
				}

			}
		}

		CIplImage tempImage;
		tempImage.initialize(IMAGE_WIDTH, IMAGE_HEIGHT, 8);

		cvDilate(m_handsImage.getIplImage(), m_edgeImage.getIplImage(), NULL, 1);
		cvErode(m_edgeImage.getIplImage(), tempImage.getIplImage(), NULL, 1);
		cvCanny(tempImage.getIplImage(), m_edgeImage.getIplImage(), 0, 1, 3);

		
		/*DrawBox(m_imb_edgeDetectedImage.getIplImage(), x1, y1, x2, y2, 1);
		(*numHands)++;*/
	}
}
Developer ID: dvdrndlph, Project: dactylize, Lines of code: 101, Source: Copy+of+PianoHand.cpp


Example 17: while

void CMFC_systemServerDlg::Thread_getImage(LPVOID lParam)
{
	CMythreadParam * Thread_Info = (CMythreadParam *)lParam;
	CMFC_systemServerDlg * hWnd = (CMFC_systemServerDlg *)CWnd::FromHandle((HWND)Thread_Info->hWnd);
	
	Kinect2Capture kinect;
	kinect.Open(1, 1, 0);

	kinect.uDepthMax = 2000;
	IplImage* img_get = nullptr;
	while (1)
	{
		img_get = kinect.DepthImage();
		if (img_get != NULL)
		{
			cv::Mat src_img = img_get;
			// set the quad's coordinates before and after the transform (top-left, bottom-left, bottom-right, top-right)
			cv::Point2f pts1[4] = { roi.pts_depth[0], roi.pts_depth[1], roi.pts_depth[2], roi.pts_depth[3] };
			cv::Point2f pts2[4] = { roi.pts_to[0], roi.pts_to[1], roi.pts_to[2], roi.pts_to[3] };
			// compute the perspective transform matrix
			cv::Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
			cv::Mat dst_img;
			// apply the warp
			cv::warpPerspective(src_img, dst_img, perspective_matrix, cvSize(320, 240), cv::INTER_LINEAR);
			
			cvCopy(&(IplImage)dst_img, m_TabPage1.sImage_depth);
			//***** image subtraction *****
			cvAbsDiff(m_TabPage1.sImage_depth, m_TabPage1.sImage_depthGround, m_TabPage1.sImage_depth);
			cvThreshold(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, 1, 255, CV_THRESH_BINARY);
			//*******************
			//*****erode & dilate*****
			IplConvKernel *pKernel = NULL;
			pKernel = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
			cvErode(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, pKernel, 1);
			cvDilate(m_TabPage1.sImage_depth, m_TabPage1.sImage_depth, pKernel, 1);
			//*************************
			hWnd->ShowImage(m_TabPage1.sImage_depth, hWnd->GetDlgItem(IDC_IMAGE_binPickLiveDepth),1);
		}
		cvReleaseImage(&img_get);

		img_get = kinect.RGBAImage();
		if (img_get != NULL)
		{
			cv::Mat src_img = img_get;
			// set the quad's coordinates before and after the transform (top-left, bottom-left, bottom-right, top-right)
			cv::Point2f pts1[4] = { roi.pts_color[0], roi.pts_color[1], roi.pts_color[2], roi.pts_color[3] };
			cv::Point2f pts2[4] = { roi.pts_to[0], roi.pts_to[1], roi.pts_to[2], roi.pts_to[3] };
			// compute the perspective transform matrix
			cv::Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
			cv::Mat dst_img;
			// apply the warp
			cv::warpPerspective(src_img, dst_img, perspective_matrix, cvSize(320, 240), cv::INTER_LINEAR);
			
			cvCopy(&(IplImage)dst_img, m_TabPage1.sImage_live);
			 
			IplImage* image_show = cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
			cvCvtColor(m_TabPage1.sImage_live, image_show, CV_BGRA2BGR);
			hWnd->ShowImage(image_show, hWnd->GetDlgItem(IDC_IMAGE_binPickLive));
			cvReleaseImage(&image_show);
		}
		cvReleaseImage(&img_get);
	}
}
Developer ID: FIREoo, Project: 105_RobotCompetition_InternetOfRobotics, Lines of code: 63, Source: MFC_systemServerDlg.cpp


Example 18: cvAbsDiff

//--------------------------------------------------------------
void ofApp::update() {
    kinect.update();
    
    if(kinect.getHasNewFrame()){
        grayImage = kinect.getPatchedCvImage(); // get the merged cvImage from the two kinects
        
        // set new background image
        if(bLearnBackground){
            bgImage = grayImage;   // let this frame be the background image from now on
            bLearnBackground = false;
            bBackgroundLearned = true;
        }
        
        // forget background image
        if(bForgetBackground){
            bBackgroundLearned = false;
            bForgetBackground = false;
        }
        // set minimal blob area
        contFinder.setMinArea(minArea);
        
        grayImage.flagImageChanged();
        if(bBackgroundLearned){
            cvAbsDiff(bgImage.getCvImage(), grayImage.getCvImage(), grayDiff.getCvImage());
            cvErode(grayDiff.getCvImage(), grayDiff.getCvImage(), NULL, 2);
            cvDilate(grayDiff.getCvImage(), grayDiff.getCvImage(), NULL, 1);
            // threshold ignoring little differences
            cvThreshold(grayDiff.getCvImage(), grayDiff.getCvImage(), 4, 255, CV_THRESH_BINARY);
            grayDiff.flagImageChanged();
            // update the ofImage to be used as background mask for the blob finder
            grayDiffOfImage.setFromPixels(grayDiff.getPixels(), kinect.width, kinect.height);
            
            // update the cv images
            grayDiffOfImage.flagImageChanged();

            // pass image on to contour finder
            contFinder.findContours(grayDiffOfImage.getCvImage());
        } else {
            contFinder.findContours(grayImage.getCvImage());
        }//backGroundLearned
    }
    
    
    // send a osc message for every blob
    // format /blobs <index> <label> <age> <area> <x> <y>
    ofPoint loc;
    ofRectangle area;
    int label;
    if( contFinder.size() > 0) {
        for(unsigned int i = 0; i<contFinder.size(); ++i) {
            area = ofxCv::toOf(contFinder.getBoundingRect(i));
            if(area.getCenter().y > kinect.height * 0.5){
                ofxOscMessage m;
                m.setAddress("/blobs");
                m.addIntArg( i );                                       // index
                m.addIntArg( (label = contFinder.getLabel(i)) );        // label
                m.addIntArg( contFinder.getTracker().getAge(label) );   // age
                m.addIntArg(( area.width*area.height ));                // area
                loc = ofxCv::toOf(contFinder.getCenter(i));
                m.addIntArg(loc.x);                                     // x
                m.addIntArg(loc.y);                                     // y
                sender.sendMessage(m);
                cout << "message sent with label: " << contFinder.getLabel(i) << endl;
            }
        } //for
    } else {
        ofxOscMessage m;
        m.setAddress("/blobs");
        for(int i = 0; i<6;++i){
            m.addIntArg(0); // send to all poly instances, all info set to zero
        }
        sender.sendMessage(m);
        
    }// if
}
开发者ID:TMHGLND,项目名称:so-ffortress,代码行数:76,代码来源:ofApp.cpp


Example 19: display

void display()
{
	glClearColor(0.0, 0.0, 0.0, 0.0);
    	glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);


/*	glPushMatrix();
	glTranslatef(xavg,yavg,0);
	glutSolidCube(200);
	glPopMatrix();
/*

	glBegin(GL_QUADS);
		glVertex3f(xr,xb,0);	
		glVertex3f(xb,yb,0);	
		glVertex3f(xl,yl,0);
		glVertex3f(xt,yt,0);
	glEnd();
*/
///////////////////////////////////////////////////////////nishanthprakash20///////////////////////////////////////////////////
	captured=cvQueryFrame(video1);
	disp=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	
	eroded=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);	
	dilated=cvCreateImage(cvGetSize(captured),IPL_DEPTH_8U,3);

//	data=cvGet2D(captured,240,320);
//	printf("%f,%f,%f\n",data.val[0],data.val[1],data.val[2]);
	

	thresh1=150;
	thresh2=100;
	thresh3=100;


	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(captured,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	
				cvSet2D(disp,i,j,data);
			}
		}

	cvErode(disp,eroded,NULL,1);
	cvDilate(eroded,dilated,NULL,4);
	for(i=0;i<disp->height;i++)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto donetop;
				
			}
		}
	donetop:
	xt=j;
	yt=i;
	
	for(i=479;i>0;i--)
	for(j=0;j<disp->width;j++)
		{
			data=cvGet2D(dilated,i,j);
			
			if(data.val[1]>thresh1&&data.val[2]<thresh2&&data.val[0]<thresh3)
			{	goto doneleft;
				
			}
		}
	doneleft:
	xb=j;
	yb=i;
	
	inclination=((float)atan((yt-yb)/(xt-xb))-(float)atan(10.0/21))*180/3.14;
	if(inclination<0)	inclination+=60;
	printf("%f\n",inclination);
	
	cvNamedWindow("Cap");
	cvShowImage("Cap",dilated) 
