C++ cvRect Function Code Examples


This article collects typical usage examples of the C++ cvRect function. If you have been wondering what cvRect is for, how to call it, or what real-world cvRect code looks like, the curated examples below should help.



The following presents 20 code examples of the cvRect function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
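
Before the examples, a minimal standalone sketch of what cvRect itself does may be useful (this assumes the OpenCV 2.x legacy C API with the cv.h/highgui.h headers; the file name and rectangle coordinates are placeholders): cvRect merely packs x, y, width and height into a CvRect value, and the rectangle only takes effect when it is passed to functions such as cvSetImageROI, cvRectangle or cvGetSubRect.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage *img = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR); /* placeholder file name */
    if (!img) return -1;

    /* cvRect is only a constructor for the plain struct {x, y, width, height};
       the image is assumed to be at least 110x70 pixels */
    CvRect roi = cvRect(10, 20, 100, 50);

    /* crop the region through an image ROI ... */
    IplImage *crop = cvCreateImage(cvSize(roi.width, roi.height), img->depth, img->nChannels);
    cvSetImageROI(img, roi);
    cvCopy(img, crop, NULL);
    cvResetImageROI(img);

    /* ... and draw the same rectangle on the original */
    cvRectangle(img, cvPoint(roi.x, roi.y),
                cvPoint(roi.x + roi.width, roi.y + roi.height),
                CV_RGB(0, 255, 0), 2, 8, 0);

    cvSaveImage("roi.jpg", crop, 0);
    cvReleaseImage(&crop);
    cvReleaseImage(&img);
    return 0;
}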

Example 1: processPlateChars

plateInfo* processPlateChars( IplImage * orig_img )
{
	//Create placeholder for gray image
	IplImage * gray_img = cvCreateImage(cvGetSize(orig_img), IPL_DEPTH_8U, 1);
		
	//Convert image to grayscale
	cvCvtColor( orig_img, gray_img, CV_RGB2GRAY );
		
	//Create placeholder for black and white image
	IplImage * bw_img = cvCreateImage(cvGetSize(gray_img), IPL_DEPTH_8U,	1);

	//Convert gray image to binary (black and white)
	cvThreshold( gray_img, bw_img, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

	//Invert image
	IplImage * rev_img = cvCreateImage(cvGetSize(bw_img), IPL_DEPTH_8U, 1);
	cvNot( bw_img, rev_img );

	//Save results
//	cvSaveImage( "bw_img.jpg", bw_img, NULL );
//	cvSaveImage( "rev_img.jpg", rev_img, NULL );


	//Resize the reversed image: 400x200 (435x218??)
	IplImage * resize_img = cvCreateImage(cvSize(400, 200), IPL_DEPTH_8U, 1);
	cvResize( rev_img, resize_img, CV_INTER_LINEAR) ;

	//Save results
//	cvSaveImage( "resize_img.jpg", resize_img, NULL );
	
	//Okay, now find the reversed (and resized) image's size
	CvSize resize_size = cvGetSize( resize_img );
	int w = resize_size.width;
	int h = resize_size.height;

	printf("Width: %d\nHieght: %d\n", w, h);

	//Allrighty, now try to crop
	//First, create new image in the right size
	//Old data for below: cvSize( w, h-108 ) (w-30, h-108)
	IplImage * resize_crop = cvCreateImage(cvSize(w-60, h-108), IPL_DEPTH_8U, 1);

	//Old data for below: cvRect( 0, 54, w, h-108)  (15, 54 w-30, h-108)
	cvSetImageROI( resize_img, cvRect(30, 54, w-60, h-108) );//15
	cvCopy( resize_img, resize_crop, NULL );

	//Save this result
//	cvSaveImage( "resize_crop.jpg", resize_crop, NULL );

	CvSize resizeCrop_size = cvGetSize( resize_crop );
	int w2 = resizeCrop_size.width;
	int h2 = resizeCrop_size.height;

	printf("\nWidth: %d\nHieght: %d\n", w2, h2);

	//Now get the characters (using code from the commented section below)
	struct plateInfo * plate_info = malloc( sizeof(plateInfo) * 8 );
	
	CvSeq * cvSeq = 0;
	CvMemStorage * storage = cvCreateMemStorage(0);
	int numContours;
	int i;

	//Poor man's debugger...
	char * plate_buffer[] = {"plate0.jpg", "plate1.jpg", "plate2.jpg",
		"plate3.jpg", "plate4.jpg","plate5.jpg", "plate6.jpg",
		"plate7.jpg"};


	//This should be 8
	numContours = cvFindContours( resize_crop, storage, &cvSeq,
			sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
	
	printf("\nnumContours plate: %d\n", numContours);
	
	cvDrawContours( resize_crop, cvSeq, cvScalarAll(255), cvScalarAll(0),
			1, -1, 8, cvPoint(0,0) );
	  
	for( i = 0; i < 8; i++)
	{
		//Get bounding rect
		CvRect char_rect = cvBoundingRect( cvSeq, 0 );
		
		//Create and set individual images
		plate_info[i].charImage = NULL;
		plate_info[i].charImage = cvCreateImage(cvSize(char_rect.width, char_rect.height), IPL_DEPTH_8U, 1);

		//****Moved to 'fix' below - contour area and perimeter*****
		//plate_info[i].contourArea = cvContourArea( cvSeq, CV_WHOLE_SEQ, 0 );
		//plate_info[i].arcLength = cvArcLength( cvSeq, CV_WHOLE_SEQ, -1 );

		//Set ROI for copying and copy	
		cvSetImageROI( resize_crop, char_rect );
		cvCopy( resize_crop, plate_info[i].charImage, NULL );
	
		//For the poor man's debugger
		//cvSaveImage( plate_buffer[i], plate_chars[i], NULL );
		//cvSaveImage( plate_buffer[i], plate_info[i].charImage, NULL );

		cvSeq = cvSeq->h_next;
//......... part of the code omitted here .........
Developer ID: 12019, Project: LicensePlateReaders, Lines of code: 101, Source file: fp.c
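
The per-character extraction above repeats a pattern that several of the later examples use as well: allocate a destination image of the rect's size, set the rect as the source ROI, copy, then reset the ROI. A hypothetical helper (not part of the original fp.c) could wrap the whole sequence:

static IplImage* crop_to_rect(IplImage *src, CvRect r)
{
	/* destination gets exactly the rectangle's size and the source's format */
	IplImage *dst = cvCreateImage(cvSize(r.width, r.height), src->depth, src->nChannels);
	cvSetImageROI(src, r);
	cvCopy(src, dst, NULL);
	cvResetImageROI(src);
	return dst;
}

With such a helper the loop body above would shrink to roughly plate_info[i].charImage = crop_to_rect(resize_crop, cvBoundingRect(cvSeq, 0));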


Example 2: CDataOut::R_Out

void CDataOut::R_Out()
{
  int ii=0;
  RFile<<"library(rgl) "<<endl;
  RFile<<"rgl.clear(\"all\")"<<endl;
  RFile<<"rgl.bg(sphere = TRUE, color = c(\"black\", \"green\"), lit = FALSE, size=2, alpha=0.2, back = \"lines\")"<<endl;
  RFile<<"rgl.light()"<<endl;
  RFile<<"rgl.bbox()"<<endl;
  CvMat *temp2,*temp3;
  temp2=cvCreateMat(3,6,CV_32FC1);
  temp3=cvCreateMat(3,3,CV_32FC1);
  	    CvMat* m = cvCreateMat(3,1,CV_32FC1);
  CvMat *temp,*jtemp;
  temp=cvCreateMatHeader(6,6,CV_32FC1);
  jtemp=cvCreateMat(3,6,CV_32FC1);
  for ( list<CElempunto*>::iterator It=pEstimator->pMap->bbdd.begin();
	It != pEstimator->pMap->bbdd.end(); It++ )
    {
      if((*It)->state>2)
	{
	  float s_th=sin((*It)->theta);
	  float c_th=cos((*It)->theta);
	  float s_ph=sin((*It)->phi);
	  float c_ph=cos((*It)->phi);

	  cvmSet(m,0,0,sin((*It)->phi));
	  cvmSet(m,1,0,-sin((*It)->theta));
	  cvmSet(m,2,0,cos((*It)->phi));
	  cvNormalize( m, m);

	  float xreal=(*It)->wx +cvmGet(m,0,0)/(*It)->rho;
	  float yreal=(*It)->wy +cvmGet(m,1,0)/(*It)->rho;
	  float zreal=(*It)->wz +cvmGet(m,2,0)/(*It)->rho;


	  cvZero(jtemp);
	  cvmSet(jtemp,0,0,1);
	  cvmSet(jtemp,1,1,1);
	  cvmSet(jtemp,2,2,1);

	  cvmSet(jtemp,0,3,(-s_th*s_ph)/(*It)->rho);
	  cvmSet(jtemp,1,3,(-c_th     )/(*It)->rho);
	  cvmSet(jtemp,2,3,(-s_th*c_ph)/(*It)->rho);

	  cvmSet(jtemp,0,4,(c_th*c_ph)/(*It)->rho);
	  cvmSet(jtemp,1,4,(0    )/(*It)->rho);
	  cvmSet(jtemp,2,4,(-c_th*s_ph)/(*It)->rho);

	  cvmSet(jtemp,0,5,(-c_th*s_ph)/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,1,5,(s_th     )/((*It)->rho*(*It)->rho));
	  cvmSet(jtemp,2,5,(-c_th*c_ph)/((*It)->rho*(*It)->rho));

	  if (12+ii*6< pEstimator->pCovMat->width && 12+ii*6< pEstimator->pCovMat->height)
	    {

	      cvGetSubRect( pEstimator->pCovMat,temp,cvRect(12+ii*6,12+ii*6,6,6) );
	      cvMatMul(jtemp,temp,temp2);
	      cvGEMM( temp2,jtemp,1,NULL,0,temp3,CV_GEMM_B_T );

	      RFile<<"p"<<ii<< " <- matrix(c(" ;
	      for (int i=0; i<2 ; i++)
		for (int j=0; j<3;j++)
		  {
		    RFile<<cvmGet(temp3,i,j)<<",";
		  }
	      RFile<<cvmGet(temp3,2,0)<<",";
	      RFile<<cvmGet(temp3,2,1)<<",";
	      RFile<<cvmGet(temp3,2,2);
	      RFile<<"),3,3)"<<endl;
	      RFile<<"pos <- c("<<xreal<<", ";
	      RFile<<yreal<<", ";
	      RFile<<zreal<<")"<<endl;
	      RFile<<"try(plot3d( ellipse3d(p"<<ii<<",centre=";
	      RFile<<"pos), col=\"blue\", alpha=0.5, add = TRUE) )"<<endl;
	    }
	}
      ii++;
    }
  RFile<<"p"<<ii<< " <- matrix(c(" ;
  for (int i=0; i<2 ; i++)
    for (int j=0; j<3;j++)
      {
	RFile<<cvmGet(pEstimator->pCovMat,2*i,2*j)<<",";
      }
   RFile<<cvmGet(pEstimator->pCovMat,4,0)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,2)<<",";
   RFile<<cvmGet(pEstimator->pCovMat,4,4);
   RFile<<"),3,3)"<<endl;
   RFile<<"pos <- c("<< cvmGet(pEstimator->pDataCam->translation,0,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,1,0)<<", ";
   RFile<< cvmGet(pEstimator->pDataCam->translation,2,0)<<") "<<endl;
   RFile<<"plot3d( ellipse3d(p"<<ii<<",centre=";
   RFile<<"pos), col=\"red\", alpha=0.5, add = TRUE) "<<endl;
   RFile<<"rgl.viewpoint(45,30)"<<endl;
   RFile<<"rgl.snapshot(\"c:\\\\out"<<iter<<".png\")"<<endl;
   //RFile.close();

   cvReleaseMat(&temp);
   cvReleaseMat(&temp2);
   cvReleaseMat(&temp3);
//......... part of the code omitted here .........
Developer ID: dirmich, Project: 3dslam, Lines of code: 101, Source file: dataout.cpp
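
cvGetSubRect, used above to read 6x6 blocks out of the estimator's covariance matrix, is the matrix counterpart of an image ROI: it fills a CvMat header that aliases a rectangular block of the parent matrix, so no data is copied. A minimal standalone sketch of that access pattern (the 18x18 size and the written value are assumptions, not taken from the code above):

#include <opencv/cv.h>

int main(void)
{
    CvMat *cov = cvCreateMat(18, 18, CV_32FC1);
    cvSetIdentity(cov, cvRealScalar(1.0));           /* placeholder contents */

    /* header only; cvGetSubRect will point it into cov's data */
    CvMat *block = cvCreateMatHeader(6, 6, CV_32FC1);
    int ii = 0;                                      /* feature index, as in the loop above */
    cvGetSubRect(cov, block, cvRect(12 + ii * 6, 12 + ii * 6, 6, 6));

    /* writing through the sub-rect writes straight into cov */
    cvmSet(block, 0, 0, 42.0);

    cvReleaseMat(&block);                            /* releases the header, not cov's data */
    cvReleaseMat(&cov);
    return 0;
}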


Example 3: main

int main(int argc, char* argv[]){
    IplImage *frame, *frameL, *frameR;

    //load image
    //images are loaded by openCV with color channels in this order: BGR
    frame = cvLoadImage(argv[1], 1);

    //Simple error handling
    if(!frame){
       printf("Erro ao abrir a imagem.");
       exit(-1);
    }

    //Verify that the image width is divisible by 2
    if(frame->width % 2 != 0){
          printf("Imagem possui largura não divisível por 2. Favor cortar!");
          exit(-1);
    }

    //get width and height from original image
    int width = frame->width;
    int height = frame->height;


    //new images will have half width of original image
    CvSize size = cvSize( width/2, height);

    //copy image properties
    frameL = cvCreateImage(size, frame->depth, frame->nChannels);
    frameR = cvCreateImage(size, frame->depth, frame->nChannels);
    cvZero(frameL);
    cvZero(frameR);

    //set ROI to cut the image on the left (half of the original)
    cvSetImageROI(frame, cvRect(0,0, width/2, height));

    //copy image portion
    cvCopy(frame, frameL);

    //reset ROI
    cvResetImageROI(frame);

    //set ROI to cut the image on the right (second half of the original)
    cvSetImageROI(frame, cvRect(width/2, 0, width/2, height));

    //copy image portion
    cvCopy(frame, frameR);

    //reset ROI
    cvResetImageROI(frame);

    //save images
    cvSaveImage("frameLeft.bmp", frameL);
    cvSaveImage("frameRight.bmp", frameR);

    //create anaglyph
    createAnaglyph(frameL, frameR);

    //free pointers
    cvReleaseImage(&frame);
    cvReleaseImage(&frameL);
    cvReleaseImage(&frameR);

    printf("\n\nImagens separadas com sucesso!\n\n");

    return 0;
}
Developer ID: andrecurvello, Project: tests-zingarelli, Lines of code: 67, Source file: SBS-to-anaglyph-image.cpp


Example 4: process_image_common

static void process_image_common(IplImage *frame)
{
  CvFont font;
  cvInitFont(&font, CV_FONT_VECTOR0, 0.25f, 0.25f);

  CvSize video_size;
#if defined(USE_POSIX_SHARED_MEMORY)
  video_size.height = *shrd_ptr_height;
  video_size.width  = *shrd_ptr_width;
#else
  // XXX These parameters should be set ROS parameters
  video_size.height = frame->height;
  video_size.width  = frame->width;
#endif
  CvSize    frame_size = cvSize(video_size.width, video_size.height/2);
  IplImage *temp_frame = cvCreateImage(frame_size, IPL_DEPTH_8U, 3);
  IplImage *gray       = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *edges      = cvCreateImage(frame_size, IPL_DEPTH_8U, 1);
  IplImage *half_frame = cvCreateImage(cvSize(video_size.width/2, video_size.height/2), IPL_DEPTH_8U, 3);

  CvMemStorage *houghStorage = cvCreateMemStorage(0);

  cvPyrDown(frame, half_frame, CV_GAUSSIAN_5x5); // Reduce the image by 2

  /* we're interested only in the road below the horizon - so crop the top portion of the image off */
  crop(frame, temp_frame, cvRect(0, frame_size.height, frame_size.width, frame_size.height));
  cvCvtColor(temp_frame, gray, CV_BGR2GRAY); // convert to grayscale

  /* Perform a Gaussian blur & detect edges */
  // smoothing image more strong than original program
  cvSmooth(gray, gray, CV_GAUSSIAN, 15, 15);
  cvCanny(gray, edges, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);

  /* do Hough transform to find lanes */
  double rho = 1;
  double theta = CV_PI/180;
  CvSeq *lines = cvHoughLines2(edges, houghStorage, CV_HOUGH_PROBABILISTIC,
                               rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);

  processLanes(lines, edges, temp_frame, frame);

#ifdef SHOW_DETAIL
  /* show middle line */
  cvLine(temp_frame, cvPoint(frame_size.width/2, 0),
         cvPoint(frame_size.width/2, frame_size.height), CV_RGB(255, 255, 0), 1);

  // cvShowImage("Gray", gray);
  // cvShowImage("Edges", edges);
  // cvShowImage("Color", temp_frame);
  // cvShowImage("temp_frame", temp_frame);
  // cvShowImage("frame", frame);
#endif

#if defined(USE_POSIX_SHARED_MEMORY)
  setImage_toSHM(frame);
#endif

#ifdef SHOW_DETAIL
  // cvMoveWindow("Gray", 0, 0);
  // cvMoveWindow("Edges", 0, frame_size.height+25);
  // cvMoveWindow("Color", 0, 2*(frame_size.height+25));
#endif

  cvReleaseMemStorage(&houghStorage);
  cvReleaseImage(&gray);
  cvReleaseImage(&edges);
  cvReleaseImage(&temp_frame);
  cvReleaseImage(&half_frame);
}
Developer ID: Keerecles, Project: Autoware, Lines of code: 69, Source file: lane_detector.cpp
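
The crop() helper called near the top of this listing is not shown in the excerpt. A plausible, hedged definition that matches the way it is used (copy the given rectangle of src into the pre-allocated dst of the same size) would be:

static void crop(IplImage *src, IplImage *dst, CvRect rect)
{
  /* dst must already be allocated with rect.width x rect.height and src's format */
  cvSetImageROI(src, rect);
  cvCopy(src, dst, NULL);
  cvResetImageROI(src);
}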


Example 5: main

int main(int argc, char **argv)
{
    // Initialize ROS
    ros::init(argc, argv, "ic2020_vodom");
    ros::NodeHandle n;   

    ros::Subscriber surf_sub = n.subscribe("/surf/keyframes", 5, optflowCallback);
    ros::Publisher vodom_pub = n.advertise<ic2020_vodom::keyframe>("/vodom/keyframes", 100);    

    // Wait for video streams to be up
    BlockWhileWaitingForVideo();  

    // Create display images
	view_im = cvCreateImage( cvSize(2*newArrival->width, newArrival->height), 8, IPL_PXL_BYTES );

	#ifdef VISUALIZE
	cvNamedWindow("VisualOdom", CV_WINDOW_AUTOSIZE);
	#endif

    // Main loop
    printf("Entering main loop\n");

    while (ros::ok())
    {
        char c = cvWaitKey(5);
        if (c == 'Q' || c == 'q')
            break; 

        // Get Images
        ros::spinOnce();

        // Check if new keyframe is available
        if (newArrival == NULL) { continue; }
        printf ("\33[2J");
        
        // Rotate in new data
        RotateNewArrivalIn();
        
        /**********************************
            Check we have two keyframes
        ***********************************/
        if (kA == 0 || kB == 0) { continue; }
        
        printf("Keyframe A: %i\n", kA->keyframe_num);
        printf("Keyframe B: %i\n", kB->keyframe_num);
        
        // COPY IMAGE DATA TO DOUBLE SIZE IMAGE
        cvSetImageROI( view_im, cvRect(0, 0, kB->im->width, kB->im->height));
        cvCopy( kB->im, view_im );
        cvSetImageROI( view_im, cvRect(kB->im->width, 0, kA->im->width, kA->im->height));
        cvCopy( kA->im, view_im );
        cvResetImageROI( view_im );

        // DRAW RED CIRCLES ON FEATURES
        for (unsigned int i = 0; i < kB->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->features[i].point2D[0]),
                     cvRound(kB->features[i].point2D[1])), 3.0f, colors[0], 2, 8);
        }
        for (unsigned int i = 0; i < kA->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->features[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->features[i].point2D[1])), 3.0f, colors[0], 2, 8);              
        }
        for (unsigned int i = 0; i < kB->numCorn1; i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->corn1[i].point2D[0]),
                     cvRound(kB->corn1[i].point2D[1])), 3.0f, colors[1], 1, 8);
        }
        for (unsigned int i = 0; i < kA->numCorn2; i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->corn2[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->corn2[i].point2D[1])), 3.0f, colors[1], 1, 8);              
        }
        
        /**********************************
          Initial RANSAC w SURF and STCorn
        ***********************************/
        
        // GET SURF PAIRS
        tt = (double)cvGetTickCount();
        std::vector<unsigned int> pairs;
        SURFHelper::findSURFPairs(&kA->descBuffer, &kB->descBuffer, pairs);
        tt = (double)cvGetTickCount() - tt;
        //printf( "SURF Match Time = %gms\n", tt/(cvGetTickFrequency()*1000.));        
        printf( "Found %i SURF Matches \n", pairs.size()/2);
        
        // RANSAC
        std::vector<unsigned int> filtered_surf_pairs;
        std::vector<unsigned int> filtered_corn_pairs;
        tt = (double)cvGetTickCount();
        if (kA->numCorn2 == kB->numCorn1) {
            if (!VisualOdometry::RANSAC6DFast(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
                                          &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs,
                                          kB->im->width, kB->im->height, 10, 10, 1)) 
            //if (!VisualOdometry::RANSAC6D(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
            //                              &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs)) 
            //if (!VisualOdometry::RANSAC6DReproj(&kA->features, &kB->features, &pairs, &filtered_surf_pairs))
            {
                printf("RANSAC MATCHES FEATURE # AREN'T EQUAL OR LESS THAN 7 FEATURES \n");
                continue;
            }
        } else {
            printf("WTF KEYFRAME A's FORWARD ST FEATURES != KEYFRAME B's BACK ST FEATURES \n");
//......... part of the code omitted here .........
Developer ID: mlab-upenn, Project: HAWK-basestation, Lines of code: 101, Source file: ic2020_vodom.cpp


Example 6: getFeaturePyramid

/*
// Getting feature pyramid  
//
// API
// int getFeaturePyramid(IplImage * image, const filterObject **all_F, 
                      const int n_f,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, featurePyramid **maps);
// INPUT
// image             - image
// lambda            - resize scale
// k                 - size of cells
// startX            - X coordinate of the image rectangle to search
// startY            - Y coordinate of the image rectangle to search
// W                 - width of the image rectangle to search
// H                 - height of the image rectangle to search
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, CvLSVMFeaturePyramid **maps)
{
    IplImage *img2, *imgTmp, *imgResize;
    float   step, tmp;
    int      cntStep;
    int      maxcall;
    int i;
    int err;
    CvLSVMFeatureMap *map;
    
    //getting subimage
    cvSetImageROI(image, cvRect(startX, startY, W, H));
    img2 = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    cvCopy(image, img2, NULL);
    cvResetImageROI(image);

    if(img2->depth != IPL_DEPTH_32F)
    {
        imgResize = cvCreateImage(cvSize(img2->width , img2->height) , IPL_DEPTH_32F , 3);
        cvConvert(img2, imgResize);
    }
    else
    {
        imgResize = img2;
    }
    
    step = powf(2.0f, 1.0f/ ((float)lambda));
    maxcall = W/k;
    if( maxcall > H/k )
    {
        maxcall = H/k;
    }
    cntStep = (int)(logf((float)maxcall/(5.0f))/logf(step)) + 1;
    //printf("Count step: %f %d\n", step, cntStep);

    allocFeaturePyramidObject(maps, lambda, cntStep + lambda);

    for(i = 0; i < lambda; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(img2, tmp, 4);
        err = getFeatureMaps_dp(imgTmp, 4, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }

    /**********************************one**************/
    for(i = 0; i <  cntStep; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(imgResize, tmp, 8);
	    err = getFeatureMaps_dp(imgTmp, 8, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i + lambda] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
		cvReleaseImage(&imgTmp);
    }/*for(i = 0; i < cntStep; i++)*/

    if(img2->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    cvReleaseImage(&img2);
    return LATENT_SVM_OK;
}
Developer ID: SCS-B3C, Project: OpenCV2-2, Lines of code: 97, Source file: featurepyramid.cpp
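
Given the parameters documented in the header comment, a call that builds the pyramid over a whole image might look like the hedged sketch below (the lambda and k values are assumptions; CvLSVMFeaturePyramid and LATENT_SVM_OK come from the same latent-SVM module as the function above):

static CvLSVMFeaturePyramid* buildPyramidForWholeImage(IplImage *img)
{
    CvLSVMFeaturePyramid *maps = NULL;
    const int lambda = 10;   /* assumed: intermediate levels per octave */
    const int k = 8;         /* assumed: HOG cell size in pixels */
    if (getFeaturePyramid(img, lambda, k,
                          0, 0, img->width, img->height, &maps) != LATENT_SVM_OK)
        return NULL;
    /* maps->pyramid[0 .. lambda + cntStep - 1] now hold the per-level feature maps */
    return maps;
}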


Example 7: operateImage


//......... part of the code omitted here .........
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, tmp1d, NULL, NULL);
		break;
	case 3: // Brightness mode
		COND_PRINTF("Brightness");
		cvCvtColor(image1, tmp3d, CV_BGR2HSV);
		cvSplit(tmp3d, NULL, NULL, tmp1d, NULL);
		break;
	case 4: // 
		COND_PRINTF("Color");
		img_full_channel = 1;
		break;
	}

	//filterByVolume(tmp1d, tmp1d, value);
	if (img_full_channel) { // Image has 3 channel
#if 0
		cvRunningAvg(image1, backgroundAcc, (double)userdata->accValue / 1024, NULL);
		cvConvertScale(backgroundAcc, background, 1, 0);
		cvNamedWindow(CONTROL_WINDOW "41", 0);
		cvResizeWindow(CONTROL_WINDOW "41", 640 / 2, 480 / 2);
		cvShowImage(CONTROL_WINDOW "41", background);
		cvCreateTrackbar("accValue", CONTROL_WINDOW "41", &(userdata->accValue), 1024, trackbarCallback);

#endif
		filterByHSV(image1, minScalar, maxScalar, tmp3d);
		if (history_mode) {
			cvCopy(image1, tmp3d, NULL);
			cvCopy(image1, tmp3d2, NULL);
			//cvCvtColor(image1, tmp3d, CV_BGR2HSV);

			//CvRect rect = cvRect(userdata->size.width * 3 / 4 - 40, userdata->size.height / 2 - 40, 80, 80);
			//CvRect rect = cvRect(userdata->size.width * 1 / 4 - 40, userdata->size.height / 2 - 40, userdata->size.width * 3 / 4, 80);
			CvRect rect = cvRect(userdata->square.origin.x, userdata->square.origin.y, userdata->square.size.width, userdata->square.size.height);
			cvSetImageROI(tmp3d, rect);
			GraficarHistograma(tmp3d, tmp3d2);
			cvResetImageROI(tmp3d);

			cvCopy(tmp3d2, tmp3d, NULL);
		}
		else {
			cvCopy(image1, tmp3d, NULL);
		}
	}
	else { // Image has 1 channel

		cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);

		if (otsu_mode) { // Apply Otsu's method
			COND_PRINTF(", Otsu");
			cvThreshold(tmp1d, tmp1d, 0, 255, CV_THRESH_OTSU);
		}

		if (smooth_mode) { // Apply Gaussian smoothing
			COND_PRINTF(", Gauss");
			cvSmooth(tmp1d, tmp1d, CV_GAUSSIAN, 5, 0, 0, 0);
		}

		if (close_mode) {
			COND_PRINTF(", closE");
			int n = kernelSize;
			cvErode(tmp1d, tmp1d, NULL, n);
			cvDilate(tmp1d, tmp1d, NULL, n);
		}

		if (canny_mode) { // Apply Canny's method
Developer ID: changeyourdestiny, Project: DIP, Lines of code: 67, Source file: operateImage.cpp


Example 8: FindOneWayDescriptor

void FindOneWayDescriptor(cv::flann::Index* m_pca_descriptors_tree, CvSize patch_size, int m_pca_dim_low, int m_pose_count, IplImage* patch, int& desc_idx, int& pose_idx, float& distance,
    CvMat* avg, CvMat* eigenvectors)
{
    desc_idx = -1;
    pose_idx = -1;
    distance = 1e10;
//--------
	//PCA_coeffs precalculating
	CvMat* pca_coeffs = cvCreateMat(1, m_pca_dim_low, CV_32FC1);
	int patch_width = patch_size.width;
	int patch_height = patch_size.height;
	//if (avg)
	//{
		CvRect _roi = cvGetImageROI((IplImage*)patch);
		IplImage* test_img = cvCreateImage(cvSize(patch_width,patch_height), IPL_DEPTH_8U, 1);
		if(_roi.width != patch_width|| _roi.height != patch_height)
		{

			cvResize(patch, test_img);
			_roi = cvGetImageROI(test_img);
		}
		else
		{
			cvCopy(patch,test_img);
		}
		IplImage* patch_32f = cvCreateImage(cvSize(_roi.width, _roi.height), IPL_DEPTH_32F, 1);
		float sum = cvSum(test_img).val[0];
		cvConvertScale(test_img, patch_32f, 1.0f/sum);

		//ProjectPCASample(patch_32f, avg, eigenvectors, pca_coeffs);
		//Projecting PCA
		CvMat* patch_mat = ConvertImageToMatrix(patch_32f);
		CvMat* temp = cvCreateMat(1, eigenvectors->cols, CV_32FC1);
		cvProjectPCA(patch_mat, avg, eigenvectors, temp);
		CvMat temp1;
		cvGetSubRect(temp, &temp1, cvRect(0, 0, pca_coeffs->cols, 1));
		cvCopy(&temp1, pca_coeffs);
		cvReleaseMat(&temp);
		cvReleaseMat(&patch_mat);
		//End of projecting

		cvReleaseImage(&patch_32f);
		cvReleaseImage(&test_img);
//	}

//--------

		//float* target = new float[m_pca_dim_low];
		//::flann::KNNResultSet res(1,pca_coeffs->data.fl,m_pca_dim_low);
		//::flann::SearchParams params;
		//params.checks = -1;

		//int maxDepth = 1000000;
		//int neighbors_count = 1;
		//int* neighborsIdx = new int[neighbors_count];
		//float* distances = new float[neighbors_count];
		//if (m_pca_descriptors_tree->findNearest(pca_coeffs->data.fl,neighbors_count,maxDepth,neighborsIdx,0,distances) > 0)
		//{
		//	desc_idx = neighborsIdx[0] / m_pose_count;
		//	pose_idx = neighborsIdx[0] % m_pose_count;
		//	distance = distances[0];
		//}
		//delete[] neighborsIdx;
		//delete[] distances;

		cv::Mat m_object(1, m_pca_dim_low, CV_32F);
		cv::Mat m_indices(1, 1, CV_32S);
		cv::Mat m_dists(1, 1, CV_32F);

		float* object_ptr = m_object.ptr<float>(0);
		for (int i=0;i<m_pca_dim_low;i++)
		{
			object_ptr[i] = pca_coeffs->data.fl[i];
		}

		m_pca_descriptors_tree->knnSearch(m_object, m_indices, m_dists, 1, cv::flann::SearchParams(-1) );

		desc_idx = ((int*)(m_indices.ptr<int>(0)))[0] / m_pose_count;
		pose_idx = ((int*)(m_indices.ptr<int>(0)))[0] % m_pose_count;
		distance = ((float*)(m_dists.ptr<float>(0)))[0];

	//	delete[] target;


//    for(int i = 0; i < desc_count; i++)
//    {
//        int _pose_idx = -1;
//        float _distance = 0;
//
//#if 0
//        descriptors[i].EstimatePose(patch, _pose_idx, _distance);
//#else
//		if (!avg)
//		{
//			descriptors[i].EstimatePosePCA(patch, _pose_idx, _distance, avg, eigenvectors);
//		}
//		else
//		{
//			descriptors[i].EstimatePosePCA(pca_coeffs, _pose_idx, _distance, avg, eigenvectors);
//		}
//......... part of the code omitted here .........
Developer ID: PR2, Project: pr2_plugs, Lines of code: 101, Source file: one_way_descriptor.cpp


Example 9: get_regions

/**
 * @brief Lets the user interactively select objects to track
 * @param regions Stores the rectangles that define each object
 * @param MAX_OBJECTS Maximum number of objects allowed to be tracked
 * @param argv Name of the video, used to read the corresponding default regions file
 * @return The number of objects selected by the user (<= MAX_OBJECTS)
 */
int get_regions(CvRect **regions, int MAX_OBJECTS, char *argv ) {
	
	FILE *fich;
	char name[50], *p1, *p2;
	params p;
	CvRect* r;
	int x1, y1, x2, y2, w, h;
	
	// If the regions are to be read from a file...
	if(MAX_OBJECTS > 0) {
		p.n = MAX_OBJECTS;

		strcpy( name, REGION_IN);
		p1 = strrchr( &argv[1], '/' );
		p2 = strrchr( &argv[1], '.' );
		strncat( name, (++p1), p2-p1 );
		strcat( name, "txt" );
		fich = fopen( name, "r" );
		if( ! fich ) {
			strcpy( name, REGION_IN);
			p1 = strrchr( &argv[1], '/' );
			p2 = strrchr( &argv[1], '.' );
			strncat( name, (++p1), (++p2)-p1 );
			strcat( name, "txt" );
			fich = fopen( name, "r" );
			if( ! fich ) {
				printf("Error leyendo las regiones iniciales\n");
				exit (-1);
			}
		}

		p.loc1 = std::vector<CvPoint>(MAX_OBJECTS);
		p.loc2 = std::vector<CvPoint>(MAX_OBJECTS);
		for( int i = 0; i < MAX_OBJECTS; ++i ) {
			int leidos = fscanf(fich, "%d", &p.loc1[i].x);
			leidos = fscanf(fich, "%d", &p.loc1[i].y);
			leidos = fscanf(fich, "%d", &p.loc2[i].x);
			leidos = fscanf(fich, "%d", &p.loc2[i].y);
		}
		fclose( fich );
	}

	// If they are to be selected with the mouse...
	else {
		fprintf( stderr, "Selecciona la región a rastrear\n" );
		p.n = 0;
		cvNamedWindow( win_name, CV_WINDOW_AUTOSIZE );
		cvShowImage( win_name, first_frame );
		cvSetMouseCallback( win_name, &mouse, &p );
		cvWaitKey( 0 );
		cvDestroyWindow( win_name );
		if( p.n == 0 )
			return 0;
	}
	
	// Allocate space for the list of regions
	r = (CvRect*) malloc( p.n * sizeof( CvRect ) );

	for( int i = 0; i < p.n; ++i ) {
		x1 = MIN( p.loc1[i].x, p.loc2[i].x );
		x2 = MAX( p.loc1[i].x, p.loc2[i].x );
		y1 = MIN( p.loc1[i].y, p.loc2[i].y );
		y2 = MAX( p.loc1[i].y, p.loc2[i].y );
		w = x2 - x1;
		h = y2 - y1;
		
		//printf("%d %d %d %d\n", x1, y1, x2, y2);
		// Make sure the width and height are odd
		w = ( w % 2 )? w : w+1;
		h = ( h % 2 )? h : h+1;
		r[i] = cvRect( x1, y1, w, h );
	}
	*regions = r;
	return p.n;
}
Developer ID: rotty11, Project: Tfm, Lines of code: 82, Source file: trackerStr1.cpp
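
The CvRect array that get_regions returns is what the tracker gets seeded with; a typical consumer simply walks it, for example to draw the selections. A hedged fragment that would live in the same trackerStr1.cpp (video_path is an assumed variable; first_frame is the global frame referenced above; with MAX_OBJECTS set to 0 the path is not used, since the regions come from the mouse):

CvRect *regions = NULL;
int n = get_regions(&regions, 0, video_path);   /* 0 => interactive mouse selection */
for (int i = 0; i < n; ++i)
{
	CvRect r = regions[i];
	cvRectangle(first_frame, cvPoint(r.x, r.y),
	            cvPoint(r.x + r.width, r.y + r.height),
	            CV_RGB(255, 0, 0), 2, 8, 0);
}
free(regions);   /* get_regions allocates the array with malloc */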


Example 10: main

int main(int argc, char * argv[])
{
	if(argc < 2)
	{
		showUsage();
	}

	bool inv = false;
	for(int i=1; i<argc-1; ++i)
	{
		if(strcmp(argv[i], "-inv") == 0)
		{
			inv = true;
			printf(" Inversing option activated...\n");
			continue;
		}
		showUsage();
		printf(" Not recognized option: \"%s\"\n", argv[i]);
	}

	std::string path = argv[argc-1];
	printf(" Path = %s\n", path.c_str());

	UDirectory dir(path, "jpg bmp png tiff jpeg");
	if(!dir.isValid())
	{
		printf("Path invalid!\n");
		exit(-1);
	}

	std::string targetDirectory = path+"_joined";
	UDirectory::makeDir(targetDirectory);
	printf(" Creating directory \"%s\"\n", targetDirectory.c_str());


	std::string fileNameA = dir.getNextFilePath();
	std::string fileNameB = dir.getNextFilePath();

	int i=1;
	while(!fileNameA.empty() && !fileNameB.empty())
	{
		if(inv)
		{
			std::string tmp = fileNameA;
			fileNameA = fileNameB;
			fileNameB = tmp;
		}

		std::string ext = UFile::getExtension(fileNameA);

		std::string targetFilePath = targetDirectory+UDirectory::separator()+uNumber2Str(i++)+"."+ext;

		IplImage * imageA = cvLoadImage(fileNameA.c_str(), CV_LOAD_IMAGE_COLOR);
		IplImage * imageB = cvLoadImage(fileNameB.c_str(), CV_LOAD_IMAGE_COLOR);

		fileNameA.clear();
		fileNameB.clear();

		if(imageA && imageB)
		{
			CvSize sizeA = cvGetSize(imageA);
			CvSize sizeB = cvGetSize(imageB);
			CvSize targetSize = {0};
			targetSize.width = sizeA.width + sizeB.width;
			targetSize.height = sizeA.height > sizeB.height ? sizeA.height : sizeB.height;
			IplImage* targetImage = cvCreateImage(targetSize, imageA->depth, imageA->nChannels);
			if(targetImage)
			{
				cvSetImageROI( targetImage, cvRect( 0, 0, sizeA.width, sizeA.height ) );
				cvCopy( imageA, targetImage );
				cvSetImageROI( targetImage, cvRect( sizeA.width, 0, sizeB.width, sizeB.height ) );
				cvCopy( imageB, targetImage );
				cvResetImageROI( targetImage );

				if(!cvSaveImage(targetFilePath.c_str(), targetImage))
				{
					printf("Error : saving to \"%s\" goes wrong...\n", targetFilePath.c_str());
				}
				else
				{
					printf("Saved \"%s\" \n", targetFilePath.c_str());
				}

				cvReleaseImage(&targetImage);

				fileNameA = dir.getNextFilePath();
				fileNameB = dir.getNextFilePath();
			}
			else
			{
				printf("Error : can't allocated the target image with size (%d,%d)\n", targetSize.width, targetSize.height);
			}
		}
		else
		{
			printf("Error: loading images failed!\n");
		}

		if(imageA)
		{
//......... part of the code omitted here .........
Developer ID: FNicolai, Project: rtabmap, Lines of code: 101, Source file: main.cpp


Example 11: CVAPI

typedef struct CvBGCodeBookModel
{
    CvSize size;
    int t;
    uchar cbBounds[3];
    uchar modMin[3];
    uchar modMax[3];
    CvBGCodeBookElem** cbmap;
    CvMemStorage* storage;
    CvBGCodeBookElem* freeList;
} CvBGCodeBookModel;

CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel();
CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );

CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
                                CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                const CvArr* mask CV_DEFAULT(0) );

CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
                             CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );

CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
                                    CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
                                    const CvArr* mask CV_DEFAULT(0) );

CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
                               float perimScale CV_DEFAULT(4.f),
                               CvMemStorage* storage CV_DEFAULT(0),
                               CvPoint offset CV_DEFAULT(cvPoint(0,0)));

#ifdef __cplusplus
Developer ID: RavishankarDuMCA10, Project: cubgs, Lines of code: 31, Source file: background_segm.hpp
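
In the declarations above, cvRect(0,0,0,0) is the default ROI and means "use the whole image"; passing a non-empty rectangle restricts both learning and differencing to that window. A hedged usage sketch (the half-frame ROI, the staleness threshold and the learning scheme are assumptions, not taken from the header):

/* frame: current 8-bit 3-channel video frame; fgmask: 8-bit single-channel
 * output mask of the same size; learning: non-zero while the background is
 * still being modelled. The model comes from cvCreateBGCodeBookModel() and
 * is released later with cvReleaseBGCodeBookModel(&model). */
static void codebook_step(CvBGCodeBookModel *model, IplImage *frame,
                          IplImage *fgmask, int learning)
{
    /* restrict the codebook to the top half of the frame */
    CvRect roi = cvRect(0, 0, frame->width, frame->height / 2);

    if (learning)
    {
        cvBGCodeBookUpdate(model, frame, roi, NULL);
    }
    else
    {
        cvBGCodeBookDiff(model, frame, fgmask, roi);             /* foreground pixels -> 255 */
        cvBGCodeBookClearStale(model, model->t / 2, roi, NULL);  /* drop long-unseen codewords */
    }
}

Before the first update one would also fill in model->cbBounds, model->modMin and model->modMax with small per-channel thresholds; the exact values are application-specific.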


Example 12: cvFindBlobsByCCClasters

void cvFindBlobsByCCClasters(IplImage* pFG, CvBlobSeq* pBlobs, CvMemStorage* storage)
{   /* Create contours: */
    IplImage*       pIB = NULL;
    CvSeq*          cnt = NULL;
    CvSeq*          cnt_list = cvCreateSeq(0,sizeof(CvSeq),sizeof(CvSeq*), storage );
    CvSeq*          clasters = NULL;
    int             claster_cur, claster_num;

    pIB = cvCloneImage(pFG);
    cvThreshold(pIB,pIB,128,255,CV_THRESH_BINARY);
    cvFindContours(pIB,storage, &cnt, sizeof(CvContour), CV_RETR_EXTERNAL);
    cvReleaseImage(&pIB);

    /* Create cnt_list.      */
    /* Process each contour: */
    for(; cnt; cnt=cnt->h_next)
    {
        cvSeqPush( cnt_list, &cnt);
    }

    claster_num = cvSeqPartition( cnt_list, storage, &clasters, CompareContour, NULL );

    for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
    {
        int         cnt_cur;
        CvBlob      NewBlob;
        double      M00,X,Y,XX,YY; /* image moments */
        CvMoments   m;
        CvRect      rect_res = cvRect(-1,-1,-1,-1);
        CvMat       mat;

        for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
        {
            CvRect  rect;
            CvSeq*  cnt;
            int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
            if(k!=claster_cur) continue;
            cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
            rect = ((CvContour*)cnt)->rect;

            if(rect_res.height<0)
            {
                rect_res = rect;
            }
            else
            {   /* Unite rects: */
                int x0,x1,y0,y1;
                x0 = MIN(rect_res.x,rect.x);
                y0 = MIN(rect_res.y,rect.y);
                x1 = MAX(rect_res.x+rect_res.width,rect.x+rect.width);
                y1 = MAX(rect_res.y+rect_res.height,rect.y+rect.height);
                rect_res.x = x0;
                rect_res.y = y0;
                rect_res.width = x1-x0;
                rect_res.height = y1-y0;
            }
        }

        if(rect_res.height < 1 || rect_res.width < 1)
        {
            X = 0;
            Y = 0;
            XX = 0;
            YY = 0;
        }
        else
        {
            cvMoments( cvGetSubRect(pFG,&mat,rect_res), &m, 0 );
            M00 = cvGetSpatialMoment( &m, 0, 0 );
            if(M00 <= 0 ) continue;
            X = cvGetSpatialMoment( &m, 1, 0 )/M00;
            Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
            XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
            YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
        }
        NewBlob = cvBlob(rect_res.x+(float)X,rect_res.y+(float)Y,(float)(4*sqrt(XX)),(float)(4*sqrt(YY)));
        pBlobs->AddBlob(&NewBlob);

    }   /* Next cluster. */

    #if 0
    {   // Debug info:
        IplImage* pI = cvCreateImage(cvSize(pFG->width,pFG->height),IPL_DEPTH_8U,3);
        cvZero(pI);
        for(claster_cur=0; claster_cur<claster_num; ++claster_cur)
        {
            int         cnt_cur;
            CvScalar    color = CV_RGB(rand()%256,rand()%256,rand()%256);

            for(cnt_cur=0; cnt_cur<clasters->total; ++cnt_cur)
            {
                CvSeq*  cnt;
                int k = *(int*)cvGetSeqElem( clasters, cnt_cur );
                if(k!=claster_cur) continue;
                cnt = *(CvSeq**)cvGetSeqElem( cnt_list, cnt_cur );
                cvDrawContours( pI, cnt, color, color, 0, 1, 8);
            }

            CvBlob* pB = pBlobs->GetBlob(claster_cur);
            int x = cvRound(CV_BLOB_RX(pB)), y = cvRound(CV_BLOB_RY(pB));
//......... part of the code omitted here .........
Developer ID: colombc, Project: Sankore-ThirdParty, Lines of code: 101, Source file: enteringblobdetection.cpp
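
The inner loop above unites the bounding rectangles of all contours in a cluster by taking the min/max of their corners. Pulled out as a hedged helper (not part of the original file), the same logic reads:

/* Smallest CvRect that contains both a and b; mirrors the manual union above. */
static CvRect rect_union(CvRect a, CvRect b)
{
    int x0 = MIN(a.x, b.x);
    int y0 = MIN(a.y, b.y);
    int x1 = MAX(a.x + a.width,  b.x + b.width);
    int y1 = MAX(a.y + a.height, b.y + b.height);
    return cvRect(x0, y0, x1 - x0, y1 - y0);
}

With it, the per-cluster accumulation would reduce to rect_res = (rect_res.height < 0) ? rect : rect_union(rect_res, rect);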


Example 13: cvSize


//......... part of the code omitted here .........
    }   /*  Analyze blob list to find best blob trajectory. */

    {   /* Delete bad tracks: */
        int i;
        for(i=m_TrackNum-1; i>=0; --i)
        {   /* Delete bad tracks: */
            if(m_TrackSeq[i].pBlobs[0]) continue;
            if(m_TrackNum>0)
                m_TrackSeq[i] = m_TrackSeq[--m_TrackNum];
        }   /* Delete bad tracks: */
    }

#ifdef USE_OBJECT_DETECTOR
    if( m_split_detector && pNewBlobList->GetBlobNum() > 0 )
    {
        int num_new_blobs = pNewBlobList->GetBlobNum();
        int i = 0;

        if( m_roi_seq ) cvClearSeq( m_roi_seq );
        m_debug_blob_seq.Clear();
        for( i = 0; i < num_new_blobs; ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvMat roi_stub;
            CvMat* roi_mat = 0;
            CvMat* scaled_roi_mat = 0;

            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 0 );
            m_debug_blob_seq.AddBlob(&d_b);

            float scale = m_param_roi_scale * m_min_window_size.height / CV_BLOB_WY(b);

            float b_width =   MAX(CV_BLOB_WX(b), m_min_window_size.width / scale)
                            + (m_param_roi_scale - 1.0F) * (m_min_window_size.width / scale)
                            + 2.0F * m_max_border / scale;
            float b_height = CV_BLOB_WY(b) * m_param_roi_scale + 2.0F * m_max_border / scale;

            CvRect roi = cvRectIntersection( cvRect( cvFloor(CV_BLOB_X(b) - 0.5F*b_width),
                                                     cvFloor(CV_BLOB_Y(b) - 0.5F*b_height),
                                                     cvCeil(b_width), cvCeil(b_height) ),
                                             cvRect( 0, 0, pImg->width, pImg->height ) );
            if( roi.width <= 0 || roi.height <= 0 )
                continue;

            if( m_roi_seq ) cvSeqPush( m_roi_seq, &roi );

            roi_mat = cvGetSubRect( pImg, &roi_stub, roi );
            scaled_roi_mat = cvCreateMat( cvCeil(scale*roi.height), cvCeil(scale*roi.width), CV_8UC3 );
            cvResize( roi_mat, scaled_roi_mat );

            m_detected_blob_seq.Clear();
            m_split_detector->Detect( scaled_roi_mat, &m_detected_blob_seq );
            cvReleaseMat( &scaled_roi_mat );

            for( int k = 0; k < m_detected_blob_seq.GetBlobNum(); ++k )
            {
                CvDetectedBlob* b = (CvDetectedBlob*) m_detected_blob_seq.GetBlob(k);

                /* scale and shift each detected blob back to the original image coordinates */
                CV_BLOB_X(b) = CV_BLOB_X(b) / scale + roi.x;
                CV_BLOB_Y(b) = CV_BLOB_Y(b) / scale + roi.y;
                CV_BLOB_WX(b) /= scale;
                CV_BLOB_WY(b) /= scale;

                CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 1,
                        b->response );
                m_debug_blob_seq.AddBlob(&d_b);
            }

            if( m_detected_blob_seq.GetBlobNum() > 1 )
            {
                /*
                 * Split blob.
                 * The original blob is replaced by the first detected blob,
                 * remaining detected blobs are added to the end of the sequence:
                 */
                CvBlob* first_b = m_detected_blob_seq.GetBlob(0);
                CV_BLOB_X(b)  = CV_BLOB_X(first_b);  CV_BLOB_Y(b)  = CV_BLOB_Y(first_b);
                CV_BLOB_WX(b) = CV_BLOB_WX(first_b); CV_BLOB_WY(b) = CV_BLOB_WY(first_b);

                for( int j = 1; j < m_detected_blob_seq.GetBlobNum(); ++j )
                {
                    CvBlob* detected_b = m_detected_blob_seq.GetBlob(j);
                    pNewBlobList->AddBlob(detected_b);
                }
            }
        }   /* For each new blob. */

        for( i = 0; i < pNewBlobList->GetBlobNum(); ++i )
        {
            CvBlob* b = pNewBlobList->GetBlob(i);
            CvDetectedBlob d_b = cvDetectedBlob( CV_BLOB_X(b), CV_BLOB_Y(b), CV_BLOB_WX(b), CV_BLOB_WY(b), 2 );
            m_debug_blob_seq.AddBlob(&d_b);
        }
    }   // if( m_split_detector )
#endif

    return result;

}   /* cvDetectNewBlob */
Developer ID: colombc, Project: Sankore-ThirdParty, Lines of code: 101, Source file: enteringblobdetection.cpp


Example 14: GetMinRect

CvRect GetMinRect(IplImage* srcImg, CvScalar bgColor/*=CV_RGB(0,0,0)*/)//returns the bounding rectangle of the non-background region of the image
{
	CvRect roiRc=cvRect(0,0,0,0);
	if(!CV_IS_IMAGE(srcImg) )
	{
		return roiRc;
	}
	bool isGetTop=false,isGetBottom=false,isGetLeft=false,isGetRight=false;//whether each boundary has been found
	for(int i=0; i<srcImg->height; ++i){
		for(int top=0; top<srcImg->width && !isGetTop; ++top){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + i*srcImg->widthStep)[top*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.y=i;//found the topmost row
				isGetTop=true;
				break;
			}
		}
	}
	for(int i=srcImg->height-1; i>0; --i){
		for(int bottom=0; bottom < srcImg->width && !isGetBottom; ++bottom){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + i*srcImg->widthStep)[bottom*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.height=i;//found the bottommost row
				isGetBottom=true;
				break;
			}
		}
	}

	for(int i=0; i<srcImg->width; ++i){
		for(int left=0; left<srcImg->height && !isGetLeft; ++left){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + left*srcImg->widthStep)[i*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.x=i;//found the leftmost column
				isGetLeft=true;
				break;
			}
		}
	}
	for(int i=srcImg->width - 1; i>0; --i)
	{
		for(int right=0; right<srcImg->height && !isGetRight; ++right){
			CvScalar piexColor=bgColor;
			for (int k=0; k<srcImg->nChannels; ++k){
				uchar piex=((uchar*)srcImg->imageData + right*srcImg->widthStep)[i*srcImg->nChannels+k];
				piexColor.val[k]=piex;
			}
			if (piexColor.val[0]==bgColor.val[0] &&
				piexColor.val[1]==bgColor.val[1] &&
				piexColor.val[2]==bgColor.val[2] &&
				piexColor.val[3]==bgColor.val[3]){
					continue;
			}else{
				roiRc.width=i;//found the rightmost column
				isGetRight=true;
				break;
			}
		}
	}
	if (!isGetLeft || !isGetRight || !isGetTop || !isGetBottom)
	{
		return roiRc;//target not found - the whole image is background
	}
	roiRc.width=roiRc.width-roiRc.x+1;
	roiRc.height=roiRc.height-roiRc.y+1;

	if (roiRc.x+roiRc.width>srcImg->width)
	{
		roiRc.width--;
	}
	if (roiRc.y+roiRc.height>srcImg->height)
	{
		roiRc.height--;
//......... part of the code omitted here .........
Developer ID: github188, Project: piglets-monitoring, Lines of code: 101, Source file: Public.cpp
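
A hedged usage sketch for GetMinRect (OpenCV 2.x C API assumed; the file names are placeholders and black is taken as the background colour, matching the default noted in the signature): crop an image down to its non-background bounding box.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
	IplImage *src = cvLoadImage("mask.png", CV_LOAD_IMAGE_COLOR);   /* placeholder name */
	if (!src) return -1;

	CvRect box = GetMinRect(src, CV_RGB(0, 0, 0));
	if (box.width > 0 && box.height > 0)
	{
		IplImage *cropped = cvCreateImage(cvSize(box.width, box.height), src->depth, src->nChannels);
		cvSetImageROI(src, box);
		cvCopy(src, cropped, NULL);
		cvResetImageROI(src);
		cvSaveImage("mask_cropped.png", cropped, 0);
		cvReleaseImage(&cropped);
	}
	cvReleaseImage(&src);
	return 0;
}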


Example 15: symmetry_img

static IplImage*  symmetry_img(IplImage* pImg, int loop)
{
	int T=10;
	int X=0;
	int i=0;
	int j=0;
	IplImage* I=pImg;
	IplImage* IL;
	IplImage* IR;
//......... part of the code omitted here .........
	 
