C++ cvQueryFrame Function Code Examples


This article collects typical usage examples of the cvQueryFrame function in C++. If you are struggling with questions such as: What exactly does cvQueryFrame do? How do I call it? What does real code that uses cvQueryFrame look like? Then the hand-picked code examples below should help.



The sections below present 20 code examples of the cvQueryFrame function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
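
Before the individual examples, here is a minimal, generic sketch of the capture loop that every example below builds on. This sketch is not taken from any of the listed projects; it assumes the legacy OpenCV C API, and the exact header paths vary between OpenCV versions and installations. The key point it illustrates: cvQueryFrame grabs and decodes the next frame from a CvCapture, and the returned IplImage* is owned by the capture, so the caller must not release it.

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdio.h>

int main(void)
{
    CvCapture *capture = cvCaptureFromCAM(0);   /* open the default camera */
    if (!capture) {
        fprintf(stderr, "Cannot open capture device\n");
        return 1;
    }

    cvNamedWindow("preview", CV_WINDOW_AUTOSIZE);

    while (1) {
        /* grab and decode the next frame; the returned IplImage* belongs to
           the capture structure, so never call cvReleaseImage on it */
        IplImage *frame = cvQueryFrame(capture);
        if (!frame)
            break;                              /* end of stream or device error */

        cvShowImage("preview", frame);
        if ((cvWaitKey(10) & 255) == 27)        /* ESC quits */
            break;
    }

    cvDestroyWindow("preview");
    cvReleaseCapture(&capture);
    return 0;
}

Note that most of the examples that follow are excerpts from larger projects, so they may refer to helper functions and global variables defined elsewhere in the source file named in each example's credit line.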

Example 1: main

int main(){
    Tserial *com;
    char ch;
    com = new Tserial();
    com->connect("COM3", 4800, spNONE);
    CvCapture *capture = 0;
    IplImage  *img = 0, *img0 = 0;
    int       key = 0;
    com->sendChar('a');
    int rpx,rpy,ryx,ryy,bx,by,rpx1,rpy1,ryx1,ryy1,bx1,by1;
    double theta1=0.1,theta2=0.1;
    /* initialize camera */
    capture = cvCaptureFromCAM(0);
    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1024 );

    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 720 );

    /* always check */
    if ( !capture ) {
        fprintf( stderr, "Cannot initialize webcam!\n" );
        return 1;
    }
 
    /* create a window for the video */
    cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );
 
    while( key != 'q' ) {
        /* get a frame */
        img = cvQueryFrame( capture );
      
        /* always check */
        if( !img ) break;
        
        img0=cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
        cvCvtColor(img,img0,CV_BGR2HSV);
        // cvSetMouseCallback( "image", mouseHandler, img0 );
      // cvThreshold(img0, img0, 85, 255, CV_THRESH_BINARY);
       
        /* display the current frame */
        cvShowImage( "image", img0 );
        rpx=corp(img0,1);
        ryx=cory(img0,1);
        bx=corr(img0,1);
        rpy=corp(img0,0);
        ryy=cory(img0,0);
        by=corr(img0,0);
        rpx1=rpx-ryx;
        rpy1=rpx-ryy;
        bx1=bx-ryx;
        by1=by-ryy;
        theta1=atan((double)rpy1/(double)rpx1);
        theta2=atan((double)by1/(double)bx1);
        if(theta1>0 && theta1-theta2>0 && rpx1>0)
            com->sendChar('r');
        else if(theta1<=0 && M_PI+theta1-theta2>0)
            com->sendChar('l');
        else if(theta1<0 && theta2>=0 && rpx<ryx)
            com->sendChar('r');
        else if(theta1>0 && theta1-theta2<0)
            com->sendChar('l');
        else if(theta1>0 && theta1-theta2>0 && rpx1<0)
            com->sendChar('l');
        else if(theta1-theta2==0.0 && rpx1*bx1>0)
            com->sendChar('f');
        else if(theta1-theta2==0.0 && rpx1*bx1<0){
            com->sendChar('r');
            cvWaitKey(5);
        }

        /* exit if the user presses 'q' */
        key = cvWaitKey( 1 );
        cvReleaseImage(&img0);
    }


    /* free memory */
    cvDestroyWindow( "image" );
    cvReleaseCapture( &capture );
    com->disconnect();
    return 0;
}
Developer ID: asachin16, Project: IIT-Kanpur, Lines of code: 81, Source file: techfest1.cpp


Example 2: main

 int main() {
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);
  rectROIbot=cvRect(0,50,80,10);
  CvPoint b_cir_center,t_cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  int_serial();
  unsigned char sendBuf;
   CvCapture* capture = cvCaptureFromCAM( -1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(100, 20, 40),cvScalar(140, 120, 100),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
   if (seq==0) {
     threshframebot=cvCloneImage(threshframe);
     cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;//}
//////////////////////////////////////////////////////////////////////////////////////////
//    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;// }
//////////////////////////////////////////////////////////////////////////////////////
//   if(seq==seqdiv+1) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
//......... (remaining code omitted) .........
Developer ID: bhuneshwar21, Project: AUV, Lines of code: 101, Source file: opt2s.cpp


Example 3: main

int main (int argc, const char * argv[]) {
    int w=1280,h=720;
    int i=0;
    int nkeypoints=0;
    int press=0;
    char img2_file[] = "/Users/quake0day/ana2/MVI_0124_QT 768Kbps_012.mov";
    vl_bool render=1;
    vl_bool first=1;
    VlSiftFilt * myFilter=0;
    VlSiftKeypoint const* keys;
    //CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
    CvCapture * camera = cvCreateFileCapture(img2_file);
    vl_sift_pix *descriptorsA, *descriptorsB;
    int ndescA=0, ndescB=0;

    //DescriptorA file
    int dscfd;
    struct stat filestat;
    dscfd = open("/Users/quake0day/ana2/saveC.jpg.dsc", O_RDONLY, 0644);
    fstat(dscfd, &filestat);
    int filesize=filestat.st_size;
    descriptorsA=(vl_sift_pix*)mmap(0, filesize, PROT_READ, MAP_SHARED, dscfd, 0);
    ndescA=(filesize/sizeof(vl_sift_pix))/DESCSIZE;
    printf("number of descriptors: %d\n", ndescA);

    //Build kdtreeA
    VlKDForest *myforest=vl_kdforest_new(VL_TYPE_FLOAT, DESCSIZE, 1);
    vl_kdforest_build(myforest, ndescA, descriptorsA);

    //DescriptorsB file
    dscfd=open("/Users/quake0day/ana2/saveD.jpg.dsc", O_RDONLY, 0644);
    fstat(dscfd, &filestat);
    filesize=filestat.st_size;
    descriptorsB=(vl_sift_pix*)mmap(0, filesize, PROT_READ, MAP_SHARED, dscfd, 0);
    ndescB=(filesize/sizeof(vl_sift_pix))/DESCSIZE;
    printf("number of descriptors: %d\n", ndescB);

    //Build kdtreeB
    VlKDForest *myforestB=vl_kdforest_new(VL_TYPE_FLOAT, DESCSIZE, 1);
    vl_kdforest_build(myforestB, ndescB, descriptorsB);

    //Neighbors
    VlKDForestNeighbor *neighbors=(VlKDForestNeighbor*)malloc(sizeof(VlKDForestNeighbor));
    VlKDForestNeighbor *neighborsB=(VlKDForestNeighbor*)malloc(sizeof(VlKDForestNeighbor));

    //Image variables
    vl_sift_pix* fim;
    int err=0;
    int octave, nlevels, o_min;
    cvNamedWindow("Hello", 1);

    //For text
    CvFont font;
    double hScale=2;
    double vScale=2;
    int    lineWidth=2;
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth, 1);

    IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage("2.jpg", -1);

    IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
    IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
    octave=2;
    nlevels=5;
    o_min=1;
    myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
    vl_sift_set_peak_thresh(myFilter, 0.5);
    fim=malloc(sizeof(vl_sift_pix)*w*h);
    float thre;


    while (myCVImage) {
        dprintf("%d*%d\n",myCVImage->width,myCVImage->height);

        cvResize(myCVImage, resizingImg, CV_INTER_AREA);
        dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
        if (press=='s') {
            cvSaveImage("save.jpg", resizingImg);
        }
        cvConvertImage(resizingImg, afterCVImage, 0);


        for (i=0; i<h; i++) {
            for (int j=0; j<w; j++) {
                fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
            }
        }

        //vl_sift_set_peak_thresh(myFilter, 0.5);
        //vl_sift_set_edge_thresh(myFilter, 10.0);
        first=1;
        while (1) {
            printf("~~~~~~~~~~start of octave~~~~~~~~~~~~\n");


            if (first) {
                first=0;
                thre=0.25;
                err=vl_sift_process_first_octave(myFilter, fim);
            }
//......... (remaining code omitted) .........
Developer ID: quake0day, Project: SIFT_Project, Lines of code: 101, Source file: main.c


Example 4: main

/**
 * @brief Main entry point
 * @param argc Number of program arguments
 * @param argv Program argument strings
 * @return Nothing if successful, or a negative number on error
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Variable declarations
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	float **ref_histos, histo_aux[1][HTAM];
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	float factor = 1.0 / 255.0, sum = 0.0f;
	CvRect *regions;
	int num_objects = 0;
	int MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	int nFrames = (int) cvGetCaptureProperty( video , CV_CAP_PROP_FRAME_COUNT );
	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	// Initialize the random number generator
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	hsv_frame = bgr2hsv( first_frame );
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
	
	// Compute the reference histograms and distribute the initial particles
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Allocate memory for the precomputed bin table
	int **tabla_bin = (int**) malloc( hsv_frame->height * sizeof(int*) );
	for(int f = 0; f < hsv_frame->height; ++f)
		tabla_bin[f] = (int*) malloc( hsv_frame->width * sizeof(int) );

	// Show the tracking
	if( show_tracking ) {

		// Show all the particles
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Draw the most promising particle of each object
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "RGB", 1 );
		cvShowImage( "RGB", first_frame );
		cvWaitKey( 5 );
	}

	// Export the reference histograms and the frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				exit(-1);
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

//......... (remaining code omitted) .........
Developer ID: rotty11, Project: Tfm, Lines of code: 101, Source file: trackerStr1.cpp


Example 5: camera_atualiza

void camera_atualiza(camera *cam) {
  IplImage *image = cvQueryFrame(cam->capture);

  camera_converte(cam, image);
}
Developer ID: Paulo-http, Project: AirNinja, Lines of code: 5, Source file: camera.c


Example 6: main

int main(int argc, char* argv[])
{
    // Default capture size - 640x480
    CvSize size = cvSize(640,480);
    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM( 0 );
    if( !capture )
    {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        getchar();
        return -1;
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( "Camera", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );

    // Detect a red ball
    CvScalar hsv_min = cvScalar(150, 84, 130, 0);
    CvScalar hsv_max = cvScalar(358, 256, 255, 0);
    IplImage* hsv_frame    = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage* thresholded  = cvCreateImage(size, IPL_DEPTH_8U, 1);

    while( 1 )
    {
        // Get one frame
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
        {
              fprintf( stderr, "ERROR: frame is null...\n" );
              getchar();
              break;
        }
        // Covert color space to HSV as it is much easier to filter colors in the HSV color-space.
        cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
        // Memory for hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // hough detector works better with some smoothing of the image
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                        thresholded->height/4, 100, 50, 10, 400);

        for (int i = 0; i < circles->total; i++)
        {
            float* p = (float*)cvGetSeqElem( circles, i );
            printf("Ball! x=%f y=%f r=%f\n\r", p[0],p[1],p[2] );
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                    3, CV_RGB(0,255,0), -1, 8, 0 );
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                    cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
        }

        cvShowImage( "Camera", frame ); // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
        cvShowImage( "After Color Filtering", thresholded ); // The stream after color filtering
        cvReleaseMemStorage(&storage);
        // Do not release the frame!
        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
        //remove higher bits using AND operator
        if( (cvWaitKey(10) & 255) == 27 ) break;
    }
    // Release the capture device housekeeping
    cvReleaseCapture( &capture );
    cvDestroyAllWindows();
    return 0;
}
Developer ID: myok12, Project: blimp_blobs, Lines of code: 69, Source file: blimp_blobs_orange.cpp


Example 7: main

int main(void){

	IplImage *theFrame, *theFrame2, *theFrame3;
	CvCapture *theCam=cvCreateCameraCapture(-1);
	char theChar;
	CvSize size;
	CvPoint theCentroid;
	CvSeq* theContour;
	
	ttModels theModels;
	ttInit(&theModels);
	
	if(!theCam) {
		printf("\nCamera not found\n");
		return(0);
	}
	/*theFrame=cvLoadImage("colormap.png",1);
	theFrame2=cvLoadImage("colormap.png",1);//*/
	size=cvSize(cvGetCaptureProperty(theCam,CV_CAP_PROP_FRAME_WIDTH),cvGetCaptureProperty(theCam,CV_CAP_PROP_FRAME_HEIGHT));
	theFrame2=cvCreateImage(size,IPL_DEPTH_8U,1);
	theFrame3=cvCreateImage(size,IPL_DEPTH_8U,3);
	
	cvNamedWindow("win1",1);
	cvNamedWindow("win2",1);
	cvMoveWindow("win1",0,0);
	cvMoveWindow("win2",700,0);
	/*cvNamedWindow("H",0);
	cvNamedWindow("S",0);
	cvNamedWindow("V",0);
	cvMoveWindow("H",0,500);
	cvMoveWindow("S",350,500);
	cvMoveWindow("V",700,500);//*/
	while ((theChar=cvWaitKey(20))!=0x1B){
		theFrame=cvQueryFrame(theCam);
		cvCopy(theFrame,theFrame3,NULL);
		cvZero(theFrame2);
		ttSegmenter(theFrame,theFrame2,1);
		ttImprover(theFrame2,theFrame2);
		//printf("nchannels %d\n",theFrame2->nChannels);
		//printf("chingao!\n");
		cvShowImage("win2",theFrame2);
		theContour=ttContours(theFrame2,2,&theModels,NULL);
		
		//cvZero(out);
		if (theContour==NULL)
			continue;
		theCentroid=ttFindCentroid(theContour);
		cvCircle(theFrame3,theCentroid,1,CV_RGB(255,255,255),1,8,0);
		cvCircle(theFrame3,theCentroid,6,CV_RGB(255,0,0),1,8,0);
		//cvCircle(theFrame3,theCentroid,11,CV_RGB(255,255,255),1,8,0);
		//cvCircle(theFrame3,theCentroid,16,CV_RGB(255,0,0),1,8,0);
		//cvCircle(theFrame3,ttFindCentroid(theContour),15,CV_RGB(255,255,255),1,8,0);
		cvDrawContours(theFrame3,theContour,CV_RGB(255,255,255),CV_RGB(255,255,255),1,5,8,cvPoint(0,0));
		cvShowImage("win1",theFrame3);//*/
	}
	cvDestroyAllWindows();
	/* theFrame is owned by the capture and must not be released here */
	cvReleaseImage(&theFrame2);
	cvReleaseImage(&theFrame3);
	cvReleaseCapture(&theCam);
	return(0);
}
Developer ID: craksz, Project: sdk_gtk-201, Lines of code: 62, Source file: testing04.c


Example 8: main

int main(){
  
      CvCapture* capture =0;       
      capture = cvCaptureFromCAM(0);

      if(!capture){
            printf("Capture failure\n");
            return -1;
      }
      
      frame=0;
      frame = cvQueryFrame(capture);           
      if(!frame) return -1;
  
     //create a blank image and assigned to 'imgTracking' which has the same size of original video
     imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);

     cvZero(imgTracking); //covert the image, 'imgTracking' to black

     cvNamedWindow("Video");     
     cvNamedWindow("Ball");

      //iterate through each frames of the video     
      while(true){

            frame = cvQueryFrame(capture);           
            if(!frame) break;
            frame=cvCloneImage(frame); 
            
            cvSmooth(frame, frame, CV_GAUSSIAN, 3, 3); //smooth the original image using a Gaussian kernel

            IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
            cvCvtColor(frame, imgHSV, CV_BGR2HSV); //change the color format from BGR to HSV

            IplImage* imgThresh = GetThresholdedImage(imgHSV, 0, 143, 149, 6, 245, 256);
            cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN, 3, 3); //smooth the binary image using a Gaussian kernel
            trackObject(imgThresh, 255, 0, 0, 1);

            IplImage* imgHSV2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
            cvCvtColor(frame, imgHSV2, CV_BGR2HSV);

            IplImage* imgThresh2 = GetThresholdedImage(imgHSV2, 26, 61, 152, 79, 166, 248);
            cvSmooth(imgThresh2, imgThresh2, CV_GAUSSIAN, 3, 3); //smooth the binary image using a Gaussian kernel
            trackObject(imgThresh2, 0, 255, 0, 2);

            // Add the tracking image and the frame
            cvAdd(frame, imgTracking, frame);

            cvShowImage("Ball", imgThresh);
            cvShowImage("Ball2", imgThresh2);
            cvShowImage("Video", frame);
           
           //Clean up used images
           cvReleaseImage(&imgHSV);
	   cvReleaseImage(&imgHSV2);
           cvReleaseImage(&imgThresh);
	   cvReleaseImage(&imgThresh2);             
           cvReleaseImage(&frame);

            //Wait 10mS
            int c = cvWaitKey(10);
            //If 'ESC' is pressed, break the loop
            if((char)c==27 ) break;      
      }

      cvDestroyAllWindows() ;
      cvReleaseImage(&imgTracking);
      cvReleaseCapture(&capture);     

      return 0;
}
Developer ID: josepabloapu, Project: renekton, Lines of code: 80, Source file: clave.cpp


Example 9: cvQueryFrame

IplImage * VideoReader::getImage(void)
{
    return cvQueryFrame(m_video);
}
Developer ID: Neckara, Project: ProjetSI2, Lines of code: 4, Source file: videoreader.cpp


Example 10: shapeDetector

int shapeDetector(BoundingBox* result, CvCapture* capture, int numberOfIntervals){

    int numberOfWindows = 0;
    int interval, start_t=45, end_t, span_t=65;
    int w_counter=0;
    int threshold[100];
    int i,j;
	int frameCounter=0;
    int totalNumberOfPatterns=0;
    int numberOfReducedPatterns=0;


    char dynamicThresholding=1;
    char run=1;
    char showWindow [100];
    char got_result = 0; //Used to know if we had great success or just got canceled

    struct Metric metric[100];
    IplImage* imgGrayScale;
    IplImage* img;

    Pattern allPatterns[100];
    Pattern reducedPatterns[10];

    CvPoint s_list[4];

    CvSeq*   contourArray[100];
    CvMemStorage *storage = cvCreateMemStorage(0); //storage area for all contours

    box = result;

    srand(time(NULL));

    for(i=0; i<100; i++) {
        showWindow[i] = 0;
    }

    //*********************  SET UP IMAGES AND DISPLAY WINDOWS  ***********************
    //*********************************************************************************

    img = cvQueryFrame(capture);
    imgGrayScale = cvCreateImage(cvGetSize(img), 8, 1);

    switch( numberOfWindows ) {
        case 3:
            cvNamedWindow("Threshold 2", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
        case 2:
            cvNamedWindow("Threshold 3", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
        case 1:
            cvNamedWindow("Threshold 1", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
    }

    cvNamedWindow("Tracked", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
    cvCreateTrackbar("Threshold lower",   "Tracked", &start_t, 255,    NULL);
    cvCreateTrackbar("Threshold upper",   "Tracked", &end_t,  255,    NULL);
    //---------------------------------------------------------------------------------

    span_t = end_t - start_t;
    interval = span_t/numberOfIntervals;

    for(i=0; i<numberOfIntervals; i++){
        threshold[i] = start_t+((i+1)*interval);
    }

    while(run){ //Main loop

        //*********************  IMAGE PRE-PROCESSING  ****************************
	frameCounter++;
        img = cvQueryFrame(capture);

        //converting the original image into grayscale
        cvCvtColor(img,imgGrayScale,CV_BGR2GRAY);

        //---------------------------------------------------------------------------

        // Awesome shapeProcessing function calls
        for(i=0; i<numberOfIntervals; i++){
            metric[i] = shapeProcessing(img, imgGrayScale, threshold[i]);
                
	        //Append patterns found in the layers to allPatterns list
	        for(j=0; j<metric[i].numberOfPatterns; j++){
	        
	            allPatterns[totalNumberOfPatterns] = metric[i].p_list[j];
	            totalNumberOfPatterns++;
	        }            
        }


        // Reduce patterns
        numberOfReducedPatterns = reducePatterns(allPatterns, reducedPatterns, totalNumberOfPatterns);
    
        for(i=0; i<numberOfReducedPatterns; i++){
            drawRect(reducedPatterns[i].cv_rect, img);
        }        

        if(numberOfReducedPatterns == 4){
        
            findBox_r(&reducedPatterns[0], &s_list[0]);

            box->topLeft = s_list[0];
//......... (remaining code omitted) .........
Developer ID: Zazcallabah, Project: dotdetector, Lines of code: 101, Source file: shapedetector.c


Example 11: cvQueryFrame

const IplImage * Camera::GetFrame() {
    return cvQueryFrame(camera);
}
Developer ID: InsipidPoint, Project: pictone, Lines of code: 3, Source file: Camera.cpp


Example 12: main

int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	CvSize size;
	size.height = 300; size.width = 200;
	IplImage *corrected_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );
	IplImage *labelled_image=NULL;
	IplImage *vertical_edge_image=NULL;
    int user_clicked_key=0;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( "./Postboxes.avi" );
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	CvPoint2D32f from_points[4] = { {3, 6}, {221, 11}, {206, 368}, {18, 373} };
	CvPoint2D32f to_points[4] = { {0, 0}, {200, 0}, {200, 300}, {0, 300} };
	CvMat* warp_matrix = cvCreateMat( 3,3,CV_32FC1 );
	cvGetPerspectiveTransform( from_points, to_points, warp_matrix );

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
	cvNamedWindow( "Vertical edges", 0 );
    cvNamedWindow( "Results", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
		image_for_on_mouse_show_values=current_frame; // Assign image for mouse callback
        if( !current_frame ) // No new frame available
			break;

		cvWarpPerspective( current_frame, corrected_frame, warp_matrix );

		if (labelled_image == NULL)
		{	// The first time around the loop create the image for processing
			labelled_image = cvCloneImage( corrected_frame );
			vertical_edge_image = cvCloneImage( corrected_frame );
		}
		check_postboxes( corrected_frame, labelled_image, vertical_edge_image );

		// Display the current frame and results of processing
        cvShowImage( "Input video", current_frame );
        cvShowImage( "Vertical edges", vertical_edge_image );
        cvShowImage( "Results", labelled_image );
        
        // Wait for the delay between frames
        user_clicked_key = cvWaitKey( 1000 / fps );
		if (user_clicked_key == ' ')
		{
			user_clicked_key = cvWaitKey(0);
		}
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyAllWindows();
 
    return 0;
}
Developer ID: ljw7630, Project: ComputerVision, Lines of code: 74, Source file: postboxes.cpp


Example 13: cvQueryFrame

// Primary function
double Camera::getCameraHeading(bool &coneExists)
{
	coneExists = 0;
	// Take a picture
	frame = cvQueryFrame(capture);


	//time_t timeval;


	// Set up
	data = (uchar *)frame->imageData;
	datar = (uchar *)result->imageData;

	// Save the initial picture
	//cvSaveImage("picture.jpeg",frame);
	// r 255
	// g 117
	// b 0

	int idealRed = 255;
	int idealGreen = 117;
	int idealBlue = 10;

	//int redRange = 150;
	//int greenRange = 20;
	//int blueRange = 60;	// need 100 for sun directly behind cone


	//  pixel must have a r value > idealRed - redRange
	//                  a g value < idealGreen + greenRange
	//		    a b value < idealBlue + blueRange


	// Iterate through every pixel looking for rgb values within each range
	for(int i = 0; i < (frame->height); i++) {
		for(int j = 0; j < (frame->width); j++) {
			if((data[i*frame->widthStep+j*frame->nChannels+2] > (idealRed-redRange)) && 		// red value > 255-125
			   (data[i*frame->widthStep+j*frame->nChannels+1] < (idealGreen+greenRange)) && 	// green value < 117+40
			   (data[i*frame->widthStep+j*frame->nChannels]   < (idealBlue+blueRange))) 		// blue value < 0 + 100
				datar[i*result->widthStep+j*result->nChannels] = 255;
			else
				datar[i*result->widthStep+j*result->nChannels] = 0;
		}
	}



	//std::cout << "Color change complete.\n";

	/* Apply erosion and dilation to eliminate some noise and even out blob */
	if(erosion >= 0) {
		cvErode(result,result,0,erosion);
	}
	if(dilation >= 0) {
		cvDilate(result,result,0,dilation);
	}

	//std::cout << "Erosion and dilation complete.\n";

	/* FindContours should not alter result (its const in the function declaration), but it does...
	This function looks for contours (edges of polygons) on the already monochrome image */
	cvFindContours(result,storage,&contours);

	/* Draw the contours on contourimage */
	if(contours) {
		cvDrawContours(contourimage,contours,cvScalarAll(255),cvScalarAll(255),100);
	}

	//std::cout << "Contour drawing complete.\n";

	//time(&timeval);
	//std::string filename("boxes.jpeg");
	//filename = filename + ctime(&timeval);
	//cvSaveImage(filename.c_str(),contourimage);
//	cvSaveImage("boxes.jpeg",contourimage);




	//std::cout << "Countour image saved.\n";

	/* Calculate the bounding rectangle */
	bound = cvBoundingRect(contourimage,0);

	//std::cout << "Bounding rectangle computed.\n";

	/* Reset the contourimage image (otherwise contourimage turns into an Etch A Sketch) */
	if(contours) {
		//delete contours;
		//contours = new CvSeq;
		//cvZero(contours);
		cvClearSeq(contours);
	}

	cvZero(contourimage);

	//std::cout << "Countour image zeroed.\n";

//......... (remaining code omitted) .........
Developer ID: HideTheMess, Project: Trinidad, Lines of code: 101, Source file: camera.cpp


Example 14: calibrator


//......... (beginning of the code omitted) .........
	}

	while (true){

	if (write == 1)	{

		if (onlyonce)
		{
			fout.open(filename);
			values.clear();
			onlyonce = false;
		}


		if(variablecount == 1){
			variable = "Ball";
			ball.push_back(h_min);
			ball.push_back(s_min);
			ball.push_back(v_min);
			ball.push_back(h_max);
			ball.push_back(s_max);
			ball.push_back(v_max);
			values.push_back(ball);

		}
		else if(variablecount == 2){
			variable = "Goal";
			goal.push_back(h_min);
			goal.push_back(s_min);
			goal.push_back(v_min);
			goal.push_back(h_max);
			goal.push_back(s_max);
			goal.push_back(v_max);
			values.push_back(goal);

		}
		else if(variablecount == 3){
			variable = "Lines";
			lines.push_back(h_min);
			lines.push_back(s_min);
			lines.push_back(v_min);
			lines.push_back(h_max);
			lines.push_back(s_max);
			lines.push_back(v_max);
			values.push_back(lines);
		}


		fout << variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		cout <<  variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		variablecount = variablecount +1;

		h_min = 0;
		s_min = 0;
		v_min = 0;
		h_max = 255;
		s_max = 255;
		v_max = 255;
		write = 0;


		setTrackbarPos("H min", "Thresh", h_min);
		setTrackbarPos("S min", "Thresh", s_min);
		setTrackbarPos("V min", "Thresh", v_min);
		setTrackbarPos("H max", "Thresh", h_max);
		setTrackbarPos("S max", "Thresh", s_max);
		setTrackbarPos("V max", "Thresh", v_max);
		setTrackbarPos("WRITE", "Thresh", write);
	}

	if (getconfig == 1)
	{

	}


	// take a frame, threshold it, display the thresholded image
	frame = cvQueryFrame( cap );
	frames = thresholder(frame,h_min, s_min, v_min, h_max, s_max, v_max ); // added values as argument, previously h_min, s_min ....

	imshow("Thresh", frames[0]);
	imshow("CALIBRATOR", frames[0]);



	int c = cvWaitKey(10);
	if( (char)c == 27) {
    cvDestroyAllWindows();
    return values;
	}

	}

}
Developer ID: mlaidma, Project: Robosome, Lines of code: 101, Source file: robovideo.cpp


Example 15: main

int main(int argc, char* argv[]){
    IplImage *frame, *frameL, *frameR, *anaglyph;
    CvCapture *capture = cvCreateFileCapture(argv[1]);
    int videoType = atoi(argv[2]); //0:side-by-side or 1:above/below
    
    //Simple error handling
    if(!capture){
       printf("Erro ao abrir o video.");
       exit(-1);
    }
    
    //some verifications regarding the video
    frame = cvQueryFrame(capture);
    if(!frame){
        printf("Video vazio.");
        exit(-1);
    }
    if(frame->width % 2 != 0){
          printf("Video possui largura não divisível por 2. Favor cortar!");
          exit(-1);
    }
    
    //prepare anaglyph video
    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    CvSize videosize;
    switch(videoType){    
        case 0:
             videosize = cvSize(
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH)/2,
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)
                                );
             break;
        case 1:
             videosize = cvSize(
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)/2
                                );
             break;
        default:                
             printf("Command call: SBS-UB-to-anaglyph-video <side-by-side_video> 0|1\n0:side-by-side and 1:above/below");
             exit(-1);          
    }
    CvVideoWriter *writer = cvCreateVideoWriter("anaglyph-gray.avi",CV_FOURCC('H','F','Y','U'), fps, videosize);
    
    //start working on each frame
    while((frame = cvQueryFrame(capture)) != NULL){ 
        //get width and height from original image
        int width = frame->width;
        int height = frame->height;      
        
        //new images will have half width of original image
        CvSize size;
        switch(videoType){
            case 0:
                 size = cvSize( width/2, height);
                 break;
            case 1:
                 size = cvSize( width, height/2);
                 break;
            default:
                 printf("Command call: SBS-UB-to-anaglyph-video <side-by-side_video> 0|1\n0:side-by-side and 1:above/below");
                 exit(-1);                    
        }
        
        //copy image properties
        frameL = cvCreateImage(size, frame->depth, frame->nChannels);
        frameR = cvCreateImage(size, frame->depth, frame->nChannels);
        cvZero(frameL);
        cvZero(frameR);
        
        //divide frames in two
        separateImages(frame, &frameL, &frameR, width, height, videoType);           
        
        anaglyph = cvCreateImage(size, frameL->depth, frameL->nChannels);
        cvZero(anaglyph);
        
        //create anaglyph
        createAnaglyph(frameL, frameR, &anaglyph);
        
        //if any error occurs (e.g. segmentation fault), check whether you have the codec installed
        //Huffyuv codec (lossless): http://neuron2.net/www.math.berkeley.edu/benrg/huffyuv.html
        cvWriteFrame(writer, anaglyph);  
        
        cvReleaseImage(&frameL);
        cvReleaseImage(&frameR);
        cvReleaseImage(&anaglyph);
    }
        
    //free pointers  
    cvReleaseCapture(&capture);
    cvReleaseVideoWriter(&writer);
    
    return 0;
}
Developer ID: andrecurvello, Project: tests-zingarelli, Lines of code: 94, Source file: SBS-AB-to-GRAY-anaglyph-video.cpp


Example 16: main

int main( int argc, char** argv )
{
    CvCapture* capture = 0;
    
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    /* print a welcome message, and the OpenCV version */
    printf ("Welcome to lkdemo, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - auto-initialize tracking\n"
            "\tc - delete all the points\n"
            "\tn - switch the \"night\" mode on/off\n"
            "To add/remove a feature point click it\n" );

    cvNamedWindow( "LkDemo", 0 );
    cvSetMouseCallback( "LkDemo", on_mouse, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, k, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );

        if( night_mode )
            cvZero( image );
        
        if( need_to_init )
        {
            /* automatic initialization */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;

            count = MAX_COUNT;
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
                                   quality, min_distance, 0, 3, 0, 0.04 );
            cvFindCornerSubPix( grey, points[1], count,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );

            add_remove_pt = 0;
        }
        else if( count > 0 )
        {
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            for( i = k = 0; i < count; i++ )
            {
                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        continue;
                    }
                }
                
                if( !status[i] )
//......... (remaining code omitted) .........
Developer ID: Changle, Project: mocap, Lines of code: 101, Source file: lkdemo.c


Example 17: get_next_frame

