C++ cvDestroyWindow Function Code Examples


This article collects and summarizes typical usage examples of the cvDestroyWindow function in C++. If you have been struggling with questions such as how exactly to use cvDestroyWindow in C++, what it is for, or where to find examples of it in real code, the hand-picked code examples below should help.



A total of 20 code examples of the cvDestroyWindow function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
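Before the full examples, here is a minimal, self-contained sketch of the window lifecycle that all of them follow: create a window with cvNamedWindow, display an image with cvShowImage, wait for a key press, and finally release the window with cvDestroyWindow. This sketch assumes the legacy OpenCV 1.x C API used throughout this article; the file name "lena.jpg" is only a placeholder.

#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <stdio.h>

int main(void)
{
    /* "lena.jpg" is an assumed placeholder; substitute any image on your machine */
    IplImage* img = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img) {
        fprintf(stderr, "Could not load image\n");
        return 1;
    }

    cvNamedWindow("demo", CV_WINDOW_AUTOSIZE);   /* create the window */
    cvShowImage("demo", img);                    /* display the image in it */
    cvWaitKey(0);                                /* block until a key is pressed */

    cvDestroyWindow("demo");                     /* release the window and its resources */
    cvReleaseImage(&img);                        /* release the image */
    return 0;
}

Every example below ends the same way: once its processing loop finishes, each window opened with cvNamedWindow is closed with a matching cvDestroyWindow (or cvDestroyAllWindows).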

Example 1: main

int main(int argc, char** argv)
{
	pthread_t 	thread_s;
	int			key = 0;

	if (argc == 2) {
		capture = cvCaptureFromFile(argv[1]);
	} else {
		capture = cvCaptureFromCAM(0);
	}

	if (!capture) {
		quit("cvCapture failed", 1);
	}

	img0 = cvQueryFrame(capture);
	img1 = cvCreateImage(cvGetSize(img0), IPL_DEPTH_8U, 1);

	cvZero(img1);
	cvNamedWindow("stream_server", CV_WINDOW_AUTOSIZE);

	/* print the width and height of the frame, needed by the client */
	fprintf(stdout, "width:  %d\nheight: %d\n\n", img0->width, img0->height);
	fprintf(stdout, "Press 'q' to quit.\n\n");

	/* run the streaming server as a separate thread */
	if (pthread_create(&thread_s, NULL, streamServer, NULL)) {
		quit("pthread_create failed.", 1);
	}

	while(key != 'q') {
		/* get a frame from camera */
		img0 = cvQueryFrame(capture);
		if (!img0) break;

		img0->origin = 0;
		cvFlip(img0, img0, -1);

		/**
		 * convert to grayscale 
		 * note that the grayscaled image is the image to be sent to the client 
		 * so we enclose it with pthread_mutex_lock to make it thread safe 
		 */
		pthread_mutex_lock(&mutex);
		cvCvtColor(img0, img1, CV_BGR2GRAY);
		is_data_ready = 1;
		pthread_mutex_unlock(&mutex);

		/* also display the video here on server */
		cvShowImage("stream_server", img0);
		key = cvWaitKey(30);
	}

	/* user has pressed 'q', terminate the streaming server */
	if (pthread_cancel(thread_s)) {
		quit("pthread_cancel failed.", 1);
	}

	/* free memory */
	cvDestroyWindow("stream_server");
	quit(NULL, 0);
}
Developer ID: adarshvjois, Project: webcam_multicast, Lines of code: 62, Source file: svr.c


Example 2: main


//......... part of the code omitted here .........
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
    if (seq==0) {
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

   //  tempwidth=cvGetSize(dilframebot).width;
   //  tempheight=cvGetSize(dilframebot).height;
   //  printf("dilframe: %d, %d \n",tempwidth,tempheight);
     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////////
    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;}
//////////////////////////////////////////////////////////////////////////////////////
   if(seq==seqdiv+2) {
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    }

    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
     }
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("%d, %d, %d, %d\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     }
		
}
    seq++;
    seq=seq%(seqdiv+4);
     cvShowImage( "mywindow", frame); // show output image
     cvShowImage( "bot", threshframebot);
     cvShowImage( "top", threshframetop);

   //  cvShowImage("croped",cropped);
     //cvShowImage( "mywindow3", dilframeROI);
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;

 }
Developer ID: bhuneshwar21, Project: AUV, Lines of code: 101, Source file: opt1.cpp


Example 3: main


//......... part of the code omitted here .........
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame )
                break;
            
            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy. 
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
                cvFlip( frame, frame_copy, 0 );
            
            // Call the function to detect and draw the face
            detect_and_draw( frame_copy );

            // Wait for a while before proceeding to the next frame
            if( cvWaitKey( 10 ) >= 0 )
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else
    {
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        IplImage* image = cvLoadImage( filename, 1 );

        // If the image is loaded successfully, then:
        if( image )
        {
            // Detect and draw the face
            detect_and_draw( image );

            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
            cvReleaseImage( &image );
        }
        else
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f )
            {
                char buf[1000+1];

                // Get the line from the file
                while( fgets( buf, 1000, f ) )
                {

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image )
                    {
                        // Detect and draw the face from the image
                        detect_and_draw( image );
                        
                        // Wait for the user input, and release the memory
                        cvWaitKey(0);
                        cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }

    }
    
    // Destroy the window previously created with filename: "result"
    cvDestroyWindow("result");

    // return 0 to indicate successful execution of the program
    return 0;
}
Developer ID: rams16592, Project: OpenCV-Modified-Programs, Lines of code: 101, Source file: facedetect.cpp


Example 4: color_cluster

int color_cluster(char *filename)
{
	IplImage* originimg=cvLoadImage(filename);

	int i,j;
	CvMat *samples=cvCreateMat((originimg->width)*(originimg->height),1,CV_32FC3);// create the sample matrix; CV_32FC3 means 32-bit float, 3 channels (color image)
	CvMat *clusters=cvCreateMat((originimg->width)*(originimg->height),1,CV_32SC1);// create the cluster-label matrix; CV_32SC1 means 32-bit integer, 1 channel

	int k=0;
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			CvScalar s;
			// read the three channel values (BGR) of each pixel
			s.val[0]=(float)cvGet2D(originimg,j,i).val[0];//B
			s.val[1]=(float)cvGet2D(originimg,j,i).val[1];//G
			s.val[2]=(float)cvGet2D(originimg,j,i).val[2];//R
			cvSet2D(samples,k++,0,s);// store the pixel's three channel values into the sample matrix, in order
		}
	}

	int nCluster=2;// number of clusters; the count could later be determined automatically by learning
	cvKMeans2(samples,nCluster,clusters,cvTermCriteria(CV_TERMCRIT_ITER,100,1.0));// run k-means: at most 100 iterations, termination accuracy 1.0

	// image used to display the overall clustering result
	IplImage *clusterimg=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	
	// images used to display each cluster separately
	IplImage *cluster_img0=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img1=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);
	IplImage *cluster_img2=cvCreateImage(cvSize(originimg->width,originimg->height),IPL_DEPTH_8U,1);



	k=0;
	int val=0;
	float step=255.0f/(nCluster-1);
	CvScalar bg={223,124,124,0};// background fill color
	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			cvSet2D(cluster_img0,j,i,bg);
			cvSet2D(cluster_img1,j,i,bg);
			cvSet2D(cluster_img2,j,i,bg);
		}
	}

	for (i=0;i<originimg->width;i++)
	{
		for (j=0;j<originimg->height;j++)
		{
			val=(int)clusters->data.i[k++];
			CvScalar s;
			s.val[0]=255-val*step;// map each cluster label to a different gray value
			cvSet2D(clusterimg,j,i,s);	// store the clustered image

			// separate the clusters into individual images
			switch(val)
			{
				case 0:
					cvSet2D(cluster_img0,j,i,s);break;// white cluster
				case 1:
					cvSet2D(cluster_img1,j,i,s);break;// gray cluster
				case 2:
					cvSet2D(cluster_img2,j,i,s);break;// black cluster
				default:
					break;
			}	
		
		}
    }


	//cvSaveImage("PicVideo//cluster_img0.png",cluster_img0);
	//cvSaveImage("PicVideo//cluster_img1.png",cluster_img1);
	//cvSaveImage("PicVideo//cluster_img2.png",cluster_img2);


	cvNamedWindow( "原始图像", 1 ); 
	cvNamedWindow( "聚类图像", 1 );

	cvShowImage( "原始图像", originimg  );
	cvShowImage( "聚类图像", clusterimg  );
	cvSaveImage("clusterimg.png",clusterimg);//结果保存
	
	cvWaitKey(0); 

	cvDestroyWindow( "原始图像" );
	cvDestroyWindow( "聚类图像" );

	cvReleaseImage( &originimg ); 
	cvReleaseImage( &clusterimg );
	cvReleaseImage(&cluster_img0);
	cvReleaseImage(&cluster_img1);
	cvReleaseImage(&cluster_img2);

	return 0;

//......... part of the code omitted here .........
Developer ID: crescent-hacker, Project: OpenCV, Lines of code: 101, Source file: color_cluster.cpp


Example 5: main

int main( int argc, char** argv )
{
    IplImage* img = cvCreateImage( cvSize( 500, 500 ), 8, 3 );
#if !ARRAY        
    CvMemStorage* storage = cvCreateMemStorage(0);
#endif

    cvNamedWindow( "rect & circle", 1 );
        
    for(;;)
    {
        char key;
        int i, count = rand()%100 + 1;
        CvPoint pt0, pt;
        CvBox2D box;
        CvPoint2D32f box_vtx[4];
        CvPoint2D32f center;
        CvPoint icenter;
        float radius;
#if !ARRAY            
        CvSeq* ptseq = cvCreateSeq( CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),
                                     sizeof(CvPoint), storage );
        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            cvSeqPush( ptseq, &pt0 );
        }
#ifndef _EiC /* unfortunately, here EiC crashes */
        box = cvMinAreaRect2( ptseq, 0 );
#endif
        cvMinEnclosingCircle( ptseq, &center, &radius );
#else
        CvPoint* points = (CvPoint*)malloc( count * sizeof(points[0]));
        CvMat pointMat = cvMat( 1, count, CV_32SC2, points );

        for( i = 0; i < count; i++ )
        {
            pt0.x = rand() % (img->width/2) + img->width/4;
            pt0.y = rand() % (img->height/2) + img->height/4;
            points[i] = pt0;
        }
#ifndef _EiC
        box = cvMinAreaRect2( &pointMat, 0 );
#endif
        cvMinEnclosingCircle( &pointMat, &center, &radius );
#endif
        cvBoxPoints( box, box_vtx );
        cvZero( img );
        for( i = 0; i < count; i++ )
        {
#if !ARRAY                
            pt0 = *CV_GET_SEQ_ELEM( CvPoint, ptseq, i );
#else
            pt0 = points[i];
#endif
            cvCircle( img, pt0, 2, CV_RGB( 255, 0, 0 ), CV_FILLED, CV_AA, 0 );
        }

#ifndef _EiC
        pt0.x = cvRound(box_vtx[3].x);
        pt0.y = cvRound(box_vtx[3].y);
        for( i = 0; i < 4; i++ )
        {
            pt.x = cvRound(box_vtx[i].x);
            pt.y = cvRound(box_vtx[i].y);
            cvLine(img, pt0, pt, CV_RGB(0, 255, 0), 1, CV_AA, 0);
            pt0 = pt;
        }
#endif
        icenter.x = cvRound(center.x);
        icenter.y = cvRound(center.y);
        cvCircle( img, icenter, cvRound(radius), CV_RGB(255, 255, 0), 1, CV_AA, 0 );

        cvShowImage( "rect & circle", img );

        key = (char) cvWaitKey(0);
        if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC'
            break;

#if !ARRAY
        cvClearMemStorage( storage );
#else
        free( points );
#endif
    }
    
    cvDestroyWindow( "rect & circle" );
    return 0;
}
Developer ID: AndrewShmig, Project: FaceDetect, Lines of code: 90, Source file: minarea.c


Example 6: main

int main()
{
//	IplImage* img = cvLoadImage("PictureSource1.jpg");
//	IplImage* temp = 0;
//	/*---------------------------------------------------------------------------------------------------*/
//	cvNamedWindow("Testing");
//	temp = dopyrdownIpl(img);
//	cvShowImage("Testing",temp);
	/*---------------------------------------------------------------------------------------------------*/
	CvMat* mattesting = cvLoadImageM("PictureSource1.jpg");
	CvMat* matpyr = 0;
	matpyr = dopydownMat(mattesting);
	cvNamedWindow("MatPYR", CV_WINDOW_AUTOSIZE);
	cvShowImage("MatPYR", matpyr);
	/*---------------------------------------------------------------------------------------------------*/
#if TEST==RECTANGLE
	CvMat* Mattemp = 0;
	Mattemp = cvCloneMat(matpyr);
	cvRectangle(Mattemp, cvPoint(5, 10), cvPoint(255, 255), cvScalar(255, 255, 255));
	cvNamedWindow("MatClone", CV_WINDOW_AUTOSIZE);
	cvShowImage("MatClone", Mattemp);
	cvWaitKey(0);
	cvDestroyWindow("MatClone");
	cvReleaseMat(&Mattemp);
#endif
	/*---------------------------------------------------------------------------------------------------*/
#if TEST==CANNY
	CvMat* MatCannyIn;
	CvMat* MatCannyOut = cvCreateMat(matpyr->rows, matpyr->cols, matpyr->type);
	MatCannyIn = cvCloneMat(matpyr);
	MatCannyOut = docannyMat(MatCannyIn, 10, 100);

	cvNamedWindow("MatCanny");
	cvShowImage("MatCanny",MatCannyOut);
	cvWaitKey(0);
	cvDestroyWindow("MatCanny");
	cvReleaseMat(&MatCannyIn);
	cvReleaseMat(&MatCannyOut);
#endif
	/*---------------------------------------------------------------------------------------------------*/ 
#if TEST==MAT_ELEM
	CvMat* MatELEM = cvCreateMat(matpyr->rows, matpyr->cols, matpyr->type);
	float element_test = CV_MAT_ELEM(*MatELEM, float, matpyr->rows-2, matpyr->rows-3);
	printf("element_test = %f\r\n", element_test);
	cvWaitKey(0);
	cvNamedWindow("MatELEM");
	cvShowImage("MatELEM", MatELEM);
	cvWaitKey(0);
 	cvDestroyWindow("MatELEM");
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==GETMATPointData
	float val[] = { 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0, 0.86,-0.5,0.5,0.84,0 };
	float* Point = 0;
	float SUMResult = 0;
	CvMat* MatSUM = cvCreateMat(5, 3, CV_32FC1);
	cvInitMatHeader(MatSUM,MatSUM->rows,MatSUM->cols,MatSUM->type,&val);
	for (size_t row = 0; row < MatSUM->rows; row++)
	{
		Point = (float*)(MatSUM->data.ptr + row*MatSUM->step);
		for (size_t col = 0; col < MatSUM->cols; col++)
		{
			printf("%f\t",*Point); 
			SUMResult += *(Point);
			Point++;
		}
		printf("\r\n");
	}
	printf("TheSUMResult=%f\r\n",SUMResult);
	printf("MatSUM->rows=%d\r\n", MatSUM->rows);
	printf("MatSUM->cols=%d\r\n ", MatSUM->cols);
	
	cvWaitKey(0);
	cvReleaseMat(&MatSUM);
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==ROITEST
	/*
		Using the ROI to come true 
	*/
	IplImage* MatToImage = cvCreateImage(cvGetSize(matpyr), IPL_DEPTH_8U, 3);
	cvGetImage(matpyr, MatToImage);
	cvNamedWindow("MatToImage");
	cvShowImage("MatToImage", MatToImage);
	cvWaitKey(0);

	cvSetImageROI(MatToImage, cvRect(10, 10, matpyr->rows - 30, matpyr->cols - 60));
	cvAddS(MatToImage, cvScalar(200), MatToImage);
	cvResetImageROI(MatToImage);
	
	cvNamedWindow("ROITEST");
	cvShowImage("ROITEST", MatToImage);
	cvWaitKey(0);
	cvDestroyWindow("ROITEST");
#endif
	/*---------------------------------- -----------------------------------------------------------------*/
#if TEST==WIDTHSTEPTEST
	/*
		Using WidthStep to come true
	*/
//......... part of the code omitted here .........
Developer ID: wcs7846, Project: MarkLHF, Lines of code: 101, Source file: main.cpp


Example 7: main

// ----- Main
int main( int argc, char** argv ) {
    if(argc != 2) {
        printf("Usage: ./motion <video name>\n");
        return 0;
    }

    cvNamedWindow( "Example Video", CV_WINDOW_AUTOSIZE );
    g_capture = cvCreateFileCapture( argv[1] );
    int frames = (int) cvGetCaptureProperty(
                     g_capture,
                     CV_CAP_PROP_FRAME_COUNT
                 );
    if( frames != 0 ) {
        cvCreateTrackbar(
            "Position",
            "Example Video",
            &g_slider_position,
            frames,
            onTrackbarSlide
        );
    }

    // Keep track of frames
    IplImage *prev_frame;
    IplImage *cur_frame = cvQueryFrame( g_capture ); // read first frame
    CvSize img_sz = cvGetSize( cur_frame );

    IplImage* imgA = cvCreateImage( img_sz, IPL_DEPTH_8U, 1 );
    IplImage* imgB = cvCreateImage( img_sz, IPL_DEPTH_8U, 1 );
    cvConvertImage( cur_frame, imgB ); // convert first frame

    IplImage* imgC = cvCreateImage( img_sz, cur_frame->depth, cur_frame->nChannels );

    while(1) {
        // Scroll to next frame and read
#ifdef OPTICAL_FLOW
        if( pyrB )
            cvCopy( pyrB, pyrA );
        if( imgB )
            cvCopy( imgB, imgA );
        if( cur_frame )
            cvCopy( cur_frame, prev_frame );
        /*
        pyrA = pyrB;
        imgA = imgB;
        prev_frame = cur_frame;
        */
#endif
        cur_frame = cvQueryFrame( g_capture );
        if( !cur_frame )
            break;

#ifdef OPTICAL_FLOW
        // Convert frames to 8U single channel
        cvConvertImage( cur_frame, imgB );
        cvCopyImage( cur_frame, imgC );
        calcOpticalFlowAndMark( imgA, imgB, imgC );
        cvShowImage( "Example Video", imgC );
#else
        cvShowImage( "Example Video", cur_frame );
#endif

        char c = cvWaitKey( 33 ); // ms to wait
        if( c == 27 ) // ESC key
            break;
    }
    cvReleaseCapture( &g_capture );
    cvDestroyWindow( "Example Video" );

    return 0;
}
Developer ID: tloinuy, Project: opencpi-opencv, Lines of code: 72, Source file: motion_opencv.cpp


Example 8: main


//......... part of the code omitted here .........
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmRightMouth = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );

	faceTrack.tmNose = cvCreateImage( cvSize( WINDOW_WIDTH  - TPL_WIDTH  + 1,  
                                WINDOW_HEIGHT - TPL_HEIGHT + 1 ),
                        IPL_DEPTH_32F, 1 );
   
  
    cvNamedWindow( "video", CV_WINDOW_NORMAL | CV_WINDOW_FREERATIO);
    cvSetMouseCallback( "video", mouseHandler, NULL );


   
    while( key != 'q' ) {
        faceTrack.frame = cvQueryFrame( capture );
        if( !faceTrack.frame ) break;
 
       
        /* perform tracking if template is available */
        if( faceTrack.left_eye_tracking ) faceTrack.trackLeftEye();
		if (faceTrack.right_eye_tracking) faceTrack.trackRightEye();
		if (faceTrack.left_mouth_tracking) faceTrack.trackLeftMouth();
		if (faceTrack.right_mouth_tracking) faceTrack.trackRightMouth();
		if (faceTrack.nose_tracking) faceTrack.trackNose();

		/*if user hits the space bar capture neutral face data*/
		if (key == 32)
		{  faceTrack.captureNeutralFace(); }

		/*if user hits escape key reset data*/
		if (key == 27)
		{	faceTrack.resetData(); }

		/*if we have neutral face data begin to capture facial expressions*/
		if (faceTrack.neutralDataCaptured)
		{	faceTrack.getFaceData(); }

		
		/*if user hits c key display image based on current facial expression*/
		if (key == 'c')
		{
		if (faceTrack.smile == true && faceTrack.eyeRaised == true)
		{
			filter.expressionFilter(3.0,100);

		}
		else if (faceTrack.smile == true && faceTrack.eyeFurrow == true)
		{
			filter.expressionFilter(1.0,100 );

		}
		else if (faceTrack.frown == true && faceTrack.eyeRaised == true)
		{
			filter.expressionFilter(3.0,20);

		}
		else if (faceTrack.frown == true && faceTrack.eyeFurrow == true)
		{
			filter.expressionFilter(1.0,20);

		}
		else if (faceTrack.smile == true)
		{
			filter.expressionFilter(1.5,100);

		}

		else if (faceTrack.frown == true)
		{
			filter.expressionFilter(1.5,20);
		}

		else if (faceTrack.eyeRaised == true)
		{
			filter.expressionFilter(3.0,50);
		}

		else if (faceTrack.eyeFurrow == true)
		{
			filter.expressionFilter(1.0,50);
		}

		else
			filter.expressionFilter(1.5,150);
       
		}

        cvShowImage( "video", faceTrack.frame );
        key = cvWaitKey( 1 );
   
	}

    /* free window */
    cvDestroyWindow( "video" );
   
    return 0;
}
Developer ID: KatieMcNabb, Project: Emotion_Detection_With_Graphic_Visualization, Lines of code: 101, Source file: Main.cpp


Example 9: main

int main()
{             
    CvVideoWriter *writer;
    
    //capture =cvCreateFileCapture("hand4.avi") ;
    //
    capture = cvCaptureFromCAM(0) ;
    cvNamedWindow("Webcam",0);
    //cvNamedWindow("Virtual hand",0);
    writer = cvCreateVideoWriter("palm_output2.avi",CV_FOURCC('M','J','P','G'),15,cvSize(640,480),1);
          
    while(1)
    {
        frame = cvQueryFrame(capture);
        //cvWriteFrame(writer,frame);
        cvCvtColor(frame,frame,CV_BGR2HSV); 
      
      // IMPORTANT!!
      // The following FOR loop generates binary image which contains ONLY the arm.
      // Please replace the following FOR loop with your own method to generate the ideal output image.
      // Because mine method definitely won't work for you.
      //
      for(int i=0;i<frame->height;i++) //REPLACE ME
      {
        for(int j=0;j<frame->width;j++)
        {
        //if(frame->imageData[i*frame->widthStep+(j*3)+2] < 90 && frame->imageData[i*frame->widthStep+(j*3)+2] > 0 && frame->imageData[i*frame->widthStep+(j*3)+1] < 0) 
          if(frame->imageData[i*frame->widthStep+(j*3)] < 50 || frame->imageData[i*frame->widthStep+(j*3)+2] > 170) 
             { mask->imageData[i*mask->width+j] = 255;}
          else mask->imageData[i*mask->width+j] = 0;
        }
      }
        
        cvCvtColor(frame,frame,CV_HSV2BGR);
        cvCopy(frame,frame2);
        //cvErode(mask,mask,0,2);
        
        cvErode(mask,mask,0,1); //ERODE first then DILATE to eliminate the noises.
        cvDilate(mask,mask,0,1);


        cvFindContours( mask, storage, &contours, sizeof(CvContour),
                   CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cvPoint(0,0) );

        // We choose the first contour in the list which is longer than 650.
        // You might want to change the threshold to which works the best for you.
        while(contours && contours->total <= 650)
        {
          contours = contours->h_next;
        }

    cvDrawContours( frame, contours, CV_RGB(100,100,100), CV_RGB(0,255,0), 1, 2, CV_AA, cvPoint(0,0) );

        //
        // Use a rectangle to cover up the contour.
        // Find the center of the rectangle (armcenter). Fingertip() needs it.
        //
        if(contours)
        {
          contourcenter =  cvMinAreaRect2(contours,0);
          armcenter.x = cvRound(contourcenter.center.x);
          armcenter.y = cvRound(contourcenter.center.y);
          //cvCircle(frame,armcenter,10,CV_RGB(255,255,255),-1,8,0);
          getconvexhull();
          fingertip();
          hand();
        }


        cvShowImage("Webcam",frame);
        
        //cvShowImage("Virtual hand",virtualhand);
        
        if(savepic)
        {
           int framenum = (int)cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);
           char name[32];
           sprintf(name,"%dfix4.jpg",framenum); /* build the output file name (avoids non-portable itoa and overlapping sprintf) */
           //printf("%s\n",name);
           //cvSaveImage(name,frame);
           savepic = false;           
        }
        
        //printf("FPS:%d\n",(int)cvGetCaptureProperty(capture,CV_CAP_PROP_FPS));

       // cvZero(virtualhand);
        
        if(cvWaitKey(1)>=0 || !frame)
        {
              //cvSaveImage("normal.jpg",frame2);
              break;
        }
    }       
    cvReleaseCapture(&capture);
    cvDestroyWindow("Webcam");
    //cvDestroyWindow("Virtual hand");
    cvReleaseVideoWriter(&writer);
}
Developer ID: Lucklyric, Project: Fall2015MM803Project, Lines of code: 99, Source file: code.cpp


Example 10: main


//......... part of the code omitted here .........
			cvNamedWindow("CapturaCam",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "ForegroundCodeBook",CV_WINDOW_AUTOSIZE);
			cvNamedWindow( "CodeBook_ConnectComp",CV_WINDOW_AUTOSIZE);

			printf (">>Aprendiendo fondo\n");
		}

		// If we've got an rawImage and are good to go:                
		if( rawImage )
		{
			cvFlip(rawImage, NULL, 1);
			int w = rawImage->width;

			cvFindContours(borde,mstrg,&contours,sizeof(CvContour),CV_RETR_EXTERNAL);

			// Draw the contour region
			cvLine(rawImage, cv::Point (w-250,0), cv::Point (w-250,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			cvLine(rawImage, cv::Point (w-250,250), cv::Point (w,250), CV_RGB(255,0,0),1, CV_AA, 0) ;
			//
			if(nframes - 1 < nframesToLearnBG)
			{
				char buffer [33];
				_itoa (nframesToLearnBG - nframes,buffer,10);
				CvFont font2;
				cvInitFont(&font2, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 3, CV_AA);
				cvPutText(rawImage, buffer, cvPoint(50, 50), &font2, cvScalar(0, 0, 255, 0));
			}

			cvSetImageROI(rawImage, cvRect(w-250,0,250,250));
			IplImage *temp = cvCreateImage(cvGetSize(rawImage),rawImage->depth,rawImage->nChannels);

			cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );
			// YUV image used by the codebook method

			// Build the background model
			if(nframes-1 < nframesToLearnBG  )
				cvBGCodeBookUpdate( model, yuvImage );


			if( nframes-1 == nframesToLearnBG  )
			{
				cvBGCodeBookClearStale( model, model->t/2 );
				printf (">>Fondo aprendido\n");
			}

			// Find foreground objects with the codebook method
			if( nframes-1 >= nframesToLearnBG  )
			{
				cvBGCodeBookDiff( model, yuvImage, ImaskCodeBook );

				cvCopy(ImaskCodeBook,ImaskCodeBookCC);	
				cvSegmentFGMask( ImaskCodeBookCC );

				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);

				// image detection
				detect(ImaskCodeBookCC,rawImage);

				// base contour used to draw the hand
				if(contours)
					cvDrawContours(rawImage,contours, cvScalar(255, 0, 0, 0), cvScalarAll(128), 1 );


			}
			//Display
			cvResetImageROI(rawImage);
			cvShowImage( "CapturaCam", rawImage );
			cvShowImage( "ForegroundCodeBook",ImaskCodeBook);

		}

		// User input:
		c = cvWaitKey(10)&0xFF;
		c = tolower(c);
		// End processing on ESC, q or Q
		if(c == 27 || c == 'q')
			break;
		//Else check for user input
		switch( c )
		{
		case 'c':
			saveLength = true;
			break;        
		case ' ':
			cvBGCodeBookClearStale( model, 0 );
			nframes = 0;
			break;            
		}

		if (c != 'c')
			saveLength=false;
	}		

	cvReleaseCapture( &capture );
	cvReleaseMemStorage(&mstrg);
	cvDestroyWindow( "CapturaCam" );
	cvDestroyWindow( "ForegroundCodeBook");
	cvDestroyWindow( "CodeBook_ConnectComp");
	return 0;
}
Developer ID: rovim, Project: LSMRecognition, Lines of code: 101, Source file: convexhull2.cpp


Example 11: main

int main(int arguments_size, char * arguments[]) {
    CvCapture * capture = NULL;
    String_Const capture_base_name = "video_capture";

    if (arguments_size <= 1) {
        // No arguments; let the user know the usage:
        File__format(stderr,
          "Usage: Video_Capture camera_number [capture_base_name]\n");
        return 1;
    } else {
        // Grab the arguments:
        String argument1 = arguments[1];
        if (arguments_size > 2) {
            capture_base_name = arguments[2];
        }

        // Figure whether to open a video file or a camera;
        if (isdigit(argument1[0])) {
            // Open the camera:
            unsigned int camera_number = String__to_unsigned(argument1);
            int camera_flags = CV_CAP_ANY + (int)camera_number;
            capture = cvCreateCameraCapture(camera_flags);
            if (capture == NULL) {
                File__format(stderr,
                  "Could not open camara %d\n", camera_number);
                return 1;
            }

            // Set the frame size:
            cvSetCaptureProperty(capture,
              CV_CAP_PROP_FRAME_WIDTH, (double)640);
            cvSetCaptureProperty(capture,
              CV_CAP_PROP_FRAME_HEIGHT, (double)480);
        } else {
            // Open a video file format:
            capture = cvCreateFileCapture(argument1);
            if (capture == NULL) {
                File__format(stderr,
                  "Could not open video file '%s'\n", argument1);
                return 1;
            }
        }
    }
    // We should not be able to get here without an open *capture*:
    assert(capture != NULL);

    // Create the window to display the video into:
    String_Const window_name = "Video_Capture";
    cvNamedWindow(window_name, CV__window_auto_size);

    // Do a video loop:
    unsigned int capture_number = 0;
    while (1) {
        // Grab a frame from the video source:
        CV_Image frame = cvQueryFrame(capture);
        if (frame == (CV_Image)0) {
            // When *frame* is null, the video source is at end-of-file
            // or disconnected:
            break;
        }
        
        // Show the image:
        cvShowImage(window_name, frame);

        // Deal with key character:
        char character = cvWaitKey(33);
        if (character == '\033') {
            // [Esc] key causes program to escape:
            break;
        } else if (character == ' ') {
            // Write out image out to file system as a .tga file:
            String file_name =
              String__format("%s-%02d.pnm", capture_base_name, capture_number);
            CV_Image__pnm_write(frame, file_name);
            File__format(stderr, "Wrote frame out to file '%s'\n", file_name);
            capture_number += 1;
            String__free(file_name);
        }
    }

    // Clean up and leave:
    cvReleaseCapture(&capture);
    cvDestroyWindow(window_name);

    return 0;
}
Developer ID: jrlandau, Project: fiducials-1, Lines of code: 86, Source file: Video_Capture.cpp


Example 12: get_regions

/**
 * @brief Lets the user interactively select an object
 * @param regions Stores the rectangles that define each object
 * @param MAX_OBJECTS Maximum number of objects allowed to be tracked
 * @param argv The video name is used to read the corresponding default regions file
 * @return The number of objects selected by the user (<= MAX_OBJECTS)
 */
int get_regions(CvRect **regions, int MAX_OBJECTS, char *argv ) {
	
	FILE *fich;
	char name[50], *p1, *p2;
	params p;
	CvRect* r;
	int x1, y1, x2, y2, w, h;
	
	// If the regions are to be read from a file...
	if(MAX_OBJECTS > 0) {
		p.n = MAX_OBJECTS;

		strcpy( name, REGION_IN);
		p1 = strrchr( &argv[1], '/' );
		p2 = strrchr( &argv[1], '.' );
		strncat( name, (++p1), p2-p1 );
		strcat( name, "txt" );
		fich = fopen( name, "r" );
		if( ! fich ) {
			strcpy( name, REGION_IN);
			p1 = strrchr( &argv[1], '/' );
			p2 = strrchr( &argv[1], '.' );
			strncat( name, (++p1), (++p2)-p1 );
			strcat( name, "txt" );
			fich = fopen( name, "r" );
			if( ! fich ) {
				printf("Error leyendo las regiones iniciales\n");
				exit (-1);
			}
		}

		p.loc1 = std::vector<CvPoint>(MAX_OBJECTS);
		p.loc2 = std::vector<CvPoint>(MAX_OBJECTS);
		for( int i = 0; i < MAX_OBJECTS; ++i ) {
			int leidos = fscanf(fich, "%d", &p.loc1[i].x);
			leidos = fscanf(fich, "%d", &p.loc1[i].y);
			leidos = fscanf(fich, "%d", &p.loc2[i].x);
			leidos = fscanf(fich, "%d", &p.loc2[i].y);
		}
		fclose( fich );
	}

	// If they are to be selected with the mouse...
	else {
		fprintf( stderr, "Selecciona la región a rastrear\n" );
		p.n = 0;
		cvNamedWindow( win_name, CV_WINDOW_AUTOSIZE );
		cvShowImage( win_name, first_frame );
		cvSetMouseCallback( win_name, &mouse, &p );
		cvWaitKey( 0 );
		cvDestroyWindow( win_name );
		if( p.n == 0 )
			return 0;
	}
	
	// Allocate space for the list of regions
	r = (CvRect*) malloc( p.n * sizeof( CvRect ) );

	for( int i = 0; i < p.n; ++i ) {
		x1 = MIN( p.loc1[i].x, p.loc2[i].x );
		x2 = MAX( p.loc1[i].x, p.loc2[i].x );
		y1 = MIN( p.loc1[i].y, p.loc2[i].y );
		y2 = MAX( p.loc1[i].y, p.loc2[i].y );
		w = x2 - x1;
		h = y2 - y1;
		
		//printf("%d %d %d %d ", x1, y1, x2, y2);
		// Make sure the width and height are odd
		w = ( w % 2 )? w : w+1;
		h = ( h % 2 )? h : h+1;
		r[i] = cvRect( x1, y1, w, h );
	}
	*regions = r;
	return p.n;
}
Developer ID: rotty11, Project: Tfm, Lines of code: 82, Source file: trackerMP1.cpp


Example 13: fin

void fin(const char *name,IplImage **img)
{
	cvDestroyWindow(name);
	cvReleaseImage(img);
}
Developer ID: vabc3, Project: KarCv, Lines of code: 5, Source file: hiutil.c


Example 14: cvDestroyWindow

/** void DestroyWindow();
 ***********************************************************
 * Date		: 2012/03/29
 * Author	: Kohei Kojima
 * Note		: Destroy Window
 ***********************************************************/	
void CGraphRendering::DestroyWindow()
{
	cvDestroyWindow( WINDOW_NAME );
}
Developer ID: vivitter, Project: RRT_forWin_1_0_0, Lines of code: 10, Source file: GraphRendering.cpp


Example 15: main


//......... part of the code omitted here .........
                    errX[zz]=optical_flow_error[0][zz]- optical_flow_errorP[0][zz]; 
                    errY[zz]=optical_flow_error[1][zz]- optical_flow_errorP[1][zz]; 
 
                    sumX=sumX+errX[zz]; 
                    sumY=sumY+errY[zz]; 
 
                    optical_flow_errorP[0][zz]=optical_flow_error[0][zz]; 
                    optical_flow_errorP[1][zz]=optical_flow_error[1][zz]; 
 
                } 
 
                fprintf(ptr,"%d\n",count); 
                 
                err_X=sumX/count; 
                err_Y=sumY/count; 
 
            if(flagg==1) 
            { 
              int static startonce=0; 
 
            if(startonce==0) 
            { 
                 
             
            tempxx1=pt.x-20; 
            tempyy1=pt.y-20; 
 
            tempxx2=pt.x+20; 
            tempyy2=pt.y+20; 
 
            XX=pt.x; 
            YY=pt.y; 
 
            startonce=1; 
 
            } 
            if(err_X<3) 
            { 
                tempxx1=tempxx1+err_X; 
                tempyy1=tempyy1+err_Y; 
                tempxx2=tempxx2+err_X; 
                tempyy2=tempyy2+err_Y; 
 
                XX=XX+err_X; 
                YY=YY+err_Y; 
                fprintf(ptr,"%f %f\n",err_X,err_Y); 
            } 
 
            printf("\n%f",err_X); 
 
            //moving window 
 
            cvRectangle(image, cvPoint(tempxx1,tempyy1), cvPoint(tempxx2,tempyy2), cvScalar(255,0,0), 1); 
            cvCircle(image, cvPoint(XX,YY), 3, cvScalar(0,0,255), 1); 
        } 
            count = k; 
        } 
 
 
        if( add_remove_pt && count < MAX_COUNT ) 
        { 
            points[1][count++] = cvPointTo32f(pt); 
            cvFindCornerSubPix( grey, points[1] + count - 1, 1, 
                cvSize(win_size,win_size), cvSize(-1,-1), 
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03)); 
            add_remove_pt = 0; 
        } 
 
        CV_SWAP( prev_grey, grey, swap_temp ); 
        CV_SWAP( prev_pyramid, pyramid, swap_temp ); 
        CV_SWAP( points[0], points[1], swap_points ); 
        need_to_init = 0; 
 
       
        //writing image file to the file 
        //if(!cvSaveImage(str1,image)) printf("Could not save: %s\n",str1); 
        //storing in a video also 
  
         
        cvShowImage( "KLT-Tracking Group_R", image ); 
 
        c = cvWaitKey(100); 
        if( (char)c == 27 ) 
            break; 
        switch( (char) c ) 
        { 
        case 's': 
            need_to_init = 1; 
          } 
 
        counter1++; 
    } 
 
    cvReleaseCapture( &capture ); 
    cvDestroyWindow("KLT-Tracking Group_R"); 
 
    fcloseall(); 
     
    return 0; 
} 
Developer ID: KingBing, Project: OpenCV_OLD, Lines of code: 101, Source file: LK光流算法动作跟踪程序.cpp


Example 16: main

int main(int argc, char *argv[])
{
	char k;
	
	// CAMERA
	/*CvCapture *capture = cvCreateCameraCapture(1);
	
	frame = cvQueryFrame(capture);*/
	
	// IMAGE
	const char *imageFile = "./137cm.jpg";
	
	frame = cvLoadImage(imageFile,CV_LOAD_IMAGE_COLOR);
	
	imageFiltree = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageHSV = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageBinaire = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageErodee = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageDilatee = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageDilateeFiltree = cvCreateImage(cvGetSize(frame),frame->depth,1);
	imageObjectHSV = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageObjectRGB = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	imageFinale = cvCreateImage(cvGetSize(frame),frame->depth,frame->nChannels);
	
	storage = cvCreateMemStorage(0);
	
	for (;;)
	{
		// IMAGE
		frame = cvLoadImage(imageFile,CV_LOAD_IMAGE_COLOR);
		
		/*// CAM
		 frame = cvQueryFrame(capture);*/
		
		//cvSmooth(frame, imageFiltree, CV_BLUR,seuilFiltre,seuilFiltre,0.0,0.0);
		
		if (!frame)
			break;
		
		
		
		callback(0);
		cvCreateTrackbar("seuilFiltre", myWindow,&seuilFiltre, 11, callback);
		cvCreateTrackbar("H min", myWindow, &hmin,180, callback);
		cvCreateTrackbar("H max", myWindow, &hmax,180, callback);
		cvCreateTrackbar("S min", myWindow, &smin,255, callback);
		cvCreateTrackbar("S max", myWindow, &smax,255, callback);
		cvCreateTrackbar("V min", myWindow, &vmin,255, callback);
		cvCreateTrackbar("V max", myWindow, &vmax,255, callback);
		cvCreateTrackbar("nbDilatations", myWindow, &nbDilatations,10, callback);
		cvCreateTrackbar("nbErosions", myWindow, &nbErosions,10, callback);
		
		
		
		int delay = 10;
		k=cvWaitKey(delay);
		
		
		if(k=='s'){
			printf("sauvegarde\n");
			cvSaveImage("CaptureContours.jpg", frame,0);
			cvSaveImage("CapturePointDetecteHSV.jpg", imageObjectHSV,0);
			
		}
		if (k=='q'){
			printf("goodbye Kate\n");
			break;
		}
	}
	cvDestroyWindow(myWindowObjectHSV);
	cvDestroyWindow(myWindowObjectRGB);
	cvDestroyWindow(myWindow);
	//CAM cvReleaseCapture(&capture);
	cvReleaseImage(&imageFiltree);
	//......... the rest of the code is omitted here .........
