
C++ cvNamedWindow Function Code Examples


This article collects typical usage examples of the cvNamedWindow function in C++. If you are struggling with questions such as: What exactly does the C++ cvNamedWindow function do? How is cvNamedWindow called? What do real examples of cvNamedWindow look like? Then the hand-picked code examples below may help.



Below are 20 code examples of the cvNamedWindow function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
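
Before the collected examples, here is a minimal, self-contained sketch of the typical cvNamedWindow / cvShowImage / cvWaitKey cycle. It assumes the legacy OpenCV C API (highgui) and uses a placeholder image file name, "lena.jpg"; it is not taken from any of the examples below.

#include <opencv/highgui.h>

int main()
{
	// Load an image from disk ("lena.jpg" is only a placeholder name).
	IplImage* img = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_COLOR);
	if (!img)
		return -1;

	// Create a named window; CV_WINDOW_AUTOSIZE sizes the window to the image.
	cvNamedWindow("demo", CV_WINDOW_AUTOSIZE);
	cvShowImage("demo", img);   // display the image in the window named "demo"
	cvWaitKey(0);               // block until any key is pressed

	cvDestroyWindow("demo");    // close the window
	cvReleaseImage(&img);       // free the image
	return 0;
}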

Example 1: video_fb_init_preview


//.........part of the code omitted here.........
	/*
 	pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height,0,SDL_VIDEO_Flags);
	overlay = SDL_CreateYUVOverlay(fmt.fmt.pix.width, fmt.fmt.pix.height,SDL_YUY2_OVERLAY,pscreen);
	p = (unsigned char *)overlay->pixels[0];
	drect.x = 0;
	drect.y = 0;
	drect.w = pscreen->w;
	drect.h = pscreen->h;
	*/

	//SDL setup: RGB output
	//pscreen = SDL_SetVideoMode(fmt.fmt.pix.width, fmt.fmt.pix.height, 24, SDL_SWSURFACE | SDL_DOUBLEBUF);
	rmask = 0x000000ff;
	gmask = 0x0000ff00;
	bmask = 0x00ff0000;
	amask = 0x00000000;
	bpp   = 24;
	pitch = fmt.fmt.pix.width*3;
	pixels_num = fmt.fmt.pix.width*fmt.fmt.pix.height*3;
	pixels = (unsigned char *)malloc(pixels_num);
	memset(pixels, 0, pixels_num);
	p_RGB = (unsigned char *)pixels;
	//pscreen_RGB = SDL_CreateRGBSurfaceFrom(pixels, fmt.fmt.pix.width, fmt.fmt.pix.height, bpp, pitch, rmask, gmask, bmask, amask);

	
	//lasttime = SDL_GetTicks();
	//affmutex = SDL_CreateMutex();
	//SDL setup end
	
	//OpenCV setup
	CvMemStorage*  storage = cvCreateMemStorage(0);
	IplImage*      img     = cvCreateImageHeader(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 3);//image header only, no data buffer allocated
	IplImage*      imggray = cvCreateImage(cvSize(fmt.fmt.pix.width,fmt.fmt.pix.height), IPL_DEPTH_8U, 1);//image with its data buffer allocated
	cvNamedWindow("image", 1);

	unsigned char *pRGB = NULL;
	pRGB = (unsigned char *)calloc(1,fmt.fmt.pix.width*fmt.fmt.pix.height*3*sizeof(unsigned char));
	//OpenCV setup end

	//------------------------ request frame buffers ---------------------//
	struct v4l2_requestbuffers req;
	memset(&req, 0, sizeof (req));
	req.count = 3;	//number of buffers, i.e. how many frames can be stored
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	//stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE
	req.memory = V4L2_MEMORY_MMAP;	//memory type: V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
	if (ioctl(video_fd, VIDIOC_REQBUFS, &req) == -1)//apply the configuration
	{
		perror("request buffer error \n");
		return 2;
	}
	//-------------------------------------------------------//
	
	//-------- map the memory obtained via VIDIOC_REQBUFS into the process address space -------------//
	buffers = calloc(req.count, sizeof(VideoBuffer));	
	//printf("sizeof(VideoBuffer) is %d\n", sizeof(VideoBuffer));
	struct v4l2_buffer buf;
	for (numBufs = 0; numBufs < req.count; numBufs++)
	{
		memset( &buf, 0, sizeof(buf));
		buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;	
		//memory type: V4L2_MEMORY_MMAP (memory mapping) or V4L2_MEMORY_USERPTR (user pointer)
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = numBufs;
		if (ioctl(video_fd, VIDIOC_QUERYBUF, &buf) < 0)//query the buffer information
		{
			printf("VIDIOC_QUERYBUF error\n");
Developer ID: muzilike, Project: roubosys, Lines of code: 67, Source: litearry.c


Example 2: getPegthresholdFromUser

	int getPegthresholdFromUser(IplImage *img, Gui *gui, string message, int pegThreshVal, Rect r, cv::Mat &fgMaskPeg)
	{
		cv::Mat element[1];
		int count = 0;

		element[0] = getStructuringElement(MORPH_ELLIPSE, Size(8, 8), Point(0, 0));
		
		window_name = gui->windowName();
		cvDestroyWindow(window_name.c_str());
		cvNamedWindow(window_name.c_str(), CV_WINDOW_AUTOSIZE);
		cvMoveWindow(window_name.c_str(), 100, 100);

		img0 = (IplImage *)cvClone(img);
		char TrackbarName[50];
		sprintf(TrackbarName, "thresh x %d", slider_max);

		slider_val = pegThreshVal;
		createTrackbar(TrackbarName, window_name, &slider_val, slider_max, 0);

		

		Mat src, im1, im3;
		src = Mat(img0);

		im1 = Mat::zeros(src.size(), src.type());
		cvtColor(src, im3, CV_BGR2HSV);
		vector<vector<Point> > pegsI;
		while (1)
		{
			pegsI.clear();
			Mat channel[3];
			split(im3, channel);


			//Mat fgMaskRing;
			inRange(channel[2], slider_val, 255, fgMaskPeg);
			// ROI
			for (int y = 0; y < fgMaskPeg.rows; y++)
			{
				for (int x = 0; x < fgMaskPeg.cols; x++)
				{
					if (!(x >= r.tl().x && x <= r.br().x && y >= r.tl().y && y <= r.br().y))
					{
						fgMaskPeg.at<uchar>(Point(x, y)) = 0;
					}
				}
			}
			erode(fgMaskPeg, fgMaskPeg, element[0]);
			dilate(fgMaskPeg, fgMaskPeg, element[0]);
			erode(fgMaskPeg, fgMaskPeg, element[0]);
			dilate(fgMaskPeg, fgMaskPeg, element[0]);

			//p.copyTo(p, fgMaskPeg);
			for (int y = 0; y < src.rows; y++)
			{
				for (int x = 0; x < src.cols; x++)
				{
					if (fgMaskPeg.at<uchar>(Point(x, y)))
					{
						im1.at<Vec3b>(Point(x, y)) = src.at<Vec3b>(Point(x, y));
					}
					else
					{
						im1.at<Vec3b>(Point(x, y)) = Vec3b(0,0,0);
					}
				}
			}

			Mat mask = fgMaskPeg.clone();
			vector<Vec4i> hierarchy_ring;

			//imshow("Initial mask", initial_ring_mask);
			findContours(mask, pegsI, hierarchy_ring, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
			count = pegsI.size();



			cout << "count Pegs->" << count << endl;
			cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, 1, 8);
			putText(im1, message.c_str(), cvPoint(0, 60), CV_FONT_HERSHEY_SIMPLEX, .7, Scalar(255, 255, 0), 1);

			imshow(window_name.c_str(), im1);
			char key = cvWaitKey(40);
			if ((key == '\r' || key == '\n' || key == '\r\n'))
			{
				if (count == 12)
				{
					break;
				}
			}
			count = 0;
		} 
		cvReleaseImage(&img0);
		return slider_val;
	}
Developer ID: brittybaby, Project: OpenTLD, Lines of code: 95, Source: Gui.cpp


Example 3: printf

bool VisionPipeLine::init()
{
    // query the first frame
    _currentFrame = _input->retrieveFrame();
    if (!_currentFrame) return false;

    _currentframeSz.height = _currentFrame->height;
    _currentframeSz.width = _currentFrame->width;

    if (_currentframeSz.height != SRC_IMG_HEIGHT || _currentframeSz.width != SRC_IMG_WIDTH)
    {
        printf("Please set your camera to the resolution %dx%d\n", SRC_IMG_WIDTH, SRC_IMG_HEIGHT);
        return false;
    }

    std::string filepath_cam = getPlatformConfigPrefix();
    std::string filepath_distort = getPlatformConfigPrefix();
    filepath_cam+=FILEPATH_CAMERA_INTRINSICS;
    filepath_distort+=FILEPATH_CAMERA_DISTORT;
    try {
        
        if (!_undistortor.loadCalibrationfromFile(filepath_cam.c_str(), filepath_distort.c_str()))
        {
            // no user defined calib data found, load predefined
            filepath_cam = getPlatformResPrefix();
            filepath_distort = getPlatformResPrefix();

            filepath_cam+= FILEPATH_RESOURCE_PREDEFINE_FOLDER;
            filepath_distort+= FILEPATH_RESOURCE_PREDEFINE_FOLDER;

            filepath_cam+= RELEASE_VENDOR_TYPE;
            filepath_distort+= RELEASE_VENDOR_TYPE;

            filepath_cam+= FILEPATH_PREDEFINE_CAMERA_INTRINSICS;
            filepath_distort+= FILEPATH_PREDEFINE_CAMERA_DISTORT;
            if (!_undistortor.loadCalibrationfromFile(filepath_cam.c_str(), filepath_distort.c_str()))
            {
                printf("warning, no camera calibration file found.\n");
                _noLenCalibration = true;
            }
        }
    } 
    catch(...){
        printf("warning, unexpected error happens during loading the camera calibration files.\n");
        _noLenCalibration = true;
    }

    _grayFrame = cvCreateImage(cvSize(RECTIFIED_IMG_W,RECTIFIED_IMG_H), 8, 1);
    _rectified = cvCreateImage(cvSize(RECTIFIED_IMG_W,RECTIFIED_IMG_H), 8, 3);
    _projected = cvCreateImage(cvSize(PROJECTED_WIDTH,PROJECTED_HEIGHT), 8, 3);
    
    
    _final_disp = cvCreateImage(cvSize(PROJECTED_WIDTH + SRC_IMG_WIDTH, SRC_IMG_HEIGHT + KBD_IMG_HEIGHT ), 8, 3);

    //cvNamedWindow(WINDOWNAME_RECTIFIED);
    //cvNamedWindow(WINDOWNAME_ORIGINAL);
    //cvNamedWindow(WINDOWNAME_PROJECTED);
    cvNamedWindow(WINDOWNAME_STATUS);

    cvSetMouseCallback(WINDOWNAME_STATUS, &VisionPipeLine::s_onMouse, this);
   
    
    _ui_banner_frame.setImage(g_resouce_mgr.getImageRes("main_banner.png"));
    _ui_btn_keyboard_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.keyboard.png"));
    _ui_btn_keyboard_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.keyboard.active.png"));

    _ui_btn_touchpad_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.pad.png"));
    _ui_btn_touchpad_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.pad.active.png"));

    _ui_btn_calib_mode.setNormalImage(g_resouce_mgr.getImageRes("btn.calibration.png"));
    _ui_btn_calib_mode.setActiveImage(g_resouce_mgr.getImageRes("btn.calibration.active.png"));

    _ui_btn_keyboard_mode.moveTo((_final_disp->width-(_ui_btn_keyboard_mode.getWidth()+5)*3)/2,
        (_ui_banner_frame.getHeight() - _ui_btn_keyboard_mode.getHeight())/2);

    _ui_btn_touchpad_mode.moveTo(_ui_btn_keyboard_mode.getRight()+5, _ui_btn_keyboard_mode.getY());
    _ui_btn_calib_mode.moveTo(_ui_btn_touchpad_mode.getRight()+5, _ui_btn_keyboard_mode.getY());


    _ui_btn_rplogo.setNormalImage(g_resouce_mgr.getImageRes("logobtn.png"));
    _ui_btn_rplogo.setActiveImage(g_resouce_mgr.getImageRes("logobtn.active.png"));

    _ui_btn_upgrade.setNormalImage(g_resouce_mgr.getImageRes("btn.update.png"));
    _ui_btn_upgrade.setActiveImage(g_resouce_mgr.getImageRes("btn.update.active.png"));


    _ui_btn_rplogo.moveTo(_final_disp->width-_ui_btn_rplogo.getWidth(), 0);
    _ui_btn_upgrade.moveTo(0, 0);

    _ui_btn_upgrade.setVisible(false);

    _uicontainer.addRenderObject(&_ui_banner_frame);
    _uicontainer.addRenderObject(&_ui_btn_keyboard_mode);
    _uicontainer.addRenderObject(&_ui_btn_touchpad_mode);
    _uicontainer.addRenderObject(&_ui_btn_calib_mode);
    
    _uicontainer.addRenderObject(&_ui_btn_rplogo);
    _uicontainer.addRenderObject(&_ui_btn_upgrade);

    _ui_btn_keyboard_mode.setListener(&VisionPipeLine::s_on_keyboardmode, this);
//.........part of the code omitted here.........
Developer ID: ZChris, Project: laserkbd, Lines of code: 101, Source: cvpipeline.cpp


Example 4: mainStaticMatchStrengths

int mainStaticMatchStrengths()
{
  bool matchGlobalOrientations = true;

  // Make images as Mats; convert to IplImage for OpenSURF library actions
  cv::Mat mimg1, mimg2;
  mimg1=cv::imread("OpenSURF/imgs/img1.jpg", CV_LOAD_IMAGE_COLOR);
  mimg2=cv::imread("OpenSURF/imgs/img2.jpg", CV_LOAD_IMAGE_COLOR);

  IplImage iimg1, iimg2;
  iimg1=mimg1;
  iimg2=mimg2;

  IplImage *img1, *img2;
  img1 = &iimg1;
  img2 = &iimg2;

  IpVec ipts1, ipts2;
  surfDetDes(img1,ipts1,false,4,4,2,0.0001f,matchGlobalOrientations);
  surfDetDes(img2,ipts2,false,4,4,2,0.0001f,matchGlobalOrientations);

  MatchVec matches;
  getMatchesSymmetric(ipts1,ipts2,matches);

  IpVec mpts1, mpts2;

  const int & w = img1->width;

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    float strengthOverThreshold = 1 - matches[i].second; // /MATCH_THRESHOLD;
    strengthOverThreshold*=255;
    CvScalar clr = cvScalar(strengthOverThreshold,strengthOverThreshold,strengthOverThreshold);
    clr = cvScalar(255,255,255);
    
    //drawPoint(img1,matches[i].first.first,clr);
    //drawPoint(img2,matches[i].first.second,clr),
    mpts1.push_back(matches[i].first.first);
    mpts2.push_back(matches[i].first.second);
  
    cvLine(img1,cvPoint(matches[i].first.first.x,matches[i].first.first.y),cvPoint(matches[i].first.second.x+w,matches[i].first.second.y), clr,1);
    cvLine(img2,cvPoint(matches[i].first.first.x-w,matches[i].first.first.y),cvPoint(matches[i].first.second.x,matches[i].first.second.y), clr,1);
  }

  drawIpoints(img1,mpts1);
  drawIpoints(img2,mpts2);

  std::cout<< "Matches: " << matches.size() << std::endl;

  cvNamedWindow("1", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("2", CV_WINDOW_AUTOSIZE );
  cvShowImage("1", img1);
  cvShowImage("2",img2);
  cvWaitKey(0);

  // NOW DO IT AGAIN!
  cv::Mat mimg3, mimg4;
  mimg3=cv::imread("OpenSURF/imgs/img1.jpg", CV_LOAD_IMAGE_COLOR);
  mimg4=cv::imread("OpenSURF/imgs/img2.jpg", CV_LOAD_IMAGE_COLOR);

  IplImage iimg3, iimg4;
  iimg3=mimg3;
  iimg4=mimg4;

  IplImage *img3, *img4;
  img3 = &iimg3;
  img4 = &iimg4;

  IpVec ipts3, ipts4;
  surfDetDes(img3,ipts3,false,4,4,2,0.0001f,!matchGlobalOrientations);
  surfDetDes(img4,ipts4,false,4,4,2,0.0001f,!matchGlobalOrientations);

  matches.clear();
  getMatchesSymmetric(ipts3,ipts4,matches);

  IpVec mpts3, mpts4;

  for (unsigned int i = 0; i < matches.size(); ++i)
  {
    float strengthOverThreshold = 1 - matches[i].second; // /MATCH_THRESHOLD;
    strengthOverThreshold*=255;
    CvScalar clr = cvScalar(strengthOverThreshold,strengthOverThreshold,strengthOverThreshold);
    clr = cvScalar(255,255,255);
    
    //drawPoint(img1,matches[i].first.first,clr);
    //drawPoint(img2,matches[i].first.second,clr),
    mpts3.push_back(matches[i].first.first);
    mpts4.push_back(matches[i].first.second);
  
    cvLine(img3,cvPoint(matches[i].first.first.x,matches[i].first.first.y),cvPoint(matches[i].first.second.x+w,matches[i].first.second.y), clr,1);
    cvLine(img4,cvPoint(matches[i].first.first.x-w,matches[i].first.first.y),cvPoint(matches[i].first.second.x,matches[i].first.second.y), clr,1);
  }

  drawIpoints(img3,mpts3);
  drawIpoints(img4,mpts4);

  std::cout<< "Matches: " << matches.size() << std::endl;

  cvNamedWindow("3", CV_WINDOW_AUTOSIZE );
  cvNamedWindow("4", CV_WINDOW_AUTOSIZE );
//.........part of the code omitted here.........
Developer ID: izewiske, Project: 3Hat, Lines of code: 101, Source: ContourSURF.cpp


Example 5: main

int main( int argc, char** argv )
{
    CvSize imgSize;                 
    imgSize.width = 320; 
    imgSize.height = 240; 
	
	int key= -1; 
	
	// set up opencv capture objects

    CvCapture* capture= cvCaptureFromCAM(0); 
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
	
    CvCapture* capture2= cvCaptureFromCAM(1); 
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_HEIGHT, 240);

    CvCapture* capture3= cvCaptureFromCAM(2); 
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_WIDTH, 320);
	cvSetCaptureProperty(capture3, CV_CAP_PROP_FRAME_HEIGHT, 240);

    
	// allocate image storage (other createimage specifiers: IPL_DEPTH_32F, IPL_DEPTH_8U)
	
    IplImage* colourImage  = cvCloneImage(cvQueryFrame(capture)); 
    IplImage* greyImage    = cvCreateImage(cvGetSize(colourImage), IPL_DEPTH_8U, 1); 
    IplImage* hannImage    = cvCloneImage(greyImage); 
	IplImage *poc= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_64F, 1 );
	IplImage *pocdisp= cvCreateImage( cvSize( greyImage->width, kFFTStoreSize ), IPL_DEPTH_8U, 1 );
	
	// set up opencv windows
	
    cvNamedWindow("hannImage", 1);
    cvNamedWindow("greyImage", 1); 
    cvNamedWindow("greyImage2", 1); 
    cvNamedWindow("greyImage3", 1); 
    cvNamedWindow("poc", 1);
	cvMoveWindow("greyImage", 40, 0);
	cvMoveWindow("hannImage", 40, 270);
	cvMoveWindow("poc", 365, 0);
	cvMoveWindow("greyImage2", 40, 540);
	cvMoveWindow("greyImage3", 365, 540);
	
	// set up storage for fftw
	
	fftw_complex *fftwSingleRow = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwSingleRow2 = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * 1 );
	fftw_complex *fftwStore = ( fftw_complex* )fftw_malloc( sizeof( fftw_complex ) * kFFTWidth * kFFTStoreSize );
		
	// loop
	
    while(key != 'q') 
	{ 

		//		double t = (double)cvGetTickCount();
		//		printf( "%g ms: start.\n", (cvGetTickCount() - t)/((double)cvGetTickFrequency()*1000.));

		// capture a frame, convert to greyscale, and show it
		
		cvCopyImage(cvQueryFrame(capture), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage",greyImage); 

        cvCopyImage(cvQueryFrame(capture2), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage2",greyImage); 

        cvCopyImage(cvQueryFrame(capture3), colourImage);  // cvCopy because both are allocated already!
		cvCvtColor(colourImage,greyImage,CV_BGR2GRAY); 
		cvShowImage("greyImage3",greyImage);

        
        key = cvWaitKey(3);

		// project and calculate hann window
		
		int i, j, k;
		uchar 	*inData= ( uchar* ) greyImage->imageData;
		uchar 	*hannImageData= ( uchar* ) hannImage->imageData;
		unsigned long acc;
		
		for( j = 0 ; j < greyImage->width ; j++) {
			
			// sum input column
			
			acc= 0;
			for( i = 0; i < greyImage->height ; i++ ) {
				acc+= inData[i * greyImage->widthStep + j];
			}
			
			// hann window and output
			
			for( i = 0; i < 240 ; i++ ) {
				double hannMultiplier = 0.5 * (1 - cos(2*3.14159*j/(greyImage->width-1)));  // hann window coefficient
				hannImageData[i * hannImage->widthStep + j]=  hannMultiplier * (acc/greyImage->height);
			}
			
		}

//.........part of the code omitted here.........
Developer ID: trevyn, Project: opencvtest4, Lines of code: 101, Source: main.cpp


Example 6: main


//.........part of the code omitted here.........
				right1.y+=pt1.y;
				
				right2.x+=pt2.x;
				right2.y+=pt2.y;
			}
		}
	}

	// we've done the adding... now the dividing to get the "averaged" point
	left1.x/=numLeft;
	left1.y/=numLeft;
	left2.x/=numLeft;
	left2.y/=numLeft;

	right1.x/=numRight;
	right1.y/=numRight;
	right2.x/=numRight;
	right2.y/=numRight;

	top1.x/=numTop;
	top1.y/=numTop;
	top2.x/=numTop;
	top2.y/=numTop;

	bottom1.x/=numBottom;
	bottom1.y/=numBottom;
	bottom2.x/=numBottom;
	bottom2.y/=numBottom;

	// Render these lines onto the image
	cvLine(img, left1, left2, CV_RGB(255, 0,0), 1);
	cvLine(img, right1, right2, CV_RGB(255, 0,0), 1);
	cvLine(img, top1, top2, CV_RGB(255, 0,0), 1);
	cvLine(img, bottom1, bottom2, CV_RGB(255, 0,0), 1);

	// Next, we need to figure out the four intersection points
	double leftA = left2.y-left1.y;
	double leftB = left1.x-left2.x;
	double leftC = leftA*left1.x + leftB*left1.y;

	double rightA = right2.y-right1.y;
	double rightB = right1.x-right2.x;
	double rightC = rightA*right1.x + rightB*right1.y;

	double topA = top2.y-top1.y;
	double topB = top1.x-top2.x;
	double topC = topA*top1.x + topB*top1.y;

	double bottomA = bottom2.y-bottom1.y;
	double bottomB = bottom1.x-bottom2.x;
	double bottomC = bottomA*bottom1.x + bottomB*bottom1.y;

	// Intersection of left and top
	double detTopLeft = leftA*topB - leftB*topA;
	CvPoint ptTopLeft = cvPoint((topB*leftC - leftB*topC)/detTopLeft, (leftA*topC - topA*leftC)/detTopLeft);

	// Intersection of top and right
	double detTopRight = rightA*topB - rightB*topA;
	CvPoint ptTopRight = cvPoint((topB*rightC-rightB*topC)/detTopRight, (rightA*topC-topA*rightC)/detTopRight);

	// Intersection of right and bottom
	double detBottomRight = rightA*bottomB - rightB*bottomA;
	CvPoint ptBottomRight = cvPoint((bottomB*rightC-rightB*bottomC)/detBottomRight, (rightA*bottomC-bottomA*rightC)/detBottomRight);

	// Intersection of bottom and left
	double detBottomLeft = leftA*bottomB-leftB*bottomA;
	CvPoint ptBottomLeft = cvPoint((bottomB*leftC-leftB*bottomC)/detBottomLeft, (leftA*bottomC-bottomA*leftC)/detBottomLeft);

	// Render the points onto the image
	cvLine(img, ptTopLeft, ptTopLeft, CV_RGB(0,255,0), 5);
	cvLine(img, ptTopRight, ptTopRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomRight, ptBottomRight, CV_RGB(0,255,0), 5);
	cvLine(img, ptBottomLeft, ptBottomLeft, CV_RGB(0,255,0), 5);

	// Initialize a mask
	IplImage* imgMask = cvCreateImage(imgSize, 8, 3);
	cvZero(imgMask);

	// Generate the mask
	CvPoint* pts = new CvPoint[4];
	pts[0] = ptTopLeft;
	pts[1] = ptTopRight;
	pts[2] = ptBottomRight;
	pts[3] = ptBottomLeft;
	cvFillConvexPoly(imgMask, pts, 4, cvScalar(255,255,255));

	// Delete anything thats outside the mask
	cvAnd(img, imgMask, img);

	// Show all images in windows
	cvNamedWindow("Original");
	cvNamedWindow("Detected");

	cvShowImage("Original", img);
	cvShowImage("Detected", detected);

	cvWaitKey(0);

	return 0;
}
Developer ID: aishack, Project: robotics-arena-crop, Lines of code: 101, Source: ArenaCrop.cpp


Example 7: main

/**
 * @brief Main function
 * @param argc The number of program arguments
 * @param argv The program argument strings
 * @return Nothing if everything is correct, or a negative number on error
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Variable declarations
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	histogram **ref_histos, *histo_aux;
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	CvRect *regions;
	int num_objects = 0;
	int i = 1, MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	hsv_frame = bgr2hsv( first_frame );
	histo_aux = (histogram*) malloc( sizeof(histogram) );
	histo_aux->n = NH*NS + NV;
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
			
	// Compute the reference histograms and distribute the initial particles
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Display the tracking
	if( show_tracking ) {

		// Display all the particles
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Draw the most promising particle of each object
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "Video", 1 );
		cvShowImage( "Video", first_frame );
		cvWaitKey( 5 );
	}

	// Export the reference histograms and the frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				return -1;
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

	cvReleaseImage( &hsv_frame );
	
	// Initialize the random number generator
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	// Remember that frame must not be released because of cvQueryFrame
	while( frame = cvQueryFrame( video ) ) {
		hsv_frame = bgr2hsv( frame );
//.........part of the code omitted here.........
Developer ID: rotty11, Project: Tfm, Lines of code: 101, Source: trackerCopias.cpp


Example 8: demo

void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, float hier, int w, int h, int frames, int fullscreen)
{
    //skip = frame_skip;
    image **alphabet = load_alphabet();
    int delay = frame_skip;
    demo_names = names;
    demo_alphabet = alphabet;
    demo_classes = classes;
    demo_thresh = thresh;
    demo_hier = hier;
    printf("Demo\n");
    net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    set_batch_network(&net, 1);

    srand(2222222);

    if(filename){
        printf("video file: %s\n", filename);
        cap = cvCaptureFromFile(filename);
    }else{
        cap = cvCaptureFromCAM(cam_index);

        if(w){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
        }
        if(h){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
        }
        if(frames){
            cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
        }
    }

    if(!cap) error("Couldn't connect to webcam.\n");

    layer l = net.layers[net.n-1];
    int j;

    avg = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
    for(j = 0; j < FRAMES; ++j) images[j] = make_image(1,1,3);

    boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
    probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
    for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes, sizeof(float));

    pthread_t fetch_thread;
    pthread_t detect_thread;

    fetch_in_thread(0);
    det = in;
    det_s = in_s;

    fetch_in_thread(0);
    detect_in_thread(0);
    disp = det;
    det = in;
    det_s = in_s;

    for(j = 0; j < FRAMES/2; ++j){
        fetch_in_thread(0);
        detect_in_thread(0);
        disp = det;
        det = in;
        det_s = in_s;
    }

    int count = 0;
    if(!prefix){
        cvNamedWindow("Demo", CV_WINDOW_NORMAL); 
        if(fullscreen){
            cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
        } else {
            cvMoveWindow("Demo", 0, 0);
            cvResizeWindow("Demo", 1352, 1013);
        }
    }

    double before = get_wall_time();

    while(1){
        ++count;
        if(1){
            if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
            if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");

            if(!prefix){
                show_image(disp, "Demo");
                int c = cvWaitKey(1);
		if (c != -1) c = c%256;
                if (c == 10){
                    if(frame_skip == 0) frame_skip = 60;
                    else if(frame_skip == 4) frame_skip = 0;
                    else if(frame_skip == 60) frame_skip = 4;   
                    else frame_skip = 0;
                } else if (c == 27) {
                    return;
//.........part of the code omitted here.........
Developer ID: dagoliveira, Project: radiation-benchmarks, Lines of code: 101, Source: demo.c


Example 9: cvCreateImage

////////////////////////////////////////////////////////////////////////////////////
// Display the tensor information at each scale as a color image
////////////////////////////////////////////////////////////////////////////////////
void Tensor::ShowTensorByColorImage()
{
	double ret_minr=0.0;
	double ret_maxr=0.0;
	double ret_ming=0.0;
	double ret_maxg=0.0;
	double ret_minb=0.0;
	double ret_maxb=0.0;
	int x,y,i;
	// texture features
	IplImage **pImg= new IplImage *[m_levels];
	for (i = 0;i < m_levels;i++)
	{
		pImg[i] = cvCreateImage( cvGetSize(m_img), m_img->depth, 3);
		cvZero(pImg[i]);
	}

	CString * ptitle=new CString [m_levels];

	for (i=0;i<m_levels;i++)
	{
		// find the upper and lower bounds of each color channel in the image
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				if((*m_pImageTensorRGB[i])(x,y).r>ret_maxr)
				{
					ret_maxr=(*m_pImageTensorRGB[i])(x,y).r;
				}
				if ((*m_pImageTensorRGB[i])(x,y).r<ret_minr)
				{
					ret_minr=(*m_pImageTensorRGB[i])(x,y).r;
				}

				if((*m_pImageTensorRGB[i])(x,y).g>ret_maxg)
				{
					ret_maxg=(*m_pImageTensorRGB[i])(x,y).g;
				}
				if ((*m_pImageTensorRGB[i])(x,y).g<ret_ming)
				{
					ret_ming=(*m_pImageTensorRGB[i])(x,y).g;
				}

				if((*m_pImageTensorRGB[i])(x,y).b>ret_maxb)
				{
					ret_maxb=(*m_pImageTensorRGB[i])(x,y).b;
				}
				if ((*m_pImageTensorRGB[i])(x,y).b<ret_minb)
				{
					ret_minb=(*m_pImageTensorRGB[i])(x,y).b;
				}

			}
		}
		uchar * dst=(uchar *)pImg[i]->imageData;
		for (y=0; y<m_h;y++)
		{
			for (x=0;x<m_w;x++)
			{
				int temp=y*(pImg[i]->widthStep)+3*x;
				// scale each channel to 0..255 (255 rather than 256, so the maximum value does not overflow uchar)
				dst[temp+2]=(uchar)(((*m_pImageTensorRGB[i])(x,y).r-ret_minr)/(ret_maxr-ret_minr)*255);
				dst[temp+1]=(uchar)(((*m_pImageTensorRGB[i])(x,y).g-ret_ming)/(ret_maxg-ret_ming)*255);
				dst[temp+0]=(uchar)(((*m_pImageTensorRGB[i])(x,y).b-ret_minb)/(ret_maxb-ret_minb)*255);
			}
		}
		ptitle[i].Format(_T("Image Texture of Level %d"),i);
		cvNamedWindow((char *)(LPCTSTR)ptitle[i],CV_WINDOW_AUTOSIZE);
		cvShowImage((char *)(LPCTSTR)ptitle[i],pImg[i]);
	}
	if (pImg != NULL)
	{
		for (i=0;i<m_levels;i++)
		{
			cvReleaseImage(&pImg[i]);
		}
		delete [] pImg;
	}
}
Developer ID: xwlaina, Project: GrabCut, Lines of code: 82, Source: Tensor.cpp


Example 10: _tmain

int _tmain(int argc, _TCHAR* argv[])
{
	location bot, target;
	bot.x = 300;		bot.y = 20;		bot.theta = 90.0;
	target.x = 300;	target.y = 450;	target.theta = 90.000;

	list *ol = NULL, *cl = NULL;
	elem e,vare;
	e.l = bot;	e.g = 0;	e.h = 0;	e.id = UNDEFINED;

	int n = 13;
	elem* np = loadPosData(n);
	
	while(1)
	{
		cl = append(cl, e);
		//printList(cl);
		if(isNear(e.l, target))
			break;
		ol = update(ol, e, target, np, n);
		//printList(ol);
		e = findMin(ol);
		printf("Min: (%.3f, %.3f, %.3f)\n", e.l.x, e.l.y, e.l.theta);
		ol = detach(ol, e);
		//printList(ol);
		//getchar();
	}
	//getchar();
	cvNamedWindow("hello",CV_WINDOW_AUTOSIZE);
	IplImage *img = cvCreateImage(cvSize(500, 500), IPL_DEPTH_8U, 3);
	cvCircle(img, cvPoint(300, 500-300), 45, CV_RGB(0, 15, 200), 1, CV_AA, 0);

	//list *t = cl;
	//while(t)
	//{
	//	cvLine(img,cvPoint(t->p.parent.x*40,500-(t->p.parent.y*40)),cvPoint(t->p.parent.x*40+2,500-(t->p.parent.y*40)-2),CV_RGB(255,255,0),2,CV_AA,0);
	//	//printf("(%.3f, %.3f) ", t->p.l.x, t->p.l.y);
	//	t=t->next;
	//}
	CvPoint a = cvPoint(target.x, 500 - (target.y));
	CvPoint b = cvPoint((target.x + 10*cos(target.theta*(CV_PI/180))), 500 - ((target.y+10*sin(target.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(0,255,0), 2, CV_AA, 0);

	a = cvPoint(bot.x, 500 - (bot.y));
	b = cvPoint((bot.x + 10*cos(bot.theta*(CV_PI/180))), 500 - ((bot.y+10*sin(bot.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(0,0,255), 2, CV_AA, 0);

	vare = e;
	a = cvPoint(vare.l.x, 500 - (vare.l.y));
	b = cvPoint((vare.l.x + 10*cos(vare.l.theta*(CV_PI/180))), 500 - ((vare.l.y+10*sin(vare.l.theta*(CV_PI/180)))));
	cvLine(img, a, b, CV_RGB(255,0,0), 2, CV_AA, 0);
	
	printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
	while(!((abs(vare.l.x-bot.x) < 1.25) && (abs(vare.l.y-bot.y) < 1.25)))
	{
		vare=searchforcoor(cl,vare.parent.x,vare.parent.y);
		if(vare.id != -1)
		{
			printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
			a = cvPoint(vare.l.x, 500 - (vare.l.y));
			b = cvPoint((vare.l.x + 10*cos(vare.l.theta*(CV_PI/180))), 500 - ((vare.l.y+10*sin(vare.l.theta*(CV_PI/180)))));
			cvLine(img, a, b, CV_RGB(255,0,0), 2, CV_AA, 0);
		}
	}

	cvShowImage("hello",img);
	cvWaitKey(0);
}
Developer ID: bhuvnesh-agarwal, Project: IGVC-2012, Lines of code: 68, Source: CStar.cpp


Example 11: surf_match

void surf_match(IplImage* object_color, IplImage* object, IplImage* image,const CvSeq *objectKeypoints,const CvSeq *imageKeypoints,const CvSeq * objectDescriptors,const CvSeq * imageDescriptors, CvPoint val[4])
{
    cvNamedWindow("Object", 0);
    cvNamedWindow("Object Correspond", 0);

    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}},
        {{255,255,255}}
    };

    int i;

	CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
    CvPoint dst_corners[4];
    IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    cvCopy( object, correspond );
    cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    cvCopy( image, correspond );
    cvResetImageROI( correspond );

#ifdef USE_FLANN
    printf("Using approximate nearest neighbor search\n");
#endif

    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints,
        imageDescriptors, src_corners, dst_corners ))
    {
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            cvLine( correspond, cvPoint(r1.x, r1.y+object->height ),
                cvPoint(r2.x, r2.y+object->height ), colors[8] );
        }
    }
    vector<int> ptpairs;
#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif
    for( i = 0; i < (int)ptpairs.size(); i += 2 )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs[i] );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, ptpairs[i+1] );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }

    cvShowImage( "Object Correspond", correspond );
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);

    cvDestroyWindow("Object");
    cvDestroyWindow("Object SURF");
    cvDestroyWindow("Object Correspond");

	//CvPoint val[4];
	for(int k=0;k<4;k++)
	{
//		printf("%d %d \n", dst_corners[k].x, dst_corners[k].y);
		val[k] = dst_corners[k]; 
		val[k] = dst_corners[k]; 
	}

}
Developer ID: gatsoulis, Project: cappocacciaactivevision, Lines of code: 86, Source: SURF.cpp


Example 12: main

int main()
{
	// Initialize capturing live feed from the camera
	CvCapture* capture = 0;
	capture = cvCaptureFromCAM(1);	 // the index depends on which camera you are capturing from
	// Couldn't get a device? Throw an error and quit
	if(!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }

	// The two windows we'll be using
    cvNamedWindow("video");
	cvNamedWindow("thresh");

	// This image holds the "scribble" data...
	// the tracked positions of the ball
	IplImage* imgScribble = NULL;

	// An infinite loop
	while(true)
    {
		// Will hold a frame captured from the camera
		IplImage* frame = 0;
		frame = cvQueryFrame(capture);

		// If we couldn't grab a frame... quit
        if(!frame)
            break;
		
		// If this is the first frame, we need to initialize it
		if(imgScribble == NULL)
		{
			imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
		}

		// Holds the yellow thresholded image (yellow = white, rest = black)
		IplImage* imgYellowThresh = GetThresholdedImage(frame);

		// Calculate the moments to estimate the position of the ball
		CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
		cvMoments(imgYellowThresh, moments, 1);

		// The actual moment values
		double moment10 = cvGetSpatialMoment(moments, 1, 0);
		double moment01 = cvGetSpatialMoment(moments, 0, 1);
		double area = cvGetCentralMoment(moments, 0, 0);

		// Holding the last and current ball positions
		static int posX = 0;
		static int posY = 0;

		int lastX = posX;
		int lastY = posY;

		posX = moment10/area;
		posY = moment01/area;

		// Print it out for debugging purposes
		printf("position (%d,%d)\n", posX, posY);

		// We want to draw a line only if its a valid position
		if(lastX>0 && lastY>0 && posX>0 && posY>0)
		{
			// Draw a yellow line from the previous point to the current point
			cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,255,255), 5);
		}

		// Add the scribbling image and the frame... and we get a combination of the two
		cvAdd(frame, imgScribble, frame);
		cvShowImage("thresh", imgYellowThresh);
		cvShowImage("video", frame);

		// Wait for a keypress
		int c = cvWaitKey(10);
		if(c!=-1)
		{
			// If pressed, break out of the loop
            break;
		}

		// Release the thresholded image... we need no memory leaks.. please
		cvReleaseImage(&imgYellowThresh);

		delete moments;
    }

	// We're done using the camera. Other applications can now use it
	cvReleaseCapture(&capture);
    return 0;
}
Developer ID: dirtydevil, Project: gesto-Paint, Lines of code: 92, Source: gestoPaint.cpp


Example 13: findFirstChar

void basicOCR::printCvSeq(CvSeq* seq, IplImage* imgSrc, IplImage* img_gray, CvMemStorage* storage)
{
	CvSeq* si = seq;
	CvRect rcFirst = findFirstChar(seq, 0);
	if (rcFirst.x == 0)
	{
		printf("No words found...\n");
		return;
	}
	else
		printf("\nOCR of text:\n");
	CvRect rcNewFirst = rcFirst;
	cvDrawRect(imgSrc, cvPoint(rcFirst.x, rcFirst.y), cvPoint(rcFirst.x + rcFirst.width, rcFirst.y + rcFirst.height), CV_RGB(0, 0, 0));
	int printX = rcFirst.x - 1;
	int printY = rcFirst.y - 1;

	int idx = 0;
	char szName[56] = {0};
	int tempCount=0;

	while (true)
	{
		CvRect rc = findPrintRect(seq, printX, printY, rcFirst);
		cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(0, 0, 0));
		// dealing with useless Part
		/*if (rc.width <= 1 && rc.height <= 1)
		{
		continue;
		}*/

		if (printX < rc.x)
		{
			if ((rc.x - printX) >= (rcFirst.width / 2))
				printf(" ");
			printX = rc.x;
			//cvDrawRect(imgSrc, cvPoint(rc.x, rc.y), cvPoint(rc.x + rc.width, rc.y + rc.height), CV_RGB(255, 0, 0));
			IplImage* imgNo = cvCreateImage(cvSize(rc.width, rc.height), IPL_DEPTH_8U, 3);
			cvSetImageROI(imgSrc, rc);
			cvCopyImage(imgSrc, imgNo);
			cvResetImageROI(imgSrc);
			sprintf(szName, "wnd_%d", idx++);
			// show the split-out picture or not
			cvNamedWindow(szName);
			cvShowImage(szName, imgNo);
			IplImage* imgDst = cvCreateImage(cvSize(rc.width, rc.height),IPL_DEPTH_8U,1);
			cvCvtColor(imgNo, imgDst, CV_RGB2GRAY);
			printf("%c", (char)classify(imgDst, 0));
			cvReleaseImage(&imgNo);
		}
		else if (printX == rc.x && printX < imgSrc->width)
		{
			printX += rc.width;
		}
		else
		{
			printf("\n");
			printY = rcNewFirst.y + rcNewFirst.height;
			rcNewFirst = findFirstChar(seq, printY);
			if (rcNewFirst.x == 0)
				break;
			cvDrawRect(imgSrc, cvPoint(rcNewFirst.x, rcNewFirst.y), cvPoint(rcNewFirst.x + rcNewFirst.width, rcNewFirst.y + rcNewFirst.height), CV_RGB(0, 0, 0));
			printX = rcNewFirst.x - 1;
			printY = rcNewFirst.y - 1;
		}
	}
	cvNamedWindow("src");
	cvShowImage("src", imgSrc);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&imgSrc);
	cvReleaseImage(&img_gray);
	cvDestroyAllWindows();

}
Developer ID: thunder176, Project: OCR_ASCII_Machine_Learning, Lines of code: 74, Source: basicOCR.cpp


Example 14: main

int main(int argc, char **argv)
{
  //   void *p;

  //   ((int *)p)[5];
  //   (int *)p[5];

  //   argc--, argv++;
  //   if (argc > 0) { distFileName = *argv; argc--, argv++; }
  //   if (argc > 1) { argc--, argv++; distFileName = *argv; }
  //   if (argc > 1) { argc--, argv++; distFileName = *argv; }
  //   if (!distFileName) usage();
  
  std::vector <std::vector <int> > dists;
  int zlow = INT_MAX, zhigh = 0;
  int face_row = 0, face_col = 0;
  char from_stdin = (argc == 1) ? 1 : 0;
  char *distFileName;
  std::ifstream file;
  const int WIDTH = 176, HEIGHT = 144;

  if (argc > 2) {
    distFileName = argv[1];
    file.open(distFileName, std::ifstream::in);
  }

  while (1) {
    std::string line;

    if (from_stdin) {
      char cline[100];
      char *res = fgets(cline, 100, stdin);
      if (!res) break;
      line = cline;
    } else {
      if (file.eof()) break;
      getline(file, line);
    }

    if (line[0] == '#') {
      sscanf(line.c_str(), "#face:%d\t%d\n", &face_row, &face_col);
    } else {
      std::vector <int> vals = split(line, "\t");
      if (!vals.empty() && vals[2] != -1) {
        dists.push_back(vals);
        zlow = std::min(zlow, vals[2]);
        zhigh = std::max(zhigh, vals[2]);
      }
    }
  }

  if (argc > 1 && argv[2]) face_row = atoi(argv[2]);
  if (argc > 2 && argv[3]) face_col = atoi(argv[3]);
  if (face_row < 0) face_row = 0;
  if (face_col < 0) face_col = 0;

  IplImage * img = cvCreateImage(cvSize(WIDTH, HEIGHT), IPL_DEPTH_16U, 1);
  int imgDepth = 16;
  int maxNum = (int)pow((double)2, (double)imgDepth);
  double ratio = (double)maxNum/(double)(zhigh - zlow);
  cvSetZero(img);

  for (int i=0; i<(int)dists.size(); i++)
    {
      int z = dists[i][2];
      z -= zlow;
      z = (int)(z*ratio);
      cvSet2D(img, dists[i][0], dists[i][1], cvScalarAll(z));
    }

  cvCircle(img, cvPoint(face_row, face_col), 5, CV_RGB(maxNum, maxNum, maxNum));

  cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
  cvShowImage("result", img);
  cvWaitKey(0);
  cvReleaseImage(&img);
  cvDestroyWindow("result");

  return 0;
}
Developer ID: cou929, Project: Misc-stuffs, Lines of code: 80, Source: visualize.cpp


Example 15: main

int main(int argc, const char **argv)
{
    //Variables
    int degrees,PosRelX,PosRelY;
    float radians,Dlaser,ODM_ang, ang;
    int width = 500, height = 500; //Create the size of the map here (in pixel)
    int centroX = (width / 2);
    int centroY = (height / 2);
    playerc_client_t *client;
    playerc_laser_t *laser;
    playerc_position2d_t *position2d;
    CvPoint pt,pt1,pt2;
    CvScalar cinzaE,preto,cinzaC;
    char window_name[] = "Map";

    IplImage* image = cvCreateImage( cvSize(width,height), 8, 3 );
    cvNamedWindow(window_name, 1 );
    preto = CV_RGB(0, 0, 0);        //for indicating obstacles
    cinzaE = CV_RGB(92, 92, 92);    //To indicate unknown space
    cinzaC = CV_RGB(150, 150, 150); //To indicate free spaces

    client = playerc_client_create(NULL, "localhost", 6665);
    if (playerc_client_connect(client) != 0)
    return -1;

    laser = playerc_laser_create(client, 0);
    if (playerc_laser_subscribe(laser, PLAYERC_OPEN_MODE))
    return -1;

    position2d = playerc_position2d_create(client, 0);
    if (playerc_position2d_subscribe(position2d, PLAYERC_OPEN_MODE) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_datamode (client, PLAYERC_DATAMODE_PULL) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    if (playerc_client_set_replace_rule (client, -1, -1, PLAYER_MSGTYPE_DATA, -1, 1) != 0) {
        fprintf(stderr, "error: %s\n", playerc_error_str());
        return -1;
    }

    playerc_position2d_enable(position2d, 1);  // initialise motors
    playerc_position2d_set_odom(position2d, 0, 0, 0);  // Set odometer to zero

    cvSet(image, cinzaE,0); //set the image colour to dark
    pt.x = centroX;  // Zero coordinate for x
    pt.y = centroY;  // Zero coordinate for y


    while(1) {
        playerc_client_read(client);
        cvSaveImage("mapa.jpg",image);
        playerc_client_read(client);

        for (degrees = 2; degrees <= 360; degrees+=2) {
            Dlaser = laser->scan[degrees][0];
            if (Dlaser < 8) {
                radians = graus2rad (degrees/2);      //Convert the angle
//.........part of the code omitted here.........
