
C++ cv::CascadeClassifier Class Code Examples


This article collects typical usage examples of the C++ cv::CascadeClassifier class. If you have been wondering what the CascadeClassifier class does, how to use it, or what it looks like in real code, the curated examples here should help.



Thirteen code examples of the CascadeClassifier class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C++ code examples.
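
Before the individual examples, here is a minimal, self-contained sketch of the workflow nearly all of them share: load a cascade XML file, convert the input to an equalized grayscale image, call detectMultiScale, and draw the resulting rectangles. The cascade and image file names below are placeholders, not files shipped with this article.

#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main()
{
    // File names are placeholders -- adjust them to your setup.
    cv::CascadeClassifier face_cascade;
    if (!face_cascade.load("haarcascade_frontalface_alt2.xml"))
    {
        std::cerr << "Error loading cascade file" << std::endl;
        return -1;
    }

    cv::Mat img = cv::imread("people.jpg");
    if (img.empty()) return -1;

    // Detect on an equalized grayscale image, as most examples below do.
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));

    // Draw a rectangle around each detection.
    for (size_t i = 0; i < faces.size(); i++)
        cv::rectangle(img, faces[i], cv::Scalar(0, 255, 0), 2);

    cv::imshow("faces", img);
    cv::waitKey(0);
    return 0;
}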

Example 1: EyeSearchRect

namespace stasm
{
static cv::CascadeClassifier leye_det_g;  // left eye detector
static cv::CascadeClassifier reye_det_g;  // right eye detector
static cv::CascadeClassifier mouth_det_g; // mouth detector

//-----------------------------------------------------------------------------

// Return the region of the face we search for the left or right eye.
// Return rect of width=0 if eye must not be searched for (outer eyes in side views).
// We reduce false positives and save time by searching in only part of the face.
// The entire eye box must fall in this region, not just the center of the eye.
// The magic numbers below were found empirically to give good
// results in informal tests.  They reduce the number of false positives
// in the forehead, eyebrows, nostrils, and mouth.

static Rect EyeSearchRect(
    EYAW        eyaw,         // in
    const Rect& facerect,     // in
    const bool  is_right_eye) // in: true for right eye, false for left eye
{
    Rect rect = facerect;
    int width = facerect.width;
    switch (eyaw)
    {
        case EYAW00:                        // frontal model
            if (is_right_eye)
                rect.x += width / 3; // don't search left third of face
            rect.width -= width / 3; // or right third
            rect.height = cvRound(.6 * facerect.height); // don't search lower part of face
            break;
        case EYAW_22:                       // left facing three-quarter model
            if (is_right_eye)               // inner eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
            }
            else                            // outer eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
            }
            rect.height = cvRound(.5 * facerect.height);
            break;
        case EYAW22:                        // right facing three-quarter model
            if (is_right_eye)               // outer eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
            }
            else                            // inner eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
            }
            rect.height = cvRound(.5 * facerect.height);
            break;
        case EYAW_45:                       // left facing three-quarter model
            if (is_right_eye)               // inner eye
            {
                rect.x += cvRound(.4 * width);
                rect.width = cvRound(.5 * width);
                rect.height = cvRound(.5 * facerect.height);
            }
            else                            // outer eye
                rect.width = rect.height = 0;
            break;
        case EYAW45:                        // right facing three-quarter model
            if (is_right_eye)               // outer eye
                rect.width = rect.height = 0;
            else                            // inner eye
            {
                rect.x += cvRound(.1 * width);
                rect.width = cvRound(.5 * width);
                rect.height = cvRound(.5 * facerect.height);
            }
            break;
        default:
            Err("EyeSearchRect: Invalid eyaw %d", eyaw);
            break;
    }
    rect.width  = MAX(0, rect.width);
    rect.height = MAX(0, rect.height);
    return rect;
}

// Get adjustment for position of mouth, based on model type and eye angle.

static void MouthRectShift(
    int&            ixshift,         // out
    int&            iyshift,         // out
    EYAW            eyaw,            // in
    int             facerect_width,  // in
    int             facerect_height, // in
    int             ileft_best,      // in
    int             iright_best,     // in
    const vec_Rect& leyes,           // in
    const vec_Rect& reyes)           // in
{
    double xshift = 0, yshift = 0;
//......... (part of the code omitted) .........
Developer: Keerecles, Project: libra, Lines: 101, Source: eyedet.cpp
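
The snippet is cut off before EyeSearchRect's callers, so as a hedged illustration (not code from eyedet.cpp): a typical caller clips the search rect to the image, runs the eye cascade only inside it, and shifts the detections back to whole-image coordinates. It assumes leye_det_g has already been loaded.

// Hypothetical caller of EyeSearchRect (illustration only, not from eyedet.cpp).
static vec_Rect DetectLeftEyes(
    const Image& img,      // in: grayscale face image
    EYAW         eyaw,     // in
    const Rect&  facerect) // in: face rect from the face detector
{
    vec_Rect eyes;
    Rect searchrect = EyeSearchRect(eyaw, facerect, false /*left eye*/);
    if (searchrect.width == 0)                    // eye must not be searched for
        return eyes;
    searchrect &= Rect(0, 0, img.cols, img.rows); // clip to image boundary
    leye_det_g.detectMultiScale(img(searchrect), eyes, 1.1, 3, 0,
                                cv::Size(searchrect.width / 10,
                                         searchrect.height / 10));
    for (int i = 0; i < NSIZE(eyes); i++)         // back to whole-image coords
    {
        eyes[i].x += searchrect.x;
        eyes[i].y += searchrect.y;
    }
    return eyes;
}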


Example 2: classifierDetect

void FaceDetector::classifierDetect(cv::Mat image, std::vector<cv::Rect>& detections, cv::CascadeClassifier& classifier, int flag, cv::Size size)
{
	// Take the classifier by reference: copying a cv::CascadeClassifier on every call is wasteful.
	classifier.detectMultiScale(image, detections, 1.1, 2, flag, size);
}
Developer: Atom-machinerule, Project: OpenQbo, Lines: 5, Source: face_detector.cpp
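
A hedged call-site sketch (not found in face_detector.cpp): the wrapper expects a classifier that was loaded once up front, keeping loading and detection separate. The XML path, the FaceDetector instance, and gray_frame are assumptions for illustration.

// Hypothetical call site for classifierDetect.
FaceDetector detector;
cv::CascadeClassifier face_classifier;
cv::Mat gray_frame; // an equalized grayscale frame prepared by the caller
if (face_classifier.load("haarcascade_frontalface_alt.xml")) // path is an assumption
{
	std::vector<cv::Rect> detections;
	detector.classifierDetect(gray_frame, detections, face_classifier,
	                          CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
	// detections now holds one cv::Rect per face found in gray_frame
}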


Example 3: OpenFaceDetector_

namespace stasm
{
typedef vector<DetPar> vec_DetPar;

static cv::CascadeClassifier facedet_g;  // the face detector

static double BORDER_FRAC = 0.1; // fraction of image width or height
                                 // use 0.0 for no border

//-----------------------------------------------------------------------------

void FaceDet::OpenFaceDetector_( // called by stasm_init, init face det from XML file
    const char* datadir,         // in: directory of face detector files
    void*)                       // in: unused (func signature compatibility)
{
    OpenDetector(facedet_g, "haarcascade_frontalface_alt2.xml",  datadir);
}

// If a face is near the edge of the image, the OpenCV detectors tend to
// return a too-small face rectangle.  By adding a border around the edge
// of the image we mitigate this problem.

static Image EnborderImg(    // return the image with a border
    int&         leftborder, // out: border size in pixels
    int&         topborder,  // out: border size in pixels
    const Image& img)        // io
{
    Image bordered_img(img);
    leftborder = cvRound(BORDER_FRAC * bordered_img.cols);
    topborder  = cvRound(BORDER_FRAC * bordered_img.rows);
    copyMakeBorder(bordered_img, bordered_img,
                   topborder, topborder, leftborder, leftborder,
                   cv::BORDER_REPLICATE);
    return bordered_img;
}

void DetectFaces(          // all face rects into detpars
    vec_DetPar&  detpars,  // out
    const Image& img,      // in
    int          minwidth) // in: as percent of img width
{
    int leftborder = 0, topborder = 0; // border size in pixels
    Image bordered_img(BORDER_FRAC == 0?
                       img: EnborderImg(leftborder, topborder, img));

    // Detection results are very slightly better with equalization
    // (tested on the MUCT images, which are not pre-equalized), and
    // it's quick enough to equalize (roughly 10ms on a 1.6 GHz laptop).

    Image equalized_img; cv::equalizeHist(bordered_img, equalized_img);

    CV_Assert(minwidth >= 1 && minwidth <= 100);

    int minpix = MAX(100, cvRound(img.cols * minwidth / 100.));

    // the params below are accurate but slow
    static const double SCALE_FACTOR   = 1.1;
    static const int    MIN_NEIGHBORS  = 3;
    static const int    DETECTOR_FLAGS = 0;

    vec_Rect facerects = // all face rects in image
        Detect(equalized_img, &facedet_g, NULL,
               SCALE_FACTOR, MIN_NEIGHBORS, DETECTOR_FLAGS, minpix);

    // copy face rects into the detpars vector

    detpars.resize(NSIZE(facerects));
    for (int i = 0; i < NSIZE(facerects); i++)
    {
        Rect* facerect = &facerects[i];
        DetPar detpar; // detpar constructor sets all fields INVALID
        // detpar.x and detpar.y are the center of the face rectangle
        detpar.x = facerect->x + facerect->width / 2.;
        detpar.y = facerect->y + facerect->height / 2.;
        detpar.x -= leftborder; // discount the border we added earlier
        detpar.y -= topborder;
        detpar.width  = double(facerect->width);
        detpar.height = double(facerect->height);
        detpar.yaw = 0; // assume face has no yaw in this version of Stasm
        detpar.eyaw = EYAW00;
        detpars[i] = detpar;
    }
}

// order by increasing distance from the left margin, and distance from the top margin within that

static bool IncreasingLeftMargin( // compare predicate for std::sort
    const DetPar& detpar1,        // in
    const DetPar& detpar2)        // in
{
    return 1e5 * detpar2.x + detpar2.y >
           1e5 * detpar1.x + detpar1.y;
}

// order by decreasing width, and distance from the left margin within that

static bool DecreasingWidth( // compare predicate for std::sort
    const DetPar& detpar1,   // in
    const DetPar& detpar2)   // in
{
//......... (part of the code omitted) .........
Developer: Keerecles, Project: libra, Lines: 101, Source: facedet.cpp
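
The border trick in EnborderImg is useful outside Stasm too. A stand-alone sketch of the same pattern (the helper name detectWithBorder is an assumption): detect on a replicated-border copy of the image, then subtract the border offsets from the results.

// Detect near image edges more reliably by adding a border first;
// "detector" is a loaded cv::CascadeClassifier, "img" a grayscale image.
std::vector<cv::Rect> detectWithBorder(cv::CascadeClassifier& detector,
                                       const cv::Mat& img)
{
    const double BORDER_FRAC = 0.1;
    int xborder = cvRound(BORDER_FRAC * img.cols);
    int yborder = cvRound(BORDER_FRAC * img.rows);

    cv::Mat bordered;
    cv::copyMakeBorder(img, bordered, yborder, yborder, xborder, xborder,
                       cv::BORDER_REPLICATE);

    std::vector<cv::Rect> rects;
    detector.detectMultiScale(bordered, rects, 1.1, 3, 0);

    // detections are in bordered-image coordinates; shift them back
    for (size_t i = 0; i < rects.size(); i++)
    {
        rects[i].x -= xborder;
        rects[i].y -= yborder;
    }
    return rects;
}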


Example 4: scanWebcam

void scanWebcam()
{
	//Check that the cascade file was loaded
	if(!face_cascade1.load( face_cascade_name1 ))
	{
		cout << "Error while loading cascade files" << endl;
		return; //No point continuing without a working detector
	}

	CvCapture* capture;
	cv::Mat frame;

	//Connect to the video stream
	capture = cvCaptureFromFile(PATH_TO_CAM);

	//If the connection was successful 
	if(capture)
	{
		//Create a FaceRecognizer object that uses the Fisherfaces algorithm (also works with the eigenfaces and LBPH algorithms)
		cv::Ptr<cv::FaceRecognizer> fisherfaces = cv::createFisherFaceRecognizer();

		//Load the database that was previously created during the training phase
		cv::FileStorage fs_fisher(PATH_TO_XML_FISHERFACES, cv::FileStorage::READ);
		fisherfaces->load(fs_fisher);

		//Infinite loop to detect the faces continuously
		while(true)
		{
			//Get one picture from the videostream (The facial recognition is done on images from the video and not directly from the videostream)
			frame = cvQueryFrame( capture );
			cv::namedWindow("test");

			//Check that one image was successfully extracted from the video
			if(!frame.empty())
			{
				//Variables used for the id process
				int predictedLabel = -1;
				double predictedConfidence = 0.0;

				std::vector<cv::Rect> faces; //Contains the rectangle coordinates in which the face will be included
				cv::Mat frame_gray; //Grey image
				cvtColor( frame, frame_gray, CV_RGB2GRAY ); //Converts the image from RGB to shades of grey
				equalizeHist( frame_gray, frame_gray ); //Histogram equalization
				
				//We perform a face detection
				face_cascade1.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
				

				//If at least one face was detected then we can perform an identification
				for(int i=0; i<faces.size();i++)
				{
					//Get only (crop) the face (shades of grey)
					cv::Mat croppedFace = frame_gray(cv::Rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height));
					//Resize the image
					cv::resize(croppedFace, croppedFace, sizeOfImage);
					
					//Start the identification
					fisherfaces->predict(croppedFace, predictedLabel, predictedConfidence);
					
					//Print the result in the console
					cout << "##### ID " << predictedLabel << "    confidence : " << predictedConfidence;

					int id=predictedLabel;
					const int THRESHOLD = 1000; //Threshold for the facial recognition. Used to make sure that the face was properly recognized.

					string printedName;

					cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );

					//Print the ID result on the video (it's really bad to do it this way !! A function should be created !)
					if(id==1 && predictedConfidence>THRESHOLD)
					{
						printedName="Adrien";
						//Print the circle around the face
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						//Print the person's name
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==2 && predictedConfidence>THRESHOLD)
					{
						printedName="Ofir";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==3 && predictedConfidence>THRESHOLD)
					{
						printedName="Jeremie";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else
					{
						printedName="UNKNOWN";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,0,255), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,0,255), 2, 8, false );
					}
					
				}
				cout << endl;

				//Print each images to recreate a video
//......... (part of the code omitted) .........
Developer: AdriGe, Project: FacialRecognition, Lines: 101, Source: main.cpp
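
The comment inside the identification loop says it best: the repeated drawing code should be a function. A minimal sketch of such a helper (the name and signature are assumptions, not the original project's code):

// Draw one labeled ellipse around a face: green for a recognized
// person, red for an unknown one.
static void drawLabeledFace(cv::Mat& frame, const cv::Rect& face,
                            const std::string& name, bool recognized)
{
	cv::Scalar color = recognized ? cv::Scalar(0,255,0) : cv::Scalar(0,0,255);
	cv::Point center(face.x + face.width/2, face.y + face.height/2);
	ellipse(frame, center, cv::Size(face.width/2, face.height/2),
	        0, 0, 360, color, 4, 8, 0);
	cv::putText(frame, name, center, cv::FONT_HERSHEY_SIMPLEX,
	            1.0f, color, 2, 8, false);
}

Each if/else branch in the loop then collapses to a few lines: decide whether the prediction passes the threshold, look the name up (for example in a std::map<int, std::string>), and call drawLabeledFace.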


Example 5: main

/*
 * @function main
 */
int main( int argc, const char** argv ) {

  // Get the mode
  if (argc > 1)
  {
    const char *inputMode = argv[1];
    if (strcmp(inputMode, "normal") == 0) {
      mode = NORMAL;
    } else if (strcmp(inputMode, "debug") == 0) {
      mode = DEBUG;
    } else if (strcmp(inputMode, "plot") == 0) {
      mode = PLOT;
    } else {
      mode = NORMAL;
    }
  }
  else
  {
    mode = NORMAL;
  }

  if (mode == NORMAL) {
    eventHandler = EventHandler();
  }

  if (mode == DEBUG || mode == NORMAL) {
    printf("Input Mode: %s\n", mode == NORMAL ? "normal" :
        mode == DEBUG ? "debug" :
        mode == PLOT ? "plot" : "none");

    cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 10, 100);
    cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Right Eye", 10, 600);
    cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Left Eye", 10, 800);
  } else if (mode == PLOT) {
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 400, 100);
  }

  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( FACE_CASCADE_FILE ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };


  // Read the video stream
  cv::VideoCapture capture( 0 );
  if( capture.isOpened() ) {
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FPS, 15);
    capture >> frame;
    while( true ) {
      capture >> frame;

      // mirror it
      cv::flip(frame, frame, 1);
      frame.copyTo(debugImage);

      // Apply the classifier to the frame
      if( !frame.empty() ) {
        detectAndDisplay( frame );
      }
      else {
        printf(" --(!) No captured frame -- Break!");
        break;
      }
      if (mode == DEBUG || mode == NORMAL) {
        imshow(main_window_name, debugImage);
      }

      if (mode == DEBUG || mode == PLOT || mode == NORMAL) {
        int c = cv::waitKey(10);
        if( (char)c == 'c' ) { break; }
        if( (char)c == 'f' ) {
          imwrite("frame.png", frame);
        }
      }
    }
  }
Developer: lingz, Project: eye-see-you, Lines: 87, Source: main.cpp


Example 6: main

// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
    cv::VideoCapture capture;
    cv::Mat frame;
    if (!cascade.load(cascade_name))
    {
        std::cout << "Error loading cascade file" << std::endl;
        return -1;
    }
    
    // This works on a D-Link CDS-932L
    const std::string videoStreamAddress = "http://192.168.1.253/nphMotionJpeg?Resolution=320x240&Quality=Standard&.mjpg";

    //open the video stream and make sure it's opened
    if(!capture.open(videoStreamAddress)) {
        std::cout << "Error opening video stream or file" << std::endl;
        return -1;
    }

    // Create a new named window with title: result
    cvNamedWindow( "Result", 1 );

    // Find if the capture is loaded successfully or not.

    // If loaded successfully, then:
    // Capture from the camera.
    for(;;)
    {   
        cv::Mat gray;
        for(int i=0;i<10;i++)
            capture.grab();
        capture >> frame;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);

        std::vector<cv::Rect> results;
        cascade.detectMultiScale(gray, results, 1.1, 3, 0);
        int dx=0;
        int dy=0;
        for(std::vector<cv::Rect>::iterator it=results.begin();
        it!=results.end();
        it++)
        {
          cv::Rect r = *it;
          std::cout << "_" << r.x << "_\t_" << r.y << "_\t" << std::endl;
          dx = r.x + r.width/2;
          dy = r.y + r.height/2;
        }
        std::stringstream ss;
        ss << "wget -q -O /dev/null \"http://192.168.1.253/nphControlCamera?Width=";
        ss << gray.cols;
        ss << "&Height=";
        ss << gray.rows;
        ss << "&Direction=Direct&NewPosition.x=" << dx;
        ss << "&NewPosition.y=" << dy<<"\"";
        if((dx!=0)||(dy!=0))
        {
            std::cout << ss.str() << std::endl;
            system(ss.str().c_str());
                
        }
        imshow("Result",gray);
        // Wait for a while before proceeding to the next frame
        if( cvWaitKey( 10 ) >= 0 )
            break;
        
    }

    return 0;
}
Developer: yquemener, Project: THS-Geist, Lines: 66, Source: splay.cpp


Example 7: do_work

  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#ifndef CV_VERSION_EPOCH
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t i = 0; i < faces.size(); i++ )
      {
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#ifndef CV_VERSION_EPOCH
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        int c = cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
Developer: srmanikandasriram, Project: vision_opencv, Lines: 79, Source: face_detection_nodelet.cpp


Example 8: main

/**
 * @function main
 */
int main( int argc, const char** argv ) {
  CvCapture* capture;
  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 100, 600);

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

    
    
    cout << "press c to quit program" << endl;
    // Use pre-recorded video
    if (argc > 1) {
        const char* path = argv[1];
        
        capture = cvCreateFileCapture(path);

        if( capture ) {
            while( true ) {
                frame = cvQueryFrame( capture );

                frame.copyTo(debugImage);
                
                // Apply the classifier to the frame
                if( !frame.empty() ) {
                    detectAndDisplay( frame );
                }
                else {
                    printf(" --(!) No captured frame -- Break!");
                    break;
                }
                
                imshow(main_window_name,frame);
                
                int c = cv::waitKey(1);
                if( (char)c == 'c' ) { break; }
                if( (char)c == 'f' ) {
                    imwrite("frame.png",frame);
                }
                
            }
        }
        
        releaseCornerKernels();
        
    }
    else {
    
       // Read the video stream
      capture = cvCaptureFromCAM( -1 );
        
      if( capture ) {
        while( true ) {
          frame = cvQueryFrame( capture );
          // mirror it
          cv::flip(frame, frame, 1);
          frame.copyTo(debugImage);

          // Apply the classifier to the frame
          if( !frame.empty() ) {
            detectAndDisplay( frame );
          }
          else {
            printf(" --(!) No captured frame -- Break!");
            break;
          }

          imshow(main_window_name,debugImage);

          int c = cv::waitKey(1);
          if( (char)c == 'c' ) { break; }
          if( (char)c == 'f' ) {
            imwrite("frame.png",frame);
          }

        }
      }

      releaseCornerKernels();
    }

  return 0;
}
Developer: hkuhn, Project: EECS481-Pupil-Tracking, Lines: 98, Source: main.cpp


Example 9: if

    std::vector<cv::Rect> detect()
    {
        std::vector<cv::Rect> faces;
        if (cascade.empty()) return faces;
        double scale = this->scale == 0? 1.0 : this->scale;
        cv::Mat image_roi = image;
        cv::Mat gray, small;
        int min = cvRound(smallest * 1000. * scale);
        
        // use a region of interest to improve performance
        // This idea comes from the More than Technical blog:
        // http://www.morethantechnical.com/2009/08/09/near-realtime-face-detection-on-the-iphone-w-opencv-port-wcodevideo/
        if ( roi.width > 0 && roi.height > 0)
        {
            image_roi = image(roi);
        }

        // use an equalized grayscale to improve detection
        cv::cvtColor(image_roi, gray, CV_BGR2GRAY);

        // use a smaller image to improve performance
        cv::resize(gray, small, cv::Size(cvRound(gray.cols * scale), cvRound(gray.rows * scale)));
        cv::equalizeHist(small, small);
        
        // detect with OpenCV
        cascade.detectMultiScale(small, faces, 1.1, 2, 0, cv::Size(min, min));
        
#ifdef USE_ROI
        if (faces.size() == 0)
        {
            // clear the region of interest
            roi.width = roi.height = 0;
            roi.x = roi.y = 0;
        }
        else if (faces.size() > 0)
        {
            // determine the region of interest from the detected objects
            int minx = width * scale;
            int miny = height * scale;
            int maxx = 0, maxy = 0;
            for (size_t i = 0; i < faces.size(); i++)
            {
                faces[i].x += roi.x * scale;
                faces[i].y += roi.y * scale;
                minx = MIN(faces[i].x, minx);
                miny = MIN(faces[i].y, miny);
                maxx = MAX(faces[i].x + faces[i].width, maxx);
                maxy = MAX(faces[i].y + faces[i].height, maxy);
            }
            minx = MAX(minx - PAD, 0);
            miny = MAX(miny - PAD, 0);
            maxx = MIN(maxx + PAD, width * scale);
            maxy = MIN(maxy + PAD, height * scale);

            // store the region of interest
            roi.x = minx / scale;
            roi.y = miny / scale;
            roi.width = (maxx - minx) / scale;
            roi.height = (maxy - miny) / scale; 
        }
#endif
        return faces;
    }
Developer: ddennedy, Project: frei0r, Lines: 63, Source: facedetect.cpp
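
The region-of-interest idea credited above to the More than Technical blog generalizes beyond this class. A stand-alone sketch of the pattern in a capture loop, assuming a loaded cascade and an opened cv::VideoCapture named cap: scan the whole frame until something is found, then scan only a padded box around the previous hits.

const int PAD = 40;   // padding around previous hits, an assumed value
cv::Rect roi;         // empty => scan the whole frame
cv::Mat frame, gray;
while (cap.read(frame))
{
    cv::cvtColor(frame, gray, CV_BGR2GRAY);
    cv::Mat search = (roi.width > 0 && roi.height > 0) ? gray(roi) : gray;

    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(search, faces, 1.1, 2, 0);

    if (faces.empty())
    {
        roi = cv::Rect(); // lost it: fall back to a full-frame scan
    }
    else
    {
        // map detections back to full-frame coordinates
        cv::Rect bound(faces[0].x + roi.x, faces[0].y + roi.y,
                       faces[0].width, faces[0].height);
        for (size_t i = 1; i < faces.size(); i++)
            bound |= cv::Rect(faces[i].x + roi.x, faces[i].y + roi.y,
                              faces[i].width, faces[i].height);
        // pad and clip to the frame: the next iteration searches only here
        roi = cv::Rect(bound.x - PAD, bound.y - PAD,
                       bound.width + 2*PAD, bound.height + 2*PAD)
              & cv::Rect(0, 0, gray.cols, gray.rows);
    }
}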


Example 10: srcGray

/*
 * Class:     io_github_melvincabatuan_pedestriandetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_pedestriandetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access source array data... OK
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat for YUV420sp source and output BGRA 
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "visionary.net_pedestrian_cascade_web_LBP.xml");       
    
      /* Load the pedestrian cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){ 
           LOGE("Error loading pedestrian cascade"); 
           abort(); 
       };

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Detection took cat_face_cascade.detectMultiScale() time = 655.334471 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2 , 0 , Size(30, 30) ); // Scaling factor = 1.1;  minNeighbors = 2 ; flags = 0; minimumSize = 30,30

      // cat_face_cascade.detectMultiScale() time = 120.117185 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3 , 0 , Size(64, 64));

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // Size(double width, double height) 

      // scalingFactor parameters determine how much the classifier will be scaled up after each run.
      // minNeighbors parameter specifies how many positive neighbors a positive face rectangle should have to be considered a possible match; 
      // when a potential face rectangle is moved a pixel and does not trigger the classifier any more, it is most likely that it’s a false positive. 
      // Face rectangles with fewer positive neighbors than minNeighbors are rejected. 
      // If minNeighbors is set to zero, all potential face rectangles are returned. 
      // The flags parameter is from the OpenCV 1.x API and should always be 0. 
      // minimumSize specifies the smallest face rectangle we’re looking for. 

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through all detected bodies and draw an ellipse around each
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Iterate through all faces and detecting eyes took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
//......... (part of the code omitted) .........
Developer: DeLaSalleUniversity-Manila, Project: pedestriandetection-melvincabatuan, Lines: 101, Source: ImageProcessing.cpp
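
The parameter comments above (and the timing notes comparing 1.1 / 2 / Size(30,30) against 1.2 / 3 / Size(64,64)) suggest measuring the trade-off directly. A hedged sketch, assuming a loaded cascade and a grayscale cv::Mat named gray:

// Time detectMultiScale under two parameter settings: a larger scale
// factor and minimum size means fewer pyramid levels and fewer windows.
std::vector<cv::Rect> hits;

double t = (double)cv::getTickCount();
cascade.detectMultiScale(gray, hits, 1.1, 2, 0, cv::Size(30, 30)); // thorough
t = 1000*((double)cv::getTickCount() - t)/cv::getTickFrequency();
printf("thorough settings: %d hits in %lf ms\n", (int)hits.size(), t);

t = (double)cv::getTickCount();
cascade.detectMultiScale(gray, hits, 1.2, 3, 0, cv::Size(64, 64)); // fast
t = 1000*((double)cv::getTickCount() - t)/cv::getTickFrequency();
printf("fast settings: %d hits in %lf ms\n", (int)hits.size(), t);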


Example 11: detectAndDisplay

void detectAndDisplay(cv::Mat frame) {
  std::vector<cv::Rect> faces;
  cv::Mat frame_gray;

  cv::cvtColor(frame, frame_gray, cv::COLOR_BGR2GRAY);
  cv::equalizeHist(frame_gray, frame_gray);
  
  // Detect Faces
  face_cascade.detectMultiScale(frame_gray, // image
				faces, // objects
				1.1, // scale factor
				2, // min neighbors
				0|cv::CASCADE_SCALE_IMAGE, // flags
				cv::Size(30, 30)); // min size

  for (std::size_t i = 0; i < faces.size(); i++) {
    cv::Point center(faces[i].x + faces[i].width/2,
		     faces[i].y + faces[i].height/2);

    cv::ellipse(frame,
		center,
		cv::Size(faces[i].width/2, faces[i].height/2),
		0,
		0,
		360,
		cv::Scalar(255, 0, 255),
		4,
		8,
		0);

    cv::Mat faceROI = frame_gray(faces[i]);
    std::vector<cv::Rect> eyes;

    // in each face, detect eyes
    eyes_cascade.detectMultiScale(faceROI,
				  eyes,
				  1.1,
				  2,
				  0 | cv::CASCADE_SCALE_IMAGE,
				  cv::Size(30, 30));

    for (std::size_t j = 0; j < eyes.size(); j++) {
      cv::Point eye_center(faces[i].x + eyes[j].x + eyes[j].width/2,
			   faces[i].y + eyes[j].y + eyes[j].height/2);

      int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
      cv::circle(frame, 
		 eye_center,
		 radius, 
		 cv::Scalar(255, 0, 0),
		 4,
		 8,
		 0);
    }

  }

  // Show what you got
  cv::imshow(window_name, frame);

}
Developer: trevorsenior, Project: playground, Lines: 61, Source: main.cpp


Example 12: main

/**
 * @function main
 */
int main( int argc, const char** argv ) {

	srand(time(NULL)); int ra;

/*	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 400, 400, CV_8UC3 );
	MyFilledCircle( circ_image, Point( 100, 100) );
	imshow( circ_window, circ_image );
	cv::setWindowProperty( circ_window, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	moveWindow( circ_window, 900, 200 );*/
	//sleep(8);

	CvCapture* capture;
	cv::Mat frame;
	
	// Load the cascades
	if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

	cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(main_window_name, 400, 100);
	cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(face_window_name, 10, 100);
	cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Right Eye", 10, 600);
	cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Left Eye", 10, 800);
	cv::namedWindow("aa",CV_WINDOW_NORMAL);
	cv::moveWindow("aa", 10, 800);
	cv::namedWindow("aaa",CV_WINDOW_NORMAL);
	cv::moveWindow("aaa", 10, 800);

	createCornerKernels();
	ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
			43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

	// Read the video stream
	capture = cvCaptureFromCAM( -1 );
	if( capture ) {
		while( true ) {
	
	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 414, 414, CV_8UC3 );
	//ra = rand()%4;
	//if (ra==1) rx+=1; else if(ra==2) rx-=1; else if(ra==3) ry+=1; else ry-=1; rx+=1; if(rx==500) rx=0;

	if(stage1 && !stage2)
		if(rx>=6 && rx <=400 && ry==6)
		{
			rx+= 10;
			tl+= lpy;
			tr+= rpy;
			countert ++;
		}
		else if(rx>=400 && ry<400)
		{
			ry+=10;
			rl+= lpx;
			rr+= rpx;
			counterl ++;
		}
		else if(ry>=400 && rx > 6)
		{
			rx-=10;
			bl+= lpy;
			br+= rpy;
		}
		else if(rx<=6 && ry>20)
		{
			ry-=10;
			ll+= lpx;
			lr+= rpx;
		}
		else if(rx <= 6 && ry <= 20 && ry > 6)
		{
			stage1 = 0;
			stage2 = 1;
		}
	if(!stage1 && stage2)
	{
		tal = tl / countert;
		tar = tr / countert;
		bar = br / countert;
		bal = bl / countert;
		lal = ll / (counterl-1);
		lar = lr / (counterl-1);
		ral = rl / counterl;
		rar = rr / counterl;
		std::cout<<tal<<" : "<<tar<<" : "<<lal<<" : "<<lar<<std::endl;
		std::cout<<ral<<" : "<<rar<<" : "<<bal<<" : "<<bar<<std::endl;
		stage2 = 0;
		rx=200;ry=200;
	}

	if(!stage1 && !stage2)
	{
//......... (part of the code omitted) .........
Developer: 4sskick, Project: Eye-Tracking_robust, Lines: 101, Source: main.cpp


Example 13: main

int main(int argc, char** argv)
{
    // Load the cascade classifiers
    // Make sure you point the XML files to the right path, or
    // just copy the files from [OPENCV_DIR]/data/haarcascades directory
    face_cascade.load("haarcascade_frontalface_alt2.xml");
    eye_cascade.load("haarcascade_eye.xml");

    std::cout << "==================\n";

    // Open webcam
    cv::VideoCapture cap(0);
    // Check if everything is ok
    if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
    {
        std::cout << "bad\n";
        return 1;
    }

    cap.set(CV_CAP_PROP_FRAME_WIDTH, WIDTH);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
    cv::Mat frame, eye_tpl;
    cv::Rect eye_bb;
    int key = 0;
    while ((key = cv::waitKey(15)) != 'q' && key != 'Q') // poll the keyboard once per iteration
    {
        cap >> frame;
        if (frame.empty()) break;
        // Flip the frame horizontally, Windows users might need this
        cv::flip(frame, frame, 1);
        // Convert to greyscale and
        // adjust the image contrast using histogram equalization
        cv::Mat gray;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        if (eye_bb.width == 0 && eye_bb.height == 0)
        {
            // Detection stage
            // Try to detect the face and the eye of the user
            detectEye(gray, eye_tpl, eye_bb);
        }
        else
        {
            // Tracking stage with template matching
            trackEye(gray, eye_tpl, eye_bb);
            // Draw bounding rectangle for the eye
            cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
        }

        {//drawing grids

            struct Line { cv::Point from, to; };
            using Lines = std::vector<Line>;
            Lines lines{
                    { { 213, 0 }, { 213, 480 } },
                    { { 427, 0 }, { 427, 480 } },
                    { { 0, 160 }, { 640, 160 } },
                    { { 0, 320 }, { 640, 320 } }
            };
            for (auto const& l : lines)
                cv::line(frame,l.from, l.to, CV_RGB(0,255,0), 1, 1);
        }

        {//generate direction command
            std::vector<cv::Rect> direction_boxes{
                    cv::Rect{cv::Point{213,   0}, cv::Point{427, 160}}, //F
                    cv::Rect{cv::Point{  0, 160}, cv::Point{213, 320}}, //L
                    cv::Rect{cv::Point{427, 160}, cv::Point{640, 320}}  //R
            };

            auto draw_direction = [&](std::string const &direction) {
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(70, 130, 180),  5);
                cv::putText(frame, direction, cv::Point{280, 435}, cv::FONT_HERSHEY_DUPLEX, 3, CV_RGB(102, 105, 170), 4);
            };

            for(int box = 0; box != 3; ++box)
            {
                if (box == 0)
                    if (direction_boxes[0].contains(center_of_rect(eye_bb)))
                    {
                        draw_direction("F");
                        break;
                    }
                if (box == 1)
                    if (direction_boxes[1].contains(center_of_rect(eye_bb)))
                    {
                        draw_direction("L");
                        break;
                    }
                if (box == 2)
                    if (direction_boxes[2].contains(center_of_rect(eye_bb)))
                    {
                        draw_direction("R");
                        break;
                    }
            }
            std::cout << center_of_rect(eye_bb).x << std::endl;
        }

        cv::imshow("video", frame);
    }
    return 0;
//......... (part of the code omitted) .........
Developer: sarmadm, Project: Eye-Tracking-1, Lines: 101, Source: main.cpp
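
detectEye() and trackEye() are defined elsewhere in this project and omitted from the snippet. As a hedged sketch of what a matchTemplate-based trackEye could look like (an illustration, not the original author's code):

// Hypothetical template-matching tracker: look for the stored eye
// template tpl inside a window around the previous position rect.
void trackEye(cv::Mat& img, cv::Mat& tpl, cv::Rect& rect)
{
    // Search window: twice the template size, centered on the last hit.
    cv::Rect window(rect.x - rect.width/2, rect.y - rect.height/2,
                    rect.width*2, rect.height*2);
    window &= cv::Rect(0, 0, img.cols, img.rows);
    if (window.width < tpl.cols || window.height < tpl.rows)
    {
        rect = cv::Rect(); // degenerate window: force re-detection
        return;
    }

    cv::Mat dst;
    cv::matchTemplate(img(window), tpl, dst, CV_TM_SQDIFF_NORMED);

    double minval, maxval;
    cv::Point minloc, maxloc;
    cv::minMaxLoc(dst, &minval, &maxval, &minloc, &maxloc);

    if (minval <= 0.2) // match threshold is an assumed value
    {
        rect.x = window.x + minloc.x; // best match is the new eye position
        rect.y = window.y + minloc.y;
    }
    else
        rect = cv::Rect(); // zero size makes the main loop re-run detectEye
}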



Note: the cv::CascadeClassifier class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are governed by each project's license. Do not repost without permission.

