
C++ xn::ImageGenerator Class Code Examples


This article collects typical usage examples of the C++ xn::ImageGenerator class. If you are wondering how to use the ImageGenerator class in C++, or what it is good for, the curated examples below may help.



The following presents 20 code examples of the ImageGenerator class, ordered roughly by popularity.
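Before the examples, here is a minimal orientation sketch of the typical xn::ImageGenerator lifecycle (create, set an output mode, start generating, read a frame). It is not taken from any of the projects below; it assumes an OpenNI 1.x installation with XnCppWrapper.h available and an attached sensor, and it reduces error handling to simple status checks.

// Minimal sketch (assumed OpenNI 1.x setup): create an ImageGenerator and read one frame.
#include <XnCppWrapper.h>
#include <cstdio>

int main()
{
    xn::Context context;
    xn::ImageGenerator image;

    XnStatus rc = context.Init();
    if (rc != XN_STATUS_OK) { std::printf("Init failed: %s\n", xnGetStatusString(rc)); return 1; }

    rc = image.Create(context);                      // create an image production node
    if (rc != XN_STATUS_OK) { std::printf("Create failed: %s\n", xnGetStatusString(rc)); return 1; }

    XnMapOutputMode mode;                            // 640 x 480 @ 30 fps
    mode.nXRes = XN_VGA_X_RES;
    mode.nYRes = XN_VGA_Y_RES;
    mode.nFPS  = 30;
    image.SetMapOutputMode(mode);

    context.StartGeneratingAll();
    context.WaitOneUpdateAll(image);                 // block until a new image frame arrives

    xn::ImageMetaData md;
    image.GetMetaData(md);                           // md.RGB24Data() points at the pixel buffer
    std::printf("Got a %u x %u frame (frame id %u)\n", md.XRes(), md.YRes(), md.FrameID());

    context.Release();
    return 0;
}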

Example 1: setImageGeneratorProperty

bool CvCapture_OpenNI::setImageGeneratorProperty( int propIdx, double propValue )
{
    bool isSet = false;
    if( !imageGenerator.IsValid() )
        return isSet;

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_OUTPUT_MODE :
    {
        XnMapOutputMode mode;

        switch( cvRound(propValue) )
        {
        case CV_CAP_OPENNI_VGA_30HZ :
            mode.nXRes = XN_VGA_X_RES;
            mode.nYRes = XN_VGA_Y_RES;
            mode.nFPS = 30;
            break;
        case CV_CAP_OPENNI_SXGA_15HZ :
            mode.nXRes = XN_SXGA_X_RES;
            mode.nYRes = XN_SXGA_Y_RES;
            mode.nFPS = 15;
            break;
        case CV_CAP_OPENNI_SXGA_30HZ :
            mode.nXRes = XN_SXGA_X_RES;
            mode.nYRes = XN_SXGA_Y_RES;
            mode.nFPS = 30;
            break;
        case CV_CAP_OPENNI_QVGA_30HZ :
             mode.nXRes = XN_QVGA_X_RES;
             mode.nYRes = XN_QVGA_Y_RES;
             mode.nFPS = 30;
             break;
        case CV_CAP_OPENNI_QVGA_60HZ :
             mode.nXRes = XN_QVGA_X_RES;
             mode.nYRes = XN_QVGA_Y_RES;
             mode.nFPS = 60;
             break;
        default :
            CV_Error( CV_StsBadArg, "Unsupported image generator output mode.\n");
        }

        XnStatus status = imageGenerator.SetMapOutputMode( mode );
        if( status != XN_STATUS_OK )
            std::cerr << "CvCapture_OpenNI::setImageGeneratorProperty : " << xnGetStatusString(status) << std::endl;
        else
            isSet = true;
        break;
    }
    default:
    {
        std::stringstream ss;
        ss << "Image generator does not support such parameter (propIdx=" << propIdx << ") for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Developer: AndreSteenveld, Project: opencv, Lines of code: 60, Source: cap_openni.cpp
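In OpenCV this setter is normally reached through cv::VideoCapture rather than called directly: the caller adds the CV_CAP_OPENNI_IMAGE_GENERATOR offset to the property id, and the capture object routes the request to setImageGeneratorProperty(). A hedged usage sketch, assuming an OpenCV 2.x build compiled with OpenNI support and an attached device:

#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture capture(CV_CAP_OPENNI);        // open the OpenNI device
    if (!capture.isOpened())
        return 1;

    // The image-generator offset makes this call end up in setImageGeneratorProperty().
    capture.set(CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_OPENNI_OUTPUT_MODE,
                CV_CAP_OPENNI_SXGA_15HZ);
    return 0;
}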


Example 2: ConfigureGenerators

XnStatus ConfigureGenerators(const RecConfiguration& config, xn::Context& context, xn::DepthGenerator& depthGenerator, xn::ImageGenerator& imageGenerator)
{
	XnStatus nRetVal = XN_STATUS_OK;
	xn::EnumerationErrors errors;

	// Configure the depth, if needed
	if (config.bRecordDepth)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_DEPTH, NULL, depthGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Depth", errors);
		nRetVal = depthGenerator.SetMapOutputMode(*config.pDepthMode);
		CHECK_RC(nRetVal, "Set Mode");
		if (config.bMirrorIndicated && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			depthGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}

		// Set Hole Filter
		depthGenerator.SetIntProperty("HoleFilter", TRUE);
	}
	// Configure the image, if needed
	if (config.bRecordImage)
	{
		nRetVal = context.CreateAnyProductionTree(XN_NODE_TYPE_IMAGE, NULL, imageGenerator, &errors);
		CHECK_RC_ERR(nRetVal, "Create Image", errors);
		nRetVal = imageGenerator.SetMapOutputMode(*config.pImageMode);
		CHECK_RC(nRetVal, "Set Mode");

		if (config.bMirrorIndicated && imageGenerator.IsCapabilitySupported(XN_CAPABILITY_MIRROR))
		{
			imageGenerator.GetMirrorCap().SetMirror(config.bMirror);
		}
	}

	// Configuration for when there are both streams
	if (config.bRecordDepth && config.bRecordImage)
	{
		// Registration
		if (config.bRegister && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
		{
			nRetVal = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
			CHECK_RC(nRetVal, "Registration");
		}
		// Frame Sync
		if (config.bFrameSync && depthGenerator.IsCapabilitySupported(XN_CAPABILITY_FRAME_SYNC))
		{
			if (depthGenerator.GetFrameSyncCap().CanFrameSyncWith(imageGenerator))
			{
				nRetVal = depthGenerator.GetFrameSyncCap().FrameSyncWith(imageGenerator);
				CHECK_RC(nRetVal, "Frame sync");
			}
		}
	}

	return XN_STATUS_OK;
}
Developer: ABMNYZ, Project: OpenNI, Lines of code: 56, Source: main.cpp


Example 3: Update

	// Save new data from OpenNI
	void Update(const xn::DepthGenerator& depthGenerator, const xn::ImageGenerator& imageGenerator)
	{
		if (m_bDepth)
		{
			// Save latest depth frame
			xn::DepthMetaData dmd;
			depthGenerator.GetMetaData(dmd);
			m_pFrames[m_nNextWrite].depthFrame.CopyFrom(dmd);
		}
		if (m_bImage)
		{
			// Save latest image frame
			xn::ImageMetaData imd;
			imageGenerator.GetMetaData(imd);
			m_pFrames[m_nNextWrite].imageFrame.CopyFrom(imd);
		}

		// See if buffer is already full
		if (m_nBufferCount < m_nBufferSize)
		{
			m_nBufferCount++;
		}
		// Make sure cyclic buffer pointers are good
		m_nNextWrite++;
		if (m_nNextWrite == m_nBufferSize)
		{
			m_nNextWrite = 0;
		}
	}
Developer: ABMNYZ, Project: OpenNI, Lines of code: 30, Source: main.cpp


Example 4: update

// Save new data from OpenNI
void NiRecorder::update(const xn::DepthGenerator &dg, const xn::ImageGenerator &ig)
{
    // Save latest depth frame
    xn::DepthMetaData dmd;
    dg.GetMetaData(dmd);
    frames[next_to_write].depth_frame.CopyFrom(dmd);

    // Save latest image frame
    xn::ImageMetaData imd;
    ig.GetMetaData(imd);
    frames[next_to_write].image_frame.CopyFrom(imd);

    // See if buffer is already full
    if (buffer_count < buffer_size)
    {
        buffer_count++;
    }

    // Make sure cyclic buffer pointers are good
    next_to_write++;
    if (next_to_write == buffer_size)
    {
        next_to_write = 0;
    }
}
Developer: hudakm, Project: animik, Lines of code: 26, Source: NiRecorder.cpp


Example 5: glutDisplay

// this function is called each frame
void glutDisplay ()
{
	clock_t t1 = clock();
	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
#ifndef USE_GLES
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitOneUpdateAll(g_UserGenerator);
	}

		// Process the data
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	g_UserGenerator.GetUserPixels(0, sceneMD);
	if(Show_Image == FALSE)
		DrawDepthMap(depthMD, sceneMD,COM_tracker,Bounding_Box);
	else
	{
		DrawImageMap(imageMD, depthMD, sceneMD,COM_tracker,Bounding_Box);
	}
#ifndef USE_GLES
	glutSwapBuffers();
#endif
	clock_t t2 = clock();
	std::cout << t2 - t1 << std::endl;
}
Developer: ZewiHugo, Project: UserTracker_modified, Lines of code: 46, Source: main.cpp


Example 6: CleanupExit

void CleanupExit()
{
    g_scriptNode.Release();
    g_DepthGenerator.Release();
    g_UserGenerator.Release();
    g_Player.Release();
    g_Context.Release();
    g_ImageGenerator.Release();
    exit (1);
}
Developer: ami-lab, Project: AmI-Platform, Lines of code: 10, Source: main.cpp


Example 7: InitialKinect

void InitialKinect()
{
		// 1. Initialize the context
		mContext.Init();

		// 2. Set the map output mode (already defined in the constructor)
		mapMode.nXRes = 640;
		mapMode.nYRes = 480;
		mapMode.nFPS = 30;

		// 3.a Create the depth generator
		mDepthGenerator.Create( mContext );
		mDepthGenerator.SetMapOutputMode( mapMode );

		// 3.b Create the image generator
		mImageGenerator.Create( mContext );
		mImageGenerator.SetMapOutputMode( mapMode );

		// 4. Correct the view point
		mDepthGenerator.GetAlternativeViewPointCap().SetViewPoint( mImageGenerator );
		std::cout << "KinectSensor initialized correctly!" << std::endl;
}
Developer: s880367, Project: DepthSpace_HumanTracking, Lines of code: 22, Source: Project2RealWorld.cpp


Example 8: glutDisplay

// this function is called each frame
void glutDisplay (void)
{
    xn::SceneMetaData sceneMD;
    xn::DepthMetaData depthMD;
    xn::ImageMetaData imageMD;

    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Setup the OpenGL viewpoint
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();

    g_DepthGenerator.GetMetaData(depthMD);
    g_ImageGenerator.GetMetaData(imageMD);

#ifndef USE_GLES
    glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#else
    glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
#endif

    glDisable(GL_TEXTURE_2D);

    // Read next available data
    g_Context.WaitOneUpdateAll(g_UserGenerator);

    // Process the data
    g_DepthGenerator.GetMetaData(depthMD);
    g_UserGenerator.GetUserPixels(0, sceneMD);
    g_ImageGenerator.GetMetaData(imageMD);

    // Draw the input fetched from the Kinect
    DrawKinectInput(depthMD, sceneMD, imageMD);

#ifndef USE_GLES
    glutSwapBuffers();
#endif
}
Developer: ami-lab, Project: AmI-Platform, Lines of code: 40, Source: main.cpp


Example 9: changeRegistration

void changeRegistration(int nValue)
{
	if (!g_DepthGenerator.IsValid() || !g_DepthGenerator.IsCapabilitySupported(XN_CAPABILITY_ALTERNATIVE_VIEW_POINT))
	{
		return;
	}

	if(!nValue)
	{
		g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
	}
	else if (g_ImageGenerator.IsValid())
	{
		g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	}
}
Developer: ZewiHugo, Project: UserTracker_modified, Lines of code: 16, Source: main.cpp


Example 10: getImageGeneratorProperty

double CvCapture_OpenNI::getImageGeneratorProperty( int propIdx )
{
    double propValue = 0.;
    if( !imageGenerator.IsValid() )
        return propValue;

    XnMapOutputMode mode;
    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT :
        CV_DbgAssert( imageGenerator.IsValid() );
        propValue = 1.;
        break;
    case CV_CAP_PROP_FRAME_WIDTH :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nXRes;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nYRes;
        break;
    case CV_CAP_PROP_FPS :
        if( imageGenerator.GetMapOutputMode(mode) == XN_STATUS_OK )
            propValue = mode.nFPS;
        break;
    case CV_CAP_PROP_POS_MSEC :
        propValue = imageGenerator.GetTimestamp();
        break;
    case CV_CAP_PROP_POS_FRAMES :
        propValue = imageGenerator.GetFrameID();
        break;
    default :
    {
        std::stringstream ss;
        ss << "Image generator does not support such parameter (propIdx=" << propIdx << ") for getting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return propValue;
}
Developer: AndreSteenveld, Project: opencv, Lines of code: 41, Source: cap_openni.cpp


Example 11: main

int main(int argc, char **argv)
{
    XnStatus nRetVal = XN_STATUS_OK;
    xn::EnumerationErrors errors;
    
    if( USE_RECORED_DATA ){
        g_Context.Init();
        g_Context.OpenFileRecording(RECORD_FILE_PATH);
        xn::Player player;
        
        // Get the Player node
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_PLAYER, player);
        CHECK_RC(nRetVal, "Find player");
        
        LOG_D("PlaybackSpeed: %d", player.GetPlaybackSpeed());
        
        xn::NodeInfoList nodeList;
        player.EnumerateNodes(nodeList);
        for( xn::NodeInfoList::Iterator it = nodeList.Begin();
            it != nodeList.End(); ++it){
            
            if( (*it).GetDescription().Type == XN_NODE_TYPE_IMAGE ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
                CHECK_RC(nRetVal, "Find image node");
                LOG_D("%s", "ImageGenerator created.");
            }
            else if( (*it).GetDescription().Type == XN_NODE_TYPE_DEPTH ){
                nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
                CHECK_RC(nRetVal, "Find depth node");
                LOG_D("%s", "DepthGenerator created.");            
            }
            else{
                LOG_D("%s %s %s", ::xnProductionNodeTypeToString((*it).GetDescription().Type ),
                      (*it).GetInstanceName(),
                      (*it).GetDescription().strName);
            }
        }
    }
    else{
        LOG_I("Reading config from: '%s'", CONFIG_XML_PATH);
        
        nRetVal = g_Context.InitFromXmlFile(CONFIG_XML_PATH, g_scriptNode, &errors);
        if (nRetVal == XN_STATUS_NO_NODE_PRESENT){
            XnChar strError[1024];
            errors.ToString(strError, 1024);
            LOG_E("%s\n", strError);
            return (nRetVal);
        }
        else if (nRetVal != XN_STATUS_OK){
            LOG_E("Open failed: %s", xnGetStatusString(nRetVal));
            return (nRetVal);
        }
        
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_DEPTH, g_DepthGenerator);
        CHECK_RC(nRetVal,"No depth");
        
        // Get the ImageGenerator node
        nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_ImageGenerator);
        CHECK_RC(nRetVal, "Find image generator");
        
    }
    // Get the UserGenerator
    nRetVal = g_Context.FindExistingNode(XN_NODE_TYPE_USER, g_UserGenerator);
    if(nRetVal!=XN_STATUS_OK){
        nRetVal = g_UserGenerator.Create(g_Context); 
        CHECK_RC(nRetVal, "Create user generator");
    }

    
    XnCallbackHandle hUserCallbacks, hCalibrationStart, hCalibrationComplete, hPoseDetected;
    if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_SKELETON)){
        LOG_E("%s", "Supplied user generator doesn't support skeleton");
        return 1;
    }
    nRetVal = g_UserGenerator.RegisterUserCallbacks(User_NewUser, User_LostUser, NULL, hUserCallbacks);
    CHECK_RC(nRetVal, "Register to user callbacks");

    g_SkeletonCap = g_UserGenerator.GetSkeletonCap();
    nRetVal = g_SkeletonCap.RegisterToCalibrationStart(UserCalibration_CalibrationStart, NULL, hCalibrationStart);
    CHECK_RC(nRetVal, "Register to calibration start");

    nRetVal = g_SkeletonCap.RegisterToCalibrationComplete(UserCalibration_CalibrationComplete, NULL, hCalibrationComplete);
    CHECK_RC(nRetVal, "Register to calibration complete");
    
    if (g_SkeletonCap.NeedPoseForCalibration()){
        g_bNeedPose = TRUE;
        if (!g_UserGenerator.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION)){
            LOG_E("%s", "Pose required, but not supported");
            return 1;
        }
        nRetVal = g_UserGenerator.GetPoseDetectionCap().RegisterToPoseDetected(UserPose_PoseDetected, NULL, hPoseDetected);
        CHECK_RC(nRetVal, "Register to Pose Detected");
        g_SkeletonCap.GetCalibrationPose(g_strPose);
    }
    
    g_SkeletonCap.SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
    
    nRetVal = g_Context.StartGeneratingAll();
    CHECK_RC(nRetVal, "StartGenerating");
    
//......... the rest of the code is omitted here .........
Developer: sparkgene, Project: recorder_test, Lines of code: 101, Source: main.cpp


Example 12: captureRGB

cv::Mat xncv::captureRGB(const xn::ImageGenerator& generator)
{
	xn::ImageMetaData meta;
	generator.GetMetaData(meta);
	return cv::Mat(meta.YRes(), meta.XRes(),cv::DataType<cv::Vec3b>::type, (void*)meta.RGB24Data());
}
Developer: ViniGodoy, Project: xncv, Lines of code: 6, Source: functions.cpp
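One caveat with the wrapper above: the returned cv::Mat points straight at OpenNI's internal RGB buffer, so the pixels are only valid until the next context update, and the channel order is RGB rather than OpenCV's usual BGR. A hedged usage sketch (the xncv header and an already-started generator are assumed from the surrounding project):

// Hypothetical usage of xncv::captureRGB; the xncv functions header is assumed to be available.
#include <XnCppWrapper.h>
#include <opencv2/opencv.hpp>

void saveFrame(const xn::ImageGenerator& generator)
{
    cv::Mat rgb = xncv::captureRGB(generator);   // wraps OpenNI's buffer, no copy made
    cv::Mat bgr;
    cv::cvtColor(rgb, bgr, CV_RGB2BGR);          // convert to BGR; bgr now owns its data
    cv::imwrite("frame.png", bgr);
}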


Example 13: glutDisplay

// this function is called each frame
void glutDisplay (void)
{

	glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	// Setup the OpenGL viewpoint
	glMatrixMode(GL_PROJECTION);
	glPushMatrix();
	glLoadIdentity();

	// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
	g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

	xn::SceneMetaData sceneMD;
	xn::DepthMetaData depthMD;
	xn::ImageMetaData imageMD;
	g_DepthGenerator.GetMetaData(depthMD);
	g_ImageGenerator.GetMetaData(imageMD);
	#ifdef USE_GLUT
	glOrtho(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#else
	glOrthof(0, depthMD.XRes(), depthMD.YRes(), 0, -1.0, 1.0);
	#endif

	glDisable(GL_TEXTURE_2D);

	if (!g_bPause)
	{
		// Read next available data
		g_Context.WaitAndUpdateAll();
	}

		// Process the data
		//DRAW
		// Check if Registration is done for Depth and RGB Images - Brandyn, Sravanthi
		g_DepthGenerator.GetAlternativeViewPointCap().SetViewPoint(g_ImageGenerator);
	//	g_DepthGenerator.GetAlternativeViewPointCap().ResetViewPoint();

		g_DepthGenerator.GetMetaData(depthMD);
		g_ImageGenerator.GetMetaData(imageMD);
		g_UserGenerator.GetUserPixels(0, sceneMD);


		DrawDepthMap(depthMD, imageMD, sceneMD, g_nPlayer);

		if (g_nPlayer != 0)
		{
			XnPoint3D com;
			g_UserGenerator.GetCoM(g_nPlayer, com);
			if (com.Z == 0)
			{
				g_nPlayer = 0;
				FindPlayer();
			}
		}

	#ifdef USE_GLUT
	glutSwapBuffers();
	#endif
}
Developer: sravanthib, Project: openni_annotator, Lines of code: 62, Source: main.cpp


Example 14: grabFrame

bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
Developer: AndreSteenveld, Project: opencv, Lines of code: 25, Source: cap_openni.cpp


Example 15: SetupImage

bool SetupImage(xn::Context& g_context)
{
	XnStatus nRetVal = XN_STATUS_OK;
	fprintf(stderr,"Setting up the image generator\n");

	if ((nRetVal = g_image.Create(g_context))!= XN_STATUS_OK)
	{
		printf("Could not create depth generator: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}


	if ((nRetVal = g_context.FindExistingNode(XN_NODE_TYPE_IMAGE, g_image)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not find image sensor: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}

	XnMapOutputMode mapMode;
	mapMode.nXRes = XN_VGA_X_RES;
	mapMode.nYRes = XN_VGA_Y_RES;
	mapMode.nFPS = 30;
	if ((nRetVal = g_image.SetMapOutputMode(mapMode)) != XN_STATUS_OK)
	{
		fprintf(stderr,"Could not set image mode: %s\n", xnGetStatusString(nRetVal));
		return FALSE;
	}
	return TRUE;
}
Developer: Topographic0cean, Project: src, Lines of code: 29, Source: ImageFlinger.cpp


Example 16: catch

	information()
	{
		RC(context.Init(),							"Context initialized");
		
		XnMapOutputMode mode;
		mode.nXRes = XN_VGA_X_RES;
		mode.nYRes = XN_VGA_Y_RES;
		mode.nFPS  = 30;
		
		RC(image.Create(context),					"Create image buffer");
		RC(image.SetMapOutputMode(mode),			"Set image mode");
		
		RC(depth.Create(context),					"Create depth buffer");
		RC(depth.SetMapOutputMode(mode),			"Set depth mode");
		
		xn::Query q;
		RC(q.AddSupportedCapability(XN_CAPABILITY_SKELETON), "Request skeleton");

		try {
			RC(context.FindExistingNode(XN_NODE_TYPE_USER, user), "User generator");
		} catch (...) {
			RC(user.Create(context),					"Get skeleton!!!");
		}
//		RC(user.Create(context, &q),					"Get skeleton!!!");
//		
//		xn::NodeInfoList il;
//		RC(context.EnumerateProductionTrees(XN_NODE_TYPE_USER, &q, il, NULL),
//													"Enumerate nodes");
//		
//		xn::NodeInfo i = *il.Begin();
//		RC(context.CreateProductionTree(i),			"Create skeleton node");
//		RC(i.GetInstance(user),						"Get skeleton");
		
		user.RegisterUserCallbacks(User_NewUser, NULL, NULL, hUserCallbacks);
		user.GetSkeletonCap().RegisterCalibrationCallbacks(UserCalibration_CalibrationStart, UserCalibration_CalibrationEnd, &user, hCalibrationCallbacks);
		
		if (user.GetSkeletonCap().NeedPoseForCalibration())
		{
			if (!user.IsCapabilitySupported(XN_CAPABILITY_POSE_DETECTION))
			{
				post("Pose required, but not supported\n");
			}
			else
			{
				user.GetPoseDetectionCap().RegisterToPoseCallbacks(UserPose_PoseDetected, NULL, &user, hPoseCallbacks);
				user.GetSkeletonCap().GetCalibrationPose(g_strPose);
				user.GetSkeletonCap().SetSkeletonProfile(XN_SKEL_PROFILE_ALL);
			}
		}
		
		RC(context.StartGeneratingAll(),			"Start generating data");
		
		post("Kinect initialized!\n");
	}
Developer: kidaa, Project: Synthesis, Lines of code: 54, Source: tml.jit.OpenNI.cpp


Example 17: Loop

void Loop(void)
{
	XnStatus nRetVal = XN_STATUS_OK;


	while (g_notDone)
	{

		if ((nRetVal = g_context.WaitOneUpdateAll(g_depth)) != XN_STATUS_OK)
			//if ((nRetVal = g_context.WaitAndUpdateAll()) != XN_STATUS_OK)
		{
			fprintf(stderr,"Could not update data: %s\n", xnGetStatusString(nRetVal));
			continue;
		}
		if (g_haveDepth)
		{
			const XnDepthPixel* pDepthMap = g_depth.GetDepthMap();
			ProcessDepthFrame(pDepthMap, g_depthWidth, g_depthHeight);
			FindFingertip();
		}

		if (g_haveImage)
		{
			const XnRGB24Pixel* pImageMap = g_image.GetRGB24ImageMap();
			ProcessImageFrame(pImageMap, g_depthWidth, g_depthHeight);
		}



		ShowFrame();

		CheckKeys();
	}
}
Developer: Topographic0cean, Project: src, Lines of code: 34, Source: PrimesenseTestbed.cpp


Example 18: setCommonProperty

bool CvCapture_OpenNI::setCommonProperty( int propIdx, double propValue )
{
    bool isSet = false;

    switch( propIdx )
    {
    // There is a set of properties that correspond to the depth generator by default
    // (i.e. when they are passed without a particular generator flag).
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        isSet = setDepthGeneratorProperty( propIdx, propValue );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        if( propValue && depthGenerator.IsValid() && imageGenerator.IsValid() )
        {
            // start synchronization
            if( approxSyncGrabber.empty() )
            {
                approxSyncGrabber = new ApproximateSyncGrabber( context, depthGenerator, imageGenerator, maxBufferSize, isCircleBuffer, maxTimeDuration );
            }
            else
            {
                approxSyncGrabber->finish();

                // update params
                approxSyncGrabber->setMaxBufferSize(maxBufferSize);
                approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
                approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
            }
            approxSyncGrabber->start();
        }
        else if( !propValue && !approxSyncGrabber.empty() )
        {
            // finish synchronization
            approxSyncGrabber->finish();
        }
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        maxBufferSize = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxBufferSize(maxBufferSize);
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        maxTimeDuration = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
        break;
    default:
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Developer: AndreSteenveld, Project: opencv, Lines of code: 60, Source: cap_openni.cpp


Example 19: setDepthGeneratorProperty

bool CvCapture_OpenNI::setDepthGeneratorProperty( int propIdx, double propValue )
{
    bool isSet = false;

    CV_Assert( depthGenerator.IsValid() );

    switch( propIdx )
    {
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        {
            if( propValue != 0.0 ) // "on"
            {
                // if there is no image generator (e.g. the ASUS XtionPro doesn't have one)
                // then the property isn't available
                if( imageGenerator.IsValid() )
                {
                    if( !depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) )
                    {
                        if( depthGenerator.GetAlternativeViewPointCap().IsViewPointSupported(imageGenerator) )
                        {
                            XnStatus status = depthGenerator.GetAlternativeViewPointCap().SetViewPoint(imageGenerator);
                            if( status != XN_STATUS_OK )
                                std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                            else
                                isSet = true;
                        }
                        else
                            std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : Unsupported viewpoint." << std::endl;
                    }
                    else
                        isSet = true;
                }
            }
            else // "off"
            {
                XnStatus status = depthGenerator.GetAlternativeViewPointCap().ResetViewPoint();
                if( status != XN_STATUS_OK )
                    std::cerr << "CvCapture_OpenNI::setDepthGeneratorProperty : " << xnGetStatusString(status) << std::endl;
                else
                    isSet = true;
            }
        }
        break;
    default:
    {
        std::stringstream ss;
        ss << "Depth generator does not support such parameter (propIdx=" << propIdx << ") for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Developer: AndreSteenveld, Project: opencv, Lines of code: 53, Source: cap_openni.cpp


Example 20: captureOne

bool DataCapture::captureOne()
{
    XnStatus rc = context_.WaitAndUpdateAll(); // want this to be WaitOneUpdateAll(RGB image)
    if( rc != XN_STATUS_OK )
    {
        std::cout << "WaitAndUpdateAll: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // grab image
    imageGen_.GetMetaData(imageMd_);
    const XnRGB24Pixel* rgbData = imageMd_.RGB24Data();
    for( unsigned int i = 0; i < 640 * 480; ++i )
    {
        pRgbData_[3*i] = rgbData->nRed;
        pRgbData_[3*i + 1] = rgbData->nGreen;
        pRgbData_[3*i + 2] = rgbData->nBlue;
        ++rgbData;
    }

    // grab depth image
    depthGen_.GetMetaData(depthMd_);
    const uint16_t* pDepthDataU16 = depthMd_.Data();
    for( int i = 0; i < 640 * 480; ++i)
    {
        uint16_t d = pDepthDataU16[i];
        if( d != 0 )
        {
            pDepthData_[i] = (d * 255)/2048;
        }
        else
        {
            pDepthData_[i] = 0; // should be NAN
        }
    }
    return true;
}
Developer: cvilas, Project: scratch, Lines of code: 37, Source: main.cpp



Note: The xn::ImageGenerator class examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

