
C++ openni::VideoFrameRef Class Code Examples


This article collects typical usage examples of the C++ class openni::VideoFrameRef. If you are wondering what the VideoFrameRef class is for, how to use it, or what working code that uses it looks like, the curated examples below should help.



Twenty code examples of the VideoFrameRef class are shown below, sorted by popularity by default.
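
Before the examples, here is a minimal sketch of how a VideoFrameRef is usually obtained in OpenNI2 and then handed to functions like those below. This outline is illustrative only and not taken from any of the projects listed here: it assumes a connected depth-capable device, the local variable names are arbitrary, and most error handling is omitted.

// Illustrative outline only: assumes a connected depth-capable device.
#include <OpenNI.h>
#include <cstdio>

int main()
{
	// Initialize OpenNI and open the first available device.
	if (openni::OpenNI::initialize() != openni::STATUS_OK)
		return 1;

	openni::Device device;
	if (device.open(openni::ANY_DEVICE) != openni::STATUS_OK)
		return 1;

	// Create and start a depth stream on the device.
	openni::VideoStream depth;
	depth.create(device, openni::SENSOR_DEPTH);
	depth.start();

	// Read one frame; the VideoFrameRef holds a reference to the frame buffer
	// and releases it automatically when it goes out of scope.
	openni::VideoFrameRef frame;
	if (depth.readFrame(&frame) == openni::STATUS_OK && frame.isValid())
	{
		const openni::DepthPixel* pixels = (const openni::DepthPixel*)frame.getData();
		int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);
		int centerDepth = pixels[(frame.getHeight() / 2) * rowSize + frame.getWidth() / 2];
		printf("%dx%d frame, center depth %d mm\n", frame.getWidth(), frame.getHeight(), centerDepth);
	}

	depth.stop();
	depth.destroy();
	device.close();
	openni::OpenNI::shutdown();
	return 0;
}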

Example 1: cvt_oniimage

  // Wrap the OpenNI frame buffer in an image-babble image without copying;
  // const is cast away because the image constructor takes a non-const pointer.
  inline void cvt_oniimage(openni::VideoFrameRef src, image &to, const MemOp &m)
  {
    const void* data = src.getData();
    void* datab = const_cast<void*>(data);
    to = image(src.getWidth(), src.getHeight(), src.getStrideInBytes(), datab, m);
    to.set_format(image::FORMAT_DEPTH_16);
  }
Developer ID: rickesh, Project: image-babble, Lines of code: 7, Source file: openni.hpp


Example 2: convert_depth_map

void convert_depth_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    // Normalize the 16-bit depth values to the 0-255 range and expand to a 3-channel BGR image.
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};

    cv::Mat s1, s2;
    s1 = cv::Mat(2, sizes, CV_16UC1, (void*)data);
    cv::normalize(s1, s2, 0, 255, CV_MINMAX, CV_8UC1);
    cv::cvtColor(s2, out, CV_GRAY2BGR);
	
	/*
		const nite::UserId* pLabels = map.getPixels();
	for (int y=0; y<map.getHeight(); ++y)
	{
		for (int x=0;x<map.getWidth(); ++x, ++pLabels)
		{
			uint16_t &v = s1.at<uint16_t>(cv::Point(x,y));
			if (!*pLabels)
				v = 0;
		}
	}
*/
	
//	cv::normalize(s1, out, 0, 255, CV_MINMAX, CV_8UC1);
}
Developer ID: timprepscius, Project: kinect, Lines of code: 25, Source file: ComputationalDebuggerCV.cpp


Example 3: Calculate

void CGraph::Calculate(float* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
	// Build a cumulative depth histogram and map it to 0-255 display intensities.
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
	int width = depthFrame.getWidth();
	int height = depthFrame.getHeight();
	memset(pHistogram, 0, histogramSize*sizeof(float));
	int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				pHistogram[*pDepth]++;
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	for (int nIndex = 1; nIndex < histogramSize; nIndex++)
	{
		pHistogram[nIndex] += pHistogram[nIndex - 1];
	}
	if (nNumberOfPoints)
	{
		for (int nIndex = 1; nIndex < histogramSize; nIndex++)
		{
			pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
		}
	}
}
Developer ID: FindNorthStar, Project: 3D_ver_2, Lines of code: 33, Source file: Graph.cpp


Example 4: calculateHistogram

/*
* Function to calculate the histogram of a depth image
*/
void calculateHistogram(int* pHistogram, int histogramSize, const openni::VideoFrameRef& depthFrame)
{
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)depthFrame.getData();
	int* pHistogram_temp = new int[histogramSize];
	int width = depthFrame.getWidth();
	int height = depthFrame.getHeight();
	// Calculate the accumulative histogram (the yellow HandSegmentation...)
	memset(pHistogram, 0, histogramSize*sizeof(int));
	memset(pHistogram_temp, 0, histogramSize*sizeof(int));
	int restOfRow = depthFrame.getStrideInBytes() / sizeof(openni::DepthPixel) - width;

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0 && *pDepth <= MAX_DEPTH)
			{
				pHistogram_temp[*pDepth]++;
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex < histogramSize; nIndex++)
		{
			pHistogram_temp[nIndex] += pHistogram_temp[nIndex-1];
			pHistogram[nIndex] = (int)(256 * (1.0f - ((float)pHistogram_temp[nIndex] / nNumberOfPoints)));
		}
	}
	// Free the temporary accumulation buffer.
	delete[] pHistogram_temp;
}
Developer ID: nguyentri, Project: HandGestureWDF, Lines of code: 36, Source file: Utilities.cpp


Example 5: calculateHistogram

void calculateHistogram(float* pHistogram, int histogramSize, const openni::VideoFrameRef& frame)
{
	const openni::DepthPixel* pDepth = (const openni::DepthPixel*)frame.getData();
	// Calculate the accumulative histogram (the yellow display...)
	memset(pHistogram, 0, histogramSize*sizeof(float));
	int restOfRow = frame.getStrideInBytes() / sizeof(openni::DepthPixel) - frame.getWidth();
	int height = frame.getHeight();
	int width = frame.getWidth();

	unsigned int nNumberOfPoints = 0;
	for (int y = 0; y < height; ++y)
	{
		for (int x = 0; x < width; ++x, ++pDepth)
		{
			if (*pDepth != 0)
			{
				pHistogram[*pDepth]++;
				nNumberOfPoints++;
			}
		}
		pDepth += restOfRow;
	}
	for (int nIndex=1; nIndex<histogramSize; nIndex++)
	{
		pHistogram[nIndex] += pHistogram[nIndex-1];
	}
	if (nNumberOfPoints)
	{
		for (int nIndex=1; nIndex<histogramSize; nIndex++)
		{
			pHistogram[nIndex] = (256 * (1.0f - (pHistogram[nIndex] / nNumberOfPoints)));
		}
	}
}
Developer ID: ballanlu, Project: GammaLib, Lines of code: 34, Source file: Video_OpenNI.cpp


Example 6: setPixels

void ColorStream::setPixels(openni::VideoFrameRef frame)
{
    Stream::setPixels(frame);

    openni::VideoMode m = frame.getVideoMode();

    int w = m.getResolutionX();
    int h = m.getResolutionY();
    int num_pixels = w * h;

    pix.allocate(w, h, 3);

    if (m.getPixelFormat() == openni::PIXEL_FORMAT_RGB888)
    {
        const unsigned char *src = (const unsigned char*)frame.getData();
        unsigned char *dst = pix.getBackBuffer().getPixels();

        for (int i = 0; i < num_pixels; i++)
        {
            dst[0] = src[0];
            dst[1] = src[1];
            dst[2] = src[2];
            src += 3;
            dst += 3;
        }
    }

    pix.swap();
}
Developer ID: shigeokitamura, Project: ofxNI2, Lines of code: 29, Source file: ofxNI2.cpp


Example 7: SetDepthFrame

void GeomDepthCalculator::SetDepthFrame(openni::VideoFrameRef& depthFrame)
{
	int w = depthFrame.getWidth();
	int h = depthFrame.getHeight();

	const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)depthFrame.getData();

	int rowSize = depthFrame.getStrideInBytes();
	rowSize /= sizeof(openni::DepthPixel);

	// Reuse the oldest buffer once the ring of recent frames is full.
	DepthFrame::FrameData<ushort>* frame = 0;
	if (m_frames.size() < m_maxFrames)
	{
		frame = new DepthFrame::FrameData<ushort>();
	}
	else
	{
		frame = *m_frames.begin();
		m_frames.erase(m_frames.begin());
	}
	frame->copyData(pDepthRow, w, h);
	m_frames.push_back(frame);

	m_avgFrame.createData(w,h);

	_averageFrames();

	m_frame->SetRawData(m_avgFrame.Data(), w, h);
	m_frame->CalculateDepth();
	if (m_calcNormals)
		m_frame->CalculateNormals();
	
}
Developer ID: yingzhang536, Project: mrayy-Game-Engine, Lines of code: 35, Source file: GeomDepthCalculator.cpp


Example 8: copyFrame

 void copyFrame(openni::VideoFrameRef& frame, MRPT_DATA& dst){
     const char*  data    = (const char*)frame.getData();
     const int    stride  = frame.getStrideInBytes();
     const int    width   = frame.getWidth();
     const int    height  = frame.getHeight();
     resize(dst, width, height);
     for (int y = 0; y < height; ++y, data+=stride){
         copyRow<NI_PIXEL, MRPT_DATA>(data, dst, width, y);
     }
 }
Developer ID: chen0510566, Project: mrpt, Lines of code: 10, Source file: COpenNI2Generic.cpp


Example 9: showDepthStream

 cv::Mat showDepthStream( const openni::VideoFrameRef& depthFrame )
 {
   // Wrap the depth data in a 16-bit image
   cv::Mat depthImage = cv::Mat( depthFrame.getHeight(),
                                depthFrame.getWidth(),
                                CV_16UC1, (unsigned short*)depthFrame.getData() );
   
   // Scale 0-10000 mm into the 0-255 (8-bit) range
   depthImage.convertTo( depthImage, CV_8U, 255.0 / 10000 );
   
   return depthImage;
 }
Developer ID: OpenNI2-Book, Project: openni2book, Lines of code: 12, Source file: main.cpp


Example 10: showColorStream

 // Convert the color stream into a displayable form
 cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
 {
   // Wrap the frame in an OpenCV Mat
   cv::Mat colorImage = cv::Mat( colorFrame.getHeight(),
                                colorFrame.getWidth(),
                                CV_8UC3, (unsigned char*)colorFrame.getData() );
   
   // Convert the RGB pixel order to OpenCV's BGR
   cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
   
   return colorImage;
 }
Developer ID: OpenNI2-Book, Project: openni2book, Lines of code: 13, Source file: main.cpp


Example 11: getColorImage

cv::Mat getColorImage(openni::VideoFrameRef& color_frame)
{
  if(!color_frame.isValid())
  {
    return cv::Mat();
  }
  openni::VideoMode video_mode = color_frame.getVideoMode();
  cv::Mat color_img = cv::Mat(video_mode.getResolutionY(),
                              video_mode.getResolutionX(),
                              CV_8UC3, (char*)color_frame.getData());
  cv::Mat ret_img;
  cv::cvtColor(color_img, ret_img, CV_RGB2BGR);
  return ret_img;
}
Developer ID: skyfiregao, Project: ARSmartGlass, Lines of code: 14, Source file: main.cpp


Example 12: getDepthImage

// CV_16U
cv::Mat getDepthImage(openni::VideoFrameRef& depth_frame)
{
  if(!depth_frame.isValid())
  {
    return cv::Mat();
  }

  openni::VideoMode video_mode = depth_frame.getVideoMode();
  cv::Mat depth_img = cv::Mat(video_mode.getResolutionY(),
                              video_mode.getResolutionX(),
                              CV_16U, (char*)depth_frame.getData());

  return depth_img.clone();
}
Developer ID: skyfiregao, Project: ARSmartGlass, Lines of code: 15, Source file: main.cpp


Example 13: toggleStreamState

void toggleStreamState(openni::VideoStream& stream, openni::VideoFrameRef& frame, bool& isOn, openni::SensorType type, const char* name)
{
    openni::Status nRetVal = openni::STATUS_OK;

    if (!stream.isValid())
    {
        nRetVal = stream.create(g_device, type);
        if (nRetVal != openni::STATUS_OK)
        {
            displayError("Failed to create %s stream:\n%s", name, openni::OpenNI::getExtendedError());
            return;
        }
    }

    if (isOn)
    {
        stream.stop();
        frame.release();
    }
    else
    {
        nRetVal = stream.start();
        if (nRetVal != openni::STATUS_OK)
        {
            displayError("Failed to start %s stream:\n%s", name, openni::OpenNI::getExtendedError());
            return;
        }
    }

    isOn = !isOn;
}
Developer ID: higuchi-yuuki, Project: OpenNI2, Lines of code: 31, Source file: Device.cpp


Example 14: displayFrame

void SampleViewer::displayFrame(const openni::VideoFrameRef& frame)
{
	if (!frame.isValid())
		return;

	const openni::DepthPixel* pDepthRow = (const openni::DepthPixel*)frame.getData();
	openni::RGB888Pixel* pTexRow = m_pTexMap + frame.getCropOriginY() * m_nTexMapX;
	int rowSize = frame.getStrideInBytes() / sizeof(openni::DepthPixel);

	for (int y = 0; y < frame.getHeight(); ++y)
	{
		const openni::DepthPixel* pDepth = pDepthRow;
		openni::RGB888Pixel* pTex = pTexRow + frame.getCropOriginX();

		for (int x = 0; x < frame.getWidth(); ++x, ++pDepth, ++pTex)
		{
			if (*pDepth != 0)
			{
				int nHistValue = m_pDepthHist[*pDepth];
				pTex->r = nHistValue;
				pTex->g = nHistValue;
				pTex->b = nHistValue;
			}
		}

		pDepthRow += rowSize;
		pTexRow += m_nTexMapX;
	}

}
Developer ID: Arkapravo, Project: OpenNI2, Lines of code: 30, Source file: Viewer.cpp


Example 15: convert_pixel_map

void convert_pixel_map(const openni::VideoFrameRef &in, cv::Mat& out)
{
    const void *data = in.getData();
    int sizes[2] = {in.getHeight(), in.getWidth()};

    cv::Mat s1, &s2 = out;
    s1 = cv::Mat(2, sizes, CV_8UC3, (void *)data);
    cv::cvtColor(s1,s2, CV_RGB2BGR);
	
	/*
	const nite::UserId* pLabels = map.getPixels();
	for (int y=0; y<map.getHeight(); ++y)
	{
		for (int x=0;x<map.getWidth(); ++x, ++pLabels)
		{
			cv::Vec3b &v = s2.at<cv::Vec3b>(cv::Point(x,y));
			if (*pLabels == 0)
				v = cv::Vec3b(0,0,0);
		}
	}
	*/
}
Developer ID: timprepscius, Project: kinect, Lines of code: 22, Source file: ComputationalDebuggerCV.cpp


Example 16: toCVTImage

            static void toCVTImage( Image& dst, const openni::VideoFrameRef& frame )
            {
                dst.reallocate( frame.getWidth(), frame.getHeight(), Openni2Helper::toIFormat( frame.getVideoMode().getPixelFormat() ) );

                switch( frame.getVideoMode().getPixelFormat() ){
                    case openni::PIXEL_FORMAT_RGB888:
                        copyRGB( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
                        break;
                    default:
                        copyData( dst, ( const uint8_t* )frame.getData(), frame.getStrideInBytes() );
                }
            }
Developer ID: hksonngan, Project: cvt, Lines of code: 12, Source file: OpenNI2Camera.cpp


Example 17: getSmallerDepth

unsigned short DepthSensor::getSmallerDepth( const openni::VideoFrameRef& depthFrame ) {
    openni::VideoMode videoMode = depthStream.getVideoMode();

    int depthIndex = videoMode.getResolutionX() * videoMode.getResolutionY();

    unsigned short* depth = (unsigned short*)depthFrame.getData();

    // Scan every pixel, skipping invalid (zero) depth values, and track the minimum depth.
    unsigned short min = 10000;
    for (int i = 0; i < depthIndex; i++) {
        if (depth[i] != 0 && depth[i] < min) {
            min = depth[i];
        }
    }
    return min;
}
Developer ID: Tokunn, Project: etoshanV2, Lines of code: 17, Source file: depth.cpp


Example 18: updateColorFrame

void KinectHelper::updateColorFrame(openni::VideoFrameRef frame){
    DEBUG_QUEUE qDebug() << "queued color frame#" << frame.getFrameIndex();
    colorQueue.enqueue(frame);
}
Developer ID: prernaa, Project: handtrack_share2, Lines of code: 4, Source file: KinectHelper.cpp


Example 19: showColorStream

 // Convert the color stream into a displayable form
 cv::Mat showColorStream( const openni::VideoFrameRef& colorFrame )
 {
   cv::Mat colorImage;
   
   // Color stream
   if ( colorFrame.getVideoMode().getPixelFormat() ==
       openni::PIXEL_FORMAT_RGB888 ) {
     // Wrap the frame in an OpenCV Mat
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_8UC3, (unsigned char*)colorFrame.getData() );
     
     // Convert the RGB pixel order to OpenCV's BGR
     cv::cvtColor( colorImage, colorImage, CV_RGB2BGR );
   }
   // Xtion IR stream
   else if ( colorFrame.getVideoMode().getPixelFormat() ==
            openni::PIXEL_FORMAT_GRAY16 ) {
     // The Xtion IR stream is delivered as 16-bit grayscale.
     // In practice it spans only about 255 levels, so it must be reduced to CV_8U to be visible.
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_16UC1, (unsigned short*)colorFrame.getData() );
     colorImage.convertTo( colorImage, CV_8U );
   }
   // Kinect for Windows IR stream
   else {
     // The Kinect IR format here is 8-bit grayscale
     // (the Kinect SDK itself reports 16-bit grayscale).
     colorImage = cv::Mat( colorFrame.getHeight(),
                          colorFrame.getWidth(),
                          CV_8UC1, (unsigned char*)colorFrame.getData() );
   }
   
   return colorImage;
 }
Developer ID: OpenNI2-Book, Project: openni2book, Lines of code: 37, Source file: main.cpp


Example 20: updateDepthFrame

void KinectHelper::updateDepthFrame(openni::VideoFrameRef frame){
    /// Fetch new depth frame from the frame listener class
    DEBUG_QUEUE qDebug() << "queued depth frame#" << frame.getFrameIndex();
    depthQueue.enqueue(frame);
}
Developer ID: prernaa, Project: handtrack_share2, Lines of code: 5, Source file: KinectHelper.cpp



Note: The openni::VideoFrameRef class examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.

