This article collects typical usage examples of the C++ processFrame function. If you have been wondering how processFrame is used in C++, how to call it, or what real calling code looks like, the hand-picked code examples below may help.
Twenty code examples of processFrame are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
Example 1: processVideo
void processVideo(CameraCalibration& calibration, cv::VideoCapture& capture)
{
    // Grab first frame to get the frame dimensions
    cv::Mat currentFrame, srcFrame;
    capture >> srcFrame;
    resize(srcFrame, currentFrame, cvSize(653, 368));

    // Check the capture succeeded:
    if (currentFrame.empty())
    {
        std::cout << "Cannot open video capture device" << std::endl;
        return;
    }

    cv::Size frameSize(currentFrame.cols, currentFrame.rows);
    std::auto_ptr<MarkerDetectionFacade> detector = createMarkerDetection(calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        capture >> srcFrame;
        if (srcFrame.empty())
        {
            shouldQuit = true;
            continue;
        }

        resize(srcFrame, currentFrame, cvSize(653, 368));
        shouldQuit = processFrame(currentFrame, detector, drawingCtx);
    } while (!shouldQuit);
}
Developer: fengyu3941, Project: marker_based_ar_demo, Lines: 32, Source: main.cpp
Example 2: QDomDocument
QDomDocument *XmlWriter::toXml()
{
    QDomImplementation implementation;
    QDomDocumentType docType = implementation.createDocumentType(
        "scribe-document", "scribe", "qt.nokia.com/scribe");

    document = new QDomDocument(docType);

    // ### This processing instruction is required to ensure that any kind
    // of encoding is given when the document is written.
    QDomProcessingInstruction process = document->createProcessingInstruction(
        "xml", "version=\"1.0\" encoding=\"utf-8\"");
    document->appendChild(process);

    QDomElement documentElement = document->createElement("document");
    document->appendChild(documentElement);

//! [0]
    QTextFrame *root = textDocument->rootFrame();
//! [0]

    if (root)
        processFrame(documentElement, root);

    return document;
}
Developer: Akheon23, Project: chromecast-mirrored-source.vendor, Lines: 26, Source: xmlwriter.cpp
Example 3: SVO_START_TIMER
void FrameHandlerMono::addImage(const cv::Mat& img, const double timestamp)
{
    if(!startFrameProcessingCommon(timestamp))
        return;

    // some cleanup from last iteration, can't do before because of visualization
    core_kfs_.clear();
    overlap_kfs_.clear();

    // create new frame
    SVO_START_TIMER("pyramid_creation");
    new_frame_.reset(new Frame(cam_, img.clone(), timestamp));
    SVO_STOP_TIMER("pyramid_creation");

    // process frame
    UpdateResult res = RESULT_FAILURE;
    if(stage_ == STAGE_DEFAULT_FRAME)
        res = processFrame();
    else if(stage_ == STAGE_SECOND_FRAME)
        res = processSecondFrame();
    else if(stage_ == STAGE_FIRST_FRAME)
        res = processFirstFrame();
    else if(stage_ == STAGE_RELOCALIZING)
        res = relocalizeFrame(SE3d(Matrix3d::Identity(), Vector3d::Zero()),
                              map_.getClosestKeyframe(last_frame_));

    // set last frame
    last_frame_ = new_frame_;
    new_frame_.reset();

    // finish processing
    finishFrameProcessingCommon(last_frame_->id_, res, last_frame_->nObs());
}
Developer: fradelg, Project: rpg_svo, Lines: 33, Source: frame_handler_mono.cpp
Example 4: processVideo
/**
 * Processes a recorded video or live view from web-camera and allows you to adjust homography refinement and
 * reprojection threshold in runtime.
 */
void processVideo(const cv::Mat& patternImage, CameraCalibration& calibration, cv::VideoCapture& capture)
{
    // Grab first frame to get the frame dimensions
    cv::Mat currentFrame;
    capture >> currentFrame;

    // Check the capture succeeded:
    if (currentFrame.empty())
    {
        std::cout << "Cannot open video capture device" << std::endl;
        return;
    }

    cv::Size frameSize(currentFrame.cols, currentFrame.rows);

    ARPipeline pipeline(patternImage, calibration);
    ARDrawingContext drawingCtx("Markerless AR", frameSize, calibration);

    bool shouldQuit = false;
    do
    {
        capture >> currentFrame;
        if (currentFrame.empty())
        {
            shouldQuit = true;
            continue;
        }

        shouldQuit = processFrame(currentFrame, pipeline, drawingCtx);
    } while (!shouldQuit);
}
Developer: TBFMX, Project: opencvWork, Lines: 35, Source: main.cpp
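Examples 1 and 4 both drive their capture loop through a processFrame helper that is not shown on this page: it renders a single captured frame and returns true when the user asks to quit. Below is a minimal sketch of such a helper, written against the calling convention of Example 4; the member names used here (processFrame, getPatternLocation, isPatternPresent, patternPose, updateBackground, updateWindow) are assumptions inferred from the calling code, not the verified API of the cited repositories.

// Sketch only: member names are assumed from the calling code above,
// not taken from the cited repositories.
bool processFrame(const cv::Mat& cameraFrame, ARPipeline& pipeline, ARDrawingContext& drawingCtx)
{
    // Work on a copy so overlays do not corrupt the captured frame.
    cv::Mat img = cameraFrame.clone();

    // Detect the pattern and hand the result to the renderer.
    drawingCtx.isPatternPresent = pipeline.processFrame(cameraFrame);
    drawingCtx.patternPose = pipeline.getPatternLocation();
    drawingCtx.updateBackground(img);
    drawingCtx.updateWindow();

    // Returning true ends the do/while capture loop in the caller.
    int keyCode = cv::waitKey(5);
    return keyCode == 27 || keyCode == 'q';
}

Example 1 follows the same pattern, except that the detector is passed as a std::auto_ptr<MarkerDetectionFacade> rather than an ARPipeline.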
Example 5: SVO_WARN_STREAM_THROTTLE
FrameHandlerMono::UpdateResult FrameHandlerMono::relocalizeFrame(
    const SE3d& T_cur_ref,
    FramePtr ref_keyframe)
{
    SVO_WARN_STREAM_THROTTLE(1.0, "Relocalizing frame");
    if(ref_keyframe == nullptr)
    {
        SVO_INFO_STREAM("No reference keyframe.");
        return RESULT_FAILURE;
    }

    SparseImgAlign img_align(Config::kltMaxLevel(), Config::kltMinLevel(),
                             30, SparseImgAlign::GaussNewton, false, false);
    size_t img_align_n_tracked = img_align.run(ref_keyframe, new_frame_);
    if(img_align_n_tracked > 30)
    {
        SE3d T_f_w_last = last_frame_->T_f_w_;
        last_frame_ = ref_keyframe;
        FrameHandlerMono::UpdateResult res = processFrame();
        if(res != RESULT_FAILURE)
        {
            stage_ = STAGE_DEFAULT_FRAME;
            SVO_INFO_STREAM("Relocalization successful.");
        }
        else
            new_frame_->T_f_w_ = T_f_w_last; // reset to last well localized pose
        return res;
    }
    return RESULT_FAILURE;
}
Developer: fradelg, Project: rpg_svo, Lines: 29, Source: frame_handler_mono.cpp
Example 6: useFrame
void useFrame(cv::Mat& mRgba){
    int64 now = cv::getTickCount();
    int64 then;
    time_queue.push(now);

    // Process frame
    if(mRgba.cols != 0) {
        processFrame(mRgba);

        char buffer[256];
        sprintf(buffer, "Display performance: %dx%d @ %.3f", mRgba.cols, mRgba.rows, fps);
        cv::putText(mRgba, std::string(buffer), cv::Point(8,64),
                    cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(0,255,255,255));
    }

    if (time_queue.size() >= 2)
        then = time_queue.front();
    else
        then = 0;

    if (time_queue.size() >= 25)
        time_queue.pop();

    fps = time_queue.size() * (float)cv::getTickFrequency() / (now-then);
}
Developer: jbboin, Project: opencv-skeleton-android, Lines: 28, Source: native.cpp
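A note on the frame-rate estimate in Example 6: time_queue keeps the tick counts of the most recent calls (at most 25), so now - then spans the last time_queue.size() frame intervals, and fps = time_queue.size() * getTickFrequency() / (now - then) is the average rate over that window. As a worked example, in steady state with a tick frequency of 1e9 and the oldest of the 25 queued timestamps 0.8 s behind now, the computation uses the 24 entries left after the pop and yields fps = 24 * 1e9 / 0.8e9 = 30.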
Example 7: switch
int Viewfinder::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = QWidget::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        switch (_id) {
        case 0: processFrames((*reinterpret_cast< int(*)>(_a[1]))); break;
        case 1: processFrame(); break;
        case 2: toggleCube(); break;
        case 3: toggleGourd(); break;
        case 4: paintCube(); break;
        case 5: paintGourd(); break;
        case 6: changeX(); break;
        case 7: changeY(); break;
        case 8: changeZ(); break;
        case 9: rotateX(); break;
        case 10: rotateY(); break;
        case 11: rotateZ(); break;
        case 12: plus(); break;
        case 13: minus(); break;
        case 14: openDirectory(); break;
        default: ;
        }
        _id -= 15;
    }
    return _id;
}
Developer: sumingfei, Project: aug_reality, Lines: 28, Source: moc_Viewfinder.cpp
Example 8: processTable
void XmlWriter::processFrame(QDomElement &parent, QTextFrame *frame)
{
    QDomElement frameElement = document->createElement("frame");
    frameElement.setAttribute("begin", frame->firstPosition());
    frameElement.setAttribute("end", frame->lastPosition());
    parent.appendChild(frameElement);

//! [0]
    QTextFrame::iterator it;
    for (it = frame->begin(); !(it.atEnd()); ++it) {

        QTextFrame *childFrame = it.currentFrame();
        QTextBlock childBlock = it.currentBlock();

        if (childFrame) {
            QTextTable *childTable = qobject_cast<QTextTable*>(childFrame);

            if (childTable)
                processTable(frameElement, childTable);
            else
                processFrame(frameElement, childFrame);

        } else if (childBlock.isValid())
//! [0] //! [1]
            processBlock(frameElement, childBlock);
    }
//! [1]
}
Developer: Fale, Project: qtmoko, Lines: 28, Source: xmlwriter.cpp
Example 9: liveScanner
void
liveScanner (dsd_opts * opts, dsd_state * state)
{
    if (opts->audio_in_fd == -1)
    {
        if (pthread_mutex_lock(&state->input_mutex))
        {
            printf("liveScanner -> Unable to lock mutex\n");
        }
    }

    while (1)
    {
        noCarrier (opts, state);
        state->synctype = getFrameSync (opts, state);

        // recalibrate center/umid/lmid
        state->center = ((state->max) + (state->min)) / 2;
        state->umid = (((state->max) - state->center) * 5 / 8) + state->center;
        state->lmid = (((state->min) - state->center) * 5 / 8) + state->center;

        while (state->synctype != -1)
        {
            processFrame (opts, state);
            state->synctype = getFrameSync (opts, state);

            // recalibrate center/umid/lmid
            state->center = ((state->max) + (state->min)) / 2;
            state->umid = (((state->max) - state->center) * 5 / 8) + state->center;
            state->lmid = (((state->min) - state->center) * 5 / 8) + state->center;
        }
    }
}
Developer: LetsGoRosco, Project: gr-dsd, Lines: 29, Source: dsd_main.c
Example 10: EXEC_DEBUG
AlgorithmStatus OverlapAdd::process() {
  EXEC_DEBUG("process()");
  AlgorithmStatus status = acquireData();
  EXEC_DEBUG("data acquired");

  if (status != OK) {
    if (!shouldStop()) return status;

    int available = input("frame").available();
    if (available == 0) {
      return FINISHED;
    }
    // otherwise, there are still some frames
    return CONTINUE;
  }

  const vector<vector<Real> >& frames = _frames.tokens();
  vector<Real>& output = _output.tokens();

  assert(frames.size() == 1 && (int) output.size() == _hopSize);
  const vector<Real> & windowedFrame = frames[0];

  if (windowedFrame.empty()) throw EssentiaException("OverlapAdd: the input frame is empty");

  processFrame(_tmpFrame, windowedFrame, output, _frameHistory, _frameSize,
               _hopSize, _normalizationGain);

  EXEC_DEBUG("releasing");
  releaseData();
  EXEC_DEBUG("released");

  return OK;
}
Developer: HybridVigor, Project: essentia-musicbricks, Lines: 33, Source: overlapadd.cpp
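Example 10 delegates the actual signal reconstruction to a processFrame helper that maintains a history of overlapping windowed frames (_frameHistory) and emits _hopSize samples per call. The following is a generic overlap-add sketch of that idea, not the essentia implementation; the buffer layout and the point at which the normalization gain is applied are assumptions made for illustration.

#include <algorithm>
#include <vector>

// One overlap-add step (illustration only): accumulate the new windowed frame,
// emit the first hopSize samples, then shift the accumulator so the overlapping
// tail is reused on the next call.
void overlapAddStep(const std::vector<float>& windowedFrame, // frameSize samples
                    std::vector<float>& history,             // frameSize-sample accumulator
                    std::vector<float>& output,              // receives hopSize samples
                    int frameSize, int hopSize, float gain)
{
    for (int i = 0; i < frameSize; ++i)
        history[i] += windowedFrame[i];

    output.assign(history.begin(), history.begin() + hopSize);
    for (int i = 0; i < hopSize; ++i)
        output[i] *= gain;

    std::copy(history.begin() + hopSize, history.end(), history.begin());
    std::fill(history.end() - hopSize, history.end(), 0.0f);
}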
Example 11: pollCallback
void pollCallback(polled_camera::GetPolledImage::Request& req,
                  polled_camera::GetPolledImage::Response& rsp,
                  sensor_msgs::Image& image, sensor_msgs::CameraInfo& info)
{
    if (trigger_mode_ != prosilica::Software) {
        rsp.success = false;
        rsp.status_message = "Camera is not in software triggered mode";
        return;
    }

    tPvFrame* frame = NULL;

    try {
        cam_->setBinning(req.binning_x, req.binning_y);

        if (req.roi.x_offset || req.roi.y_offset || req.roi.width || req.roi.height) {
            // GigE cameras use ROI in binned coordinates, so scale the values down.
            // The ROI is expanded if necessary to land on binned coordinates.
            unsigned int left_x = req.roi.x_offset / req.binning_x;
            unsigned int top_y = req.roi.y_offset / req.binning_y;
            unsigned int right_x = (req.roi.x_offset + req.roi.width + req.binning_x - 1) / req.binning_x;
            unsigned int bottom_y = (req.roi.y_offset + req.roi.height + req.binning_y - 1) / req.binning_y;
            unsigned int width = right_x - left_x;
            unsigned int height = bottom_y - top_y;
            cam_->setRoi(left_x, top_y, width, height);
        } else {
            cam_->setRoiToWholeFrame();
        }

        // Zero duration means "no timeout"
        unsigned long timeout = req.timeout.isZero() ? PVINFINITE : req.timeout.toNSec() / 1e6;
        frame = cam_->grab(timeout);
    }
    catch (prosilica::ProsilicaException &e) {
        if (e.error_code == ePvErrBadSequence)
            throw; // not easily recoverable

        rsp.success = false;
        rsp.status_message = (boost::format("Prosilica exception: %s") % e.what()).str();
        return;
    }

    if (!frame) {
        /// @todo Would be nice if grab() gave more info
        rsp.success = false;
        rsp.status_message = "Failed to capture frame, may have timed out";
        return;
    }

    info = cam_info_;
    image.header.frame_id = img_.header.frame_id;

    if (!processFrame(frame, image, info)) {
        rsp.success = false;
        rsp.status_message = "Captured frame but failed to process it";
        return;
    }

    info.roi.do_rectify = req.roi.do_rectify; // do_rectify should be preserved from request
    rsp.success = true;
}
Developer: Anuragch, Project: stingray-3-0, Lines: 60, Source: prosilica_node.cpp
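The binned-ROI arithmetic in Example 11 rounds the right and bottom edges up so that the requested region is never cropped. As a worked example, with req.binning_x = 2, req.roi.x_offset = 5 and req.roi.width = 10, the code computes left_x = 5 / 2 = 2 and right_x = (5 + 10 + 2 - 1) / 2 = 8, i.e. a width of 6 binned columns covering full-resolution columns 4 through 15 — a slight expansion of the requested columns 5 through 14 onto binned boundaries.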
Example 12: sizeof
int VBoxNetLwipNAT::processGSO(PCPDMNETWORKGSO pGso, size_t cbFrame)
{
    if (!PDMNetGsoIsValid(pGso, cbFrame,
                          cbFrame - sizeof(PDMNETWORKGSO)))
        return VERR_INVALID_PARAMETER;

    cbFrame -= sizeof(PDMNETWORKGSO);
    uint8_t abHdrScratch[256];
    uint32_t const cSegs = PDMNetGsoCalcSegmentCount(pGso,
                                                     cbFrame);
    for (size_t iSeg = 0; iSeg < cSegs; iSeg++)
    {
        uint32_t cbSegFrame;
        void *pvSegFrame =
            PDMNetGsoCarveSegmentQD(pGso,
                                    (uint8_t *)(pGso + 1),
                                    cbFrame,
                                    abHdrScratch,
                                    iSeg,
                                    cSegs,
                                    &cbSegFrame);

        int rc = processFrame(pvSegFrame, cbSegFrame);
        if (RT_FAILURE(rc))
        {
            return rc;
        }
    }

    return VINF_SUCCESS;
}
Developer: bayasist, Project: vbox, Lines: 31, Source: VBoxNetLwipNAT.cpp
Example 13: processFrame
void AudioProcessor::updateCB(const ros::TimerEvent& timer_event)
{
  result_ = channel_->getSpectrum(spectrum_, spectrum_size_, 0, fft_method_);
  if (result_ == FMOD_OK)
  {
    now_time_ = ros::Time::now(); // try to grab as close to getting message as possible
    freq_status_.tick();
    if (received_first_frame_)
    {
      max_period_between_updates_ = std::max(max_period_between_updates_,
                                             (timer_event.current_real - timer_event.last_real).toSec());
      last_callback_duration_ = timer_event.profile.last_duration.toSec();
    }
    received_first_frame_ = true;
    consequtively_dropped_frames_ = 0;
    processFrame();
  }
  else
  {
    ROS_ERROR("Could not get spectrum. (%d) : %s.", result_, FMOD_ErrorString(result_));
    consequtively_dropped_frames_++;
    if(consequtively_dropped_frames_ > max_dropped_frames_)
    {
      shutdown();
    }
  }
  frame_count_++;
  system_->update();
  diagnostic_updater_.update();
}
Developer: HiroyukiMikita, Project: usc-clmc-ros-pkg, Lines: 31, Source: audio_processor.cpp
Example 14: deviceConnected
void LeapMotionPlugin::pluginUpdate(float deltaTime, const controller::InputCalibrationData& inputCalibrationData) {
    if (!_enabled) {
        return;
    }

    const auto frame = _controller.frame();
    const auto frameID = frame.id();
    if (_lastFrameID >= frameID) {
        // Leap Motion not connected or duplicate frame.
        return;
    }

    if (!_hasLeapMotionBeenConnected) {
        emit deviceConnected(getName());
        _hasLeapMotionBeenConnected = true;
    }

    processFrame(frame); // Updates _joints.

    auto joints = _joints;

    auto userInputMapper = DependencyManager::get<controller::UserInputMapper>();
    userInputMapper->withLock([&, this]() {
        _inputDevice->update(deltaTime, inputCalibrationData, joints, _prevJoints);
    });

    _prevJoints = joints;
    _lastFrameID = frameID;
}
Developer: SeijiEmery, Project: hifi, Lines: 30, Source: LeapMotionPlugin.cpp
Example 15: processFrame
QString TextDocumentSerializer::toSimpleHtml(QTextDocument *document)
{
    this->document = document;

    QTextFrame *root = document->rootFrame();

    return processFrame(root);
}
Developer: atilm, Project: RequirementsManager, Lines: 7, Source: textdocumentserializer.cpp
Example 16: while
/*
 * QoS implementation using 8 queues for each priority and round-robin algorithm.
 */
void Switch::switchFrames()
{
    while(1)
    {
        for (int i = 7; i >= 0; --i)
        {
            for (int j = 0; j <= i; ++j)
            {
                r.lock();
                if (!queues[i].empty())
                {
                    Frame* frame = queues[i].front();
                    queues[i].pop();
                    if(prt==true)
                        std::cout << frame->toString() << std::endl;
                    r.unlock();
                    processFrame(frame, frame->sourcePort);
                }
                else
                {
                    r.unlock();
                    break;
                }
            }
        }
    }
}
Developer: przemeklal, Project: switch-logic, Lines: 33, Source: Switch.cpp
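The nested loops in Example 16 implement a weighted round-robin rather than a plain one: on each pass of the outer loop, queue i may forward up to i + 1 frames (8 for priority 7, 7 for priority 6, and so on down to 1 for priority 0), so when every queue is backlogged the highest priority gets eight times the forwarding opportunities of the lowest. Note also that the mutex r is released before processFrame is called, so the actual frame processing happens outside the critical section.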
Example 17: catch
void KeyframeMapper::RGBDCallback(
  const ImageMsg::ConstPtr& rgb_msg,
  const ImageMsg::ConstPtr& depth_msg,
  const CameraInfoMsg::ConstPtr& info_msg)
{
  tf::StampedTransform transform;

  const ros::Time& time = rgb_msg->header.stamp;

  try{
    tf_listener_.waitForTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, ros::Duration(0.1));
    tf_listener_.lookupTransform(
      fixed_frame_, rgb_msg->header.frame_id, time, transform);
  }
  catch(...)
  {
    return;
  }

  // create a new frame and increment the counter
  rgbdtools::RGBDFrame frame;
  createRGBDFrameFromROSMessages(rgb_msg, depth_msg, info_msg, frame);
  frame.index = rgbd_frame_index_;
  rgbd_frame_index_++;

  bool result = processFrame(frame, eigenAffineFromTf(transform));
  if (result) publishKeyframeData(keyframes_.size() - 1);

  publishPath();
}
Developer: jihoonl, Project: ccny_rgbd_tools, Lines: 31, Source: keyframe_mapper.cpp
Example 18: switch
void Application::loop()
{
    // Check for key input
    int key = cv::waitKey(20);
    switch (key)
    {
    case 's':
        makeScreenshots();
        break;
    case 'c':
        clearOutputImage();
        break;
    case 'q':
        m_isFinished = true;
    }

    // Grab new images from the Kinect's cameras
    m_depthCamera->frameFromCamera(m_rgbImage, m_depthImage, CV_16UC1);

    // Process the current frame
    processFrame();

    // Display the images
    cv::imshow("raw", m_rgbImage);
    cv::imshow("depth", m_depthImage);
    cv::imshow("output", m_outputImage);
}
Developer: kaozente, Project: The-Jacob-Nielsen-Experience, Lines: 30, Source: Application.cpp
Example 19: ERROR
void UT612ByteStreamParser::processByte(uint8_t byte)
{
    _byteCount++;

    bool currentByteIsLinefeed = (byte == 0x0a);
    bool frameIsEnding = _lastByteWasCarriageReturn && currentByteIsLinefeed;

    _buffer.push_back(byte);

    if (frameIsEnding && _buffer.size() >= frame_size)
    {
        //
        // Warn on frame alignment errors
        //
        int bytesSinceLastFrame = _byteCount - _lastFrameByteCount;
        if (bytesSinceLastFrame != frame_size)
        {
            std::cerr << "\nERROR - FRAME ALIGNMENT ERROR (diff=" << bytesSinceLastFrame << ")" << std::endl;
        }
        _lastFrameByteCount = _byteCount;

        //
        // Process the frame
        //
        std::cout << _frameCount++ << "\t";

        if (_doTimestamp)
        {
            std::cout << getCurrentTime() << "\t";
        }

        try {
            std::string values = processFrame(_buffer, _buffer.size() - frame_size);
            std::cout << values;
        }
        catch (std::runtime_error & e)
        {
            std::cerr << "Exception: \"" << e.what() << "\"\n";
            const uint8_t* d = &_buffer[_buffer.size() - frame_size];
            std::cerr << "Offending bytes: " << convert2HexString(d, frame_size) << std::endl;
        }

        bool showHex = false;
        if (showHex)
        {
            const uint8_t* d = &_buffer[_buffer.size() - frame_size];
            std::cout << "\t" << convert2HexString(d, frame_size);
        }

        std::cout << std::endl;

        _buffer.clear();
    }

    _lastByteWasCarriageReturn = (byte == 0x0d);
}
Developer: optisimon, Project: UT612-linux-software, Lines: 58, Source: UT612ByteStreamParser.cpp
Example 20: publishImage
void publishImage (FlyCapture2::Image * frame)
{
    //sensor_msgs::CameraInfoPtr cam_info(new sensor_msgs::CameraInfo(cam_info_->getCameraInfo()));
    // or with a member variable:
    cam_info_ptr_ = sensor_msgs::CameraInfoPtr(new sensor_msgs::CameraInfo(cam_info_mgr_->getCameraInfo()));

    if (processFrame (frame, img_, cam_info_ptr_))
        streaming_pub_.publish (img_, *cam_info_ptr_);
}
Developer: ccny-ros-pkg, Project: ccny_camera_drivers, Lines: 9, Source: pgr_camera_node.cpp
Note: The processFrame examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors; please consult each project's License before distributing or using the code. Do not reproduce this article without permission.