C++ optimizablegraph::Edge Class Code Examples


This article collects typical usage examples of the C++ optimizablegraph::Edge class. If you are wondering how the Edge class is used in practice, how to call it, or what real code that uses it looks like, the hand-picked class code examples below may help.



Below are 20 code examples of the Edge class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.

Example 1: gaugeFreedom

  bool SparseOptimizer::gaugeFreedom()
  {
    if (vertices().empty())
      return false;

    int maxDim=0;
    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second); 
      maxDim = std::max(maxDim,v->dimension());
    }

    for (HyperGraph::VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it){
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
      if (v->dimension() == maxDim) {
        // test for fixed vertex
        if (v->fixed()) {
          return false;
        }
        // test for full dimension prior
        for (HyperGraph::EdgeSet::const_iterator eit = v->edges().begin(); eit != v->edges().end(); ++eit) {
          OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*eit);
          if (e->vertices().size() == 1 && e->dimension() == maxDim)
            return false;
        }
      }
    }
    return true;
  }
Developer: skarlsson, Project: tmp-android, Lines of code: 28, Source file: sparse_optimizer.cpp
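
gaugeFreedom() answers a structural question: if no vertex of maximal dimension is fixed and no unary prior edge covers the full dimension, the whole graph can float and the normal equations become singular. Client code usually reacts by pinning one vertex. The following is a minimal illustrative sketch, not part of the example above; it assumes g2o's SparseOptimizer::findGauge() helper and simply fixes whatever vertex it returns.

  #include <g2o/core/sparse_optimizer.h>

  // Sketch: if the graph still has gauge freedom, anchor it at one vertex so
  // that the linear system becomes well conditioned.
  void fixGaugeIfNeeded(g2o::SparseOptimizer& optimizer)
  {
    if (optimizer.gaugeFreedom()) {
      g2o::OptimizableGraph::Vertex* gauge = optimizer.findGauge();
      if (gauge)
        gauge->setFixed(true);
    }
  }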


Example 2: addEdge

  bool OptimizableGraph::addEdge(HyperGraph::Edge* e_)
  {
    OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(e_);
    assert(e && "Edge does not inherit from OptimizableGraph::Edge");
    //    std::cerr << "subclass of OptimizableGraph::Edge confirmed";
    if (! e)
      return false;
    bool eresult = HyperGraph::addEdge(e);
    if (! eresult)
      return false;
    //    std::cerr << "called HyperGraph::addEdge" << std::endl;
    e->_internalId = _nextEdgeId++;
    if (e->numUndefinedVertices())
      return true;
    //    std::cerr << "internalId set" << std::endl;
    if (! e->resolveParameters()){
      cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
      return false;
    }
    //    std::cerr << "parameters set" << std::endl;
    if (! e->resolveCaches()){
      cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
      return false;
    }
    //    std::cerr << "updating jacobian size" << std::endl;
    _jacobianWorkspace.updateSize(e);

    //    std::cerr << "about to return true" << std::endl;

    return true;
  }
Developer: Aerobota, Project: g2o, Lines of code: 31, Source file: optimizable_graph.cpp
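
addEdge() only succeeds when the argument really derives from OptimizableGraph::Edge and, once all vertices are bound, its parameters and caches can be resolved. For reference, a minimal sketch of the usual call site follows; it is not from the repository above and assumes the stock slam2d types (VertexSE2/EdgeSE2) that ship with g2o, with placeholder vertex ids, measurement and information matrix.

  #include <Eigen/Core>
  #include <g2o/core/sparse_optimizer.h>
  #include <g2o/types/slam2d/edge_se2.h>

  // Sketch: connect two VertexSE2 nodes that were already added to the
  // optimizer (ids 0 and 1 are placeholders) with a relative-pose constraint.
  bool addOdometryEdge(g2o::SparseOptimizer& optimizer)
  {
    g2o::EdgeSE2* e = new g2o::EdgeSE2;
    e->setVertex(0, optimizer.vertex(0));
    e->setVertex(1, optimizer.vertex(1));
    e->setMeasurement(g2o::SE2(1.0, 0.0, 0.0));      // placeholder odometry
    e->setInformation(Eigen::Matrix3d::Identity());  // placeholder weights
    return optimizer.addEdge(e);  // false if the edge cannot be set up
  }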


Example 3: setRobustKernel

void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();  
  //odometry edges are those whose node ids differ by 1
  
  bool onlyLoop = cbOnlyLoop->isChecked();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (onlyLoop) {
        if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) {
          e->setRobustKernel(creator->construct());
          e->robustKernel()->setDelta(huberWidth);
        }
      } else {
        e->setRobustKernel(creator->construct());
        e->robustKernel()->setDelta(huberWidth);
      }
    }    
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
Developer: 2maz, Project: g2o, Lines of code: 35, Source file: main_window.cpp
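
The GUI code above looks the kernel up by name through RobustKernelFactory; when the kernel type is known at compile time it can be attached directly. The following is a small illustrative sketch, not taken from the project above, assuming the stock RobustKernelHuber from g2o/core/robust_kernel_impl.h and a placeholder width of 1.0.

  #include <g2o/core/robust_kernel_impl.h>
  #include <g2o/core/sparse_optimizer.h>

  // Sketch: attach a Huber kernel with a placeholder width to every edge of
  // the graph; the edge keeps the kernel pointer.
  void setHuberOnAllEdges(g2o::SparseOptimizer& optimizer, double delta = 1.0)
  {
    for (g2o::SparseOptimizer::EdgeSet::const_iterator it = optimizer.edges().begin();
         it != optimizer.edges().end(); ++it) {
      g2o::OptimizableGraph::Edge* e = static_cast<g2o::OptimizableGraph::Edge*>(*it);
      g2o::RobustKernelHuber* rk = new g2o::RobustKernelHuber;
      rk->setDelta(delta);
      e->setRobustKernel(rk);
    }
  }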


Example 4: setRobustKernel

void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(creator->construct());
      e->robustKernel()->setDelta(huberWidth);
    }
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
Developer: Crusty82, Project: g2o_tutorial, Lines of code: 25, Source file: main_window.cpp


Example 5: computeActiveErrors

  void SparseOptimizer::computeActiveErrors()
  {
    // call the callbacks in case there is something registered
    HyperGraphActionSet& actions = _graphActions[AT_COMPUTEACTIVERROR];
    if (actions.size() > 0) {
      for (HyperGraphActionSet::iterator it = actions.begin(); it != actions.end(); ++it)
        (*(*it))(this);
    }

#   ifdef G2O_OPENMP
#   pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
#   endif
    for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
      OptimizableGraph::Edge* e = _activeEdges[k];
      e->computeError();
    }

#  ifndef NDEBUG
    for (int k = 0; k < static_cast<int>(_activeEdges.size()); ++k) {
      OptimizableGraph::Edge* e = _activeEdges[k];
      bool hasNan = arrayHasNaN(e->errorData(), e->dimension());
      if (hasNan) {
        cerr << "computeActiveErrors(): found NaN in error for edge " << e << endl;
      }
    }
#  endif

  }
Developer: skarlsson, Project: tmp-android, Lines of code: 28, Source file: sparse_optimizer.cpp


Example 6: updateInitialization

  bool SparseOptimizer::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
  {
    std::vector<HyperGraph::Vertex*> newVertices;
    newVertices.reserve(vset.size());
    _activeVertices.reserve(_activeVertices.size() + vset.size());
    _activeEdges.reserve(_activeEdges.size() + eset.size());
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (!e->allVerticesFixed()) _activeEdges.push_back(e);
    }
    
    // update the index mapping
    size_t next = _ivMap.size();
    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
      if (! v->fixed()){
        if (! v->marginalized()){
          v->setHessianIndex(next);
          _ivMap.push_back(v);
          newVertices.push_back(v);
          _activeVertices.push_back(v);
          next++;
        } 
        else // not supported right now
          abort();
      }
      else {
        v->setHessianIndex(-1);
      }
    }

    //if (newVertices.size() != vset.size())
    //cerr << __PRETTY_FUNCTION__ << ": something went wrong " << PVAR(vset.size()) << " " << PVAR(newVertices.size()) << endl;
    return _algorithm->updateStructure(newVertices, eset);
  }
Developer: skarlsson, Project: tmp-android, Lines of code: 35, Source file: sparse_optimizer.cpp


Example 7: buildSystem

bool BlockSolver<Traits>::buildSystem()
{
  // clear b vector
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
# endif
  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
    OptimizableGraph::Vertex* v=_optimizer->indexMapping()[i];
    assert(v);
    v->clearQuadraticForm();
  }
  _Hpp->clear();
  if (_doSchur) {
    _Hll->clear();
    _Hpl->clear();
  }

  // resetting the terms for the pairwise constraints
  // built up the current system by storing the Hessian blocks in the edges and vertices
# ifndef G2O_OPENMP
  // no threading, we do not need to copy the workspace
  JacobianWorkspace& jacobianWorkspace = _optimizer->jacobianWorkspace();
# else
  // if running with threads need to produce copies of the workspace for each thread
  JacobianWorkspace jacobianWorkspace = _optimizer->jacobianWorkspace();
# pragma omp parallel for default (shared) firstprivate(jacobianWorkspace) if (_optimizer->activeEdges().size() > 100)
# endif
  for (int k = 0; k < static_cast<int>(_optimizer->activeEdges().size()); ++k) {
    OptimizableGraph::Edge* e = _optimizer->activeEdges()[k];
    e->linearizeOplus(jacobianWorkspace); // jacobian of the nodes' oplus (manifold)
    e->constructQuadraticForm();
#  ifndef NDEBUG
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      const OptimizableGraph::Vertex* v = static_cast<const OptimizableGraph::Vertex*>(e->vertex(i));
      if (! v->fixed()) {
        bool hasANan = arrayHasNaN(jacobianWorkspace.workspaceForVertex(i), e->dimension() * v->dimension());
        if (hasANan) {
          std::cerr << "buildSystem(): NaN within Jacobian for edge " << e << " for vertex " << i << std::endl;
          break;
        }
      }
    }
#  endif
  }

  // flush the current system in a sparse block matrix
# ifdef G2O_OPENMP
# pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
# endif
  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
    OptimizableGraph::Vertex* v=_optimizer->indexMapping()[i];
    int iBase = v->colInHessian();
    if (v->marginalized())
      iBase+=_sizePoses;
    v->copyB(_b+iBase);
  }

  return 0;
}
Developer: Aerobota, Project: g2o, Lines of code: 59, Source file: block_solver.hpp


Example 8: operator()

 double EstimatePropagatorCost::operator()(OptimizableGraph::Edge* edge, const OptimizableGraph::VertexSet& from, OptimizableGraph::Vertex* to_) const
 {
   OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(edge);
   OptimizableGraph::Vertex* to = dynamic_cast<OptimizableGraph::Vertex*>(to_);
   SparseOptimizer::EdgeContainer::const_iterator it = _graph->findActiveEdge(e);
   if (it == _graph->activeEdges().end()) // it has to be an active edge
     return std::numeric_limits<double>::max();
   return e->initialEstimatePossible(from, to);
 }
Developer: Aand1, Project: Autoware, Lines of code: 9, Source file: estimate_propagator.cpp


Example 9: computeActiveErrors

 void SparseOptimizer::computeActiveErrors()
 {
   for (EdgeContainer::const_iterator
        it = _activeEdges.begin();
        it != _activeEdges.end();
        it++)
   {
     OptimizableGraph::Edge* e = *it;
     e->computeError();
     if (e->robustKernel())   e->robustifyError();
   }
 }
Developer: RoboWGT, Project: robo_groovy, Lines of code: 12, Source file: graph_optimizer_sparse.cpp


Example 10: linearizeSystem

  void SparseOptimizer::linearizeSystem()
  {
#   ifdef G2O_OPENMP
#   pragma omp parallel for default (shared) if (_activeEdges.size() > 50)
#   endif
    for (size_t k = 0; k < _activeEdges.size(); ++k)
    {
      OptimizableGraph::Edge* e = _activeEdges[k];
      // jacobian of the nodes' oplus (manifold)
      e->linearizeOplus();
    }
  }
Developer: RoboWGT, Project: robo_groovy, Lines of code: 12, Source file: graph_optimizer_sparse.cpp


Example 11: activeVertexChi

  double activeVertexChi(const OptimizableGraph::Vertex* v){
    const SparseOptimizer* s = dynamic_cast<const SparseOptimizer*>(v->graph());
    const OptimizableGraph::EdgeContainer& av = s->activeEdges();
    double chi = 0;
    int ne =0;
    for (HyperGraph::EdgeSet::iterator it = v->edges().begin(); it!=v->edges().end(); it++){
      OptimizableGraph::Edge* e = dynamic_cast <OptimizableGraph::Edge*> (*it);
      if (!e)
	continue;
      if (s->findActiveEdge(e)!=av.end()) {
	chi +=e->chi2();
	ne++;
      }
    }
    if (! ne)
      return -1;
    return chi/ne;
  }
Developer: MichaelRuhnke, Project: g2o, Lines of code: 18, Source file: simple_star_ops.cpp
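
activeVertexChi() averages chi2() over the active edges incident to a single vertex; summing the same per-edge chi2() over all active edges is what SparseOptimizer reports as activeChi2(). A minimal sketch of that whole-graph variant follows (illustrative only, assuming the errors are recomputed first):

  #include <g2o/core/sparse_optimizer.h>

  // Sketch: recompute the errors of the active edges and accumulate the total
  // chi2 by hand; the result should match optimizer.activeChi2().
  double totalActiveChi2(g2o::SparseOptimizer& optimizer)
  {
    optimizer.computeActiveErrors();
    double chi2 = 0.0;
    for (size_t k = 0; k < optimizer.activeEdges().size(); ++k)
      chi2 += optimizer.activeEdges()[k]->chi2();
    return chi2;
  }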


Example 12: addEdge

  bool OptimizableGraph::addEdge(HyperGraph::Edge* e_)
  {
    OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(e_);
    assert(e && "Edge does not inherit from OptimizableGraph::Edge");
    if (! e)
      return false;
    bool eresult = HyperGraph::addEdge(e);
    if (! eresult)
      return false;
    e->_internalId = _nextEdgeId++;
    if (! e->resolveParameters()){
      cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
      return false;
    }
    if (! e->resolveCaches()){
      cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
      return false;
    } 
    _jacobianWorkspace.updateSize(e);

    return true;
  }
Developer: Crusty82, Project: g2o_tutorial, Lines of code: 22, Source file: optimizable_graph.cpp


Example 13: setEdgeVertex

  bool OptimizableGraph::setEdgeVertex(HyperGraph::Edge* e, int pos, HyperGraph::Vertex* v){
    if (! HyperGraph::setEdgeVertex(e,pos,v)){
      return false;
    }
    if (!e->numUndefinedVertices()){
#ifndef NDEBUG
      OptimizableGraph::Edge* ee = dynamic_cast<OptimizableGraph::Edge*>(e);
      assert(ee && "Edge is not a OptimizableGraph::Edge");
#else
      OptimizableGraph::Edge* ee = static_cast<OptimizableGraph::Edge*>(e);
#endif
      if (! ee->resolveParameters()){
	cerr << __FUNCTION__ << ": FATAL, cannot resolve parameters for edge " << e << endl;
	return false;
      }
      if (! ee->resolveCaches()){
	cerr << __FUNCTION__ << ": FATAL, cannot resolve caches for edge " << e << endl;
	return false;
      }
      _jacobianWorkspace.updateSize(e);
    }
    return true;
  }
Developer: Aerobota, Project: g2o, Lines of code: 23, Source file: optimizable_graph.cpp


Example 14: updateStructure

bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure, we allocate the upper triangular block
          std::swap(ind1, ind2);

        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else { 
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }

  }

  return true;
}
Developer: Aerobota, Project: g2o, Lines of code: 54, Source file: block_solver.hpp


Example 15: computeInitialGuess

  void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
  {
    OptimizableGraph::VertexSet emptySet;
    std::set<Vertex*> backupVertices;
    HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
    for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
      OptimizableGraph::Edge* e = *it;
      for (size_t i = 0; i < e->vertices().size(); ++i) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
	if (!v)
	  continue;
        if (v->fixed())
          fixedVertices.insert(v);
        else { // check for having a prior which is able to fully initialize a vertex
          for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
            OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
            if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
              //cerr << "Initialize with prior for " << v->id() << endl;
              vedge->initialEstimate(emptySet, v);
              fixedVertices.insert(v);
            }
          }
        }
        if (v->hessianIndex() == -1) {
          std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
          if (foundIt == backupVertices.end()) {
            v->push();
            backupVertices.insert(v);
          }
        }
      }
    }

    EstimatePropagator estimatePropagator(this);
    estimatePropagator.propagate(fixedVertices, costFunction);

    // restoring the vertices that should not be initialized
    for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
      Vertex* v = *it;
      v->pop();
    }
    if (verbose()) {
      computeActiveErrors();
      cerr << "iteration= -1\t chi2= " << activeChi2()
          << "\t time= 0.0"
          << "\t cumTime= 0.0"
          << "\t (using initial guess from " << costFunction.name() << ")" << endl;
    }
  }
Developer: 2maz, Project: g2o, Lines of code: 49, Source file: sparse_optimizer.cpp
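
computeInitialGuess() spreads estimates outwards from the fixed vertices (and from unary prior edges that can fully initialize a vertex) before the first iteration. In client code it normally sits between initializeOptimization() and optimize(); the sketch below shows that calling order only and uses a placeholder iteration count.

  #include <g2o/core/sparse_optimizer.h>

  // Sketch of the usual calling order around computeInitialGuess();
  // the iteration count of 10 is a placeholder.
  void runWithInitialGuess(g2o::SparseOptimizer& optimizer)
  {
    optimizer.initializeOptimization();  // builds the active vertex/edge sets
    optimizer.computeInitialGuess();     // propagate estimates from the fixed vertices
    optimizer.optimize(10);
  }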


Example 16: propagate

  void EstimatePropagator::propagate(OptimizableGraph::VertexSet& vset, 
      const EstimatePropagator::PropagateCost& cost, 
       const EstimatePropagator::PropagateAction& action,
       double maxDistance, 
       double maxEdgeCost)
  {
    reset();

    PriorityQueue frontier;
    for (OptimizableGraph::VertexSet::iterator vit=vset.begin(); vit!=vset.end(); ++vit){
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
      AdjacencyMap::iterator it = _adjacencyMap.find(v);
      assert(it != _adjacencyMap.end());
      it->second._distance = 0.;
      it->second._parent.clear();
      it->second._frontierLevel = 0;
      frontier.push(&it->second);
    }

    while(! frontier.empty()){
      AdjacencyMapEntry* entry = frontier.pop();
      OptimizableGraph::Vertex* u = entry->child();
      double uDistance = entry->distance();
      //cerr << "uDistance " << uDistance << endl;

      // initialize the vertex
      if (entry->_frontierLevel > 0) {
        action(entry->edge(), entry->parent(), u);
      }

      /* std::pair< OptimizableGraph::VertexSet::iterator, bool> insertResult = */ _visited.insert(u);
      OptimizableGraph::EdgeSet::iterator et = u->edges().begin();
      while (et != u->edges().end()){
        OptimizableGraph::Edge* edge = static_cast<OptimizableGraph::Edge*>(*et);
        ++et;

        int maxFrontier = -1;
        OptimizableGraph::VertexSet initializedVertices;
        for (size_t i = 0; i < edge->vertices().size(); ++i) {
          OptimizableGraph::Vertex* z = static_cast<OptimizableGraph::Vertex*>(edge->vertex(i));
          AdjacencyMap::iterator ot = _adjacencyMap.find(z);
          if (ot->second._distance != numeric_limits<double>::max()) {
            initializedVertices.insert(z);
            maxFrontier = (max)(maxFrontier, ot->second._frontierLevel);
          }
        }
        assert(maxFrontier >= 0);

        for (size_t i = 0; i < edge->vertices().size(); ++i) {
          OptimizableGraph::Vertex* z = static_cast<OptimizableGraph::Vertex*>(edge->vertex(i));
          if (z == u)
            continue;

          size_t wasInitialized = initializedVertices.erase(z);

          double edgeDistance = cost(edge, initializedVertices, z);
          if (edgeDistance > 0. && edgeDistance != std::numeric_limits<double>::max() && edgeDistance < maxEdgeCost) {
            double zDistance = uDistance + edgeDistance;
            //cerr << z->id() << " " << zDistance << endl;

            AdjacencyMap::iterator ot = _adjacencyMap.find(z);
            assert(ot!=_adjacencyMap.end());

            if (zDistance < ot->second.distance() && zDistance < maxDistance){
              //if (ot->second.inQueue)
                //cerr << "Updating" << endl;
              ot->second._distance = zDistance;
              ot->second._parent = initializedVertices;
              ot->second._edge = edge;
              ot->second._frontierLevel = maxFrontier + 1;
              frontier.push(&ot->second);
            }
          }

          if (wasInitialized > 0)
            initializedVertices.insert(z);

        }
      }
    }

    // writing debug information like cost for reaching each vertex and the parent used to initialize
#ifdef DEBUG_ESTIMATE_PROPAGATOR
    cerr << "Writing cost.dat" << endl;
    ofstream costStream("cost.dat");
    for (AdjacencyMap::const_iterator it = _adjacencyMap.begin(); it != _adjacencyMap.end(); ++it) {
      HyperGraph::Vertex* u = it->second.child();
      costStream << "vertex " << u->id() << "  cost " << it->second._distance << endl;
    }
    cerr << "Writing init.dat" << endl;
    ofstream initStream("init.dat");
    vector<AdjacencyMapEntry*> frontierLevels;
    for (AdjacencyMap::iterator it = _adjacencyMap.begin(); it != _adjacencyMap.end(); ++it) {
      if (it->second._frontierLevel > 0)
        frontierLevels.push_back(&it->second);
    }
    sort(frontierLevels.begin(), frontierLevels.end(), FrontierLevelCmp());
    for (vector<AdjacencyMapEntry*>::const_iterator it = frontierLevels.begin(); it != frontierLevels.end(); ++it) {
      AdjacencyMapEntry* entry       = *it;
      OptimizableGraph::Vertex* to   = entry->child();
//......... part of the code is omitted here .........
Developer: Aand1, Project: Autoware, Lines of code: 101, Source file: estimate_propagator.cpp


Example 17: buildStructure

bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
{
  assert(_optimizer);

  size_t sparseDim = 0;
  _numPoses=0;
  _numLandmarks=0;
  _sizePoses=0;
  _sizeLandmarks=0;
  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      blockPoseIndices[_numPoses]=_sizePoses;
      ++_numPoses;
    } else {
      v->setColInHessian(_sizeLandmarks);
      _sizeLandmarks+=dim;
      blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
      ++_numLandmarks;
    }
    sparseDim += dim;
  }
  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
  delete[] blockLandmarkIndices;
  delete[] blockPoseIndices;

  // allocate the diagonal on Hpp and Hll
  int poseIdx = 0;
  int landmarkIdx = 0;
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (! v->marginalized()){
      //assert(poseIdx == v->hessianIndex());
      PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++poseIdx;
    } else {
      LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++landmarkIdx;
    }
  }
  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);

  // temporary structures for building the pattern of the Schur complement
  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
  if (_doSchur) {
    schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
    schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
  }

  // here we assume that the landmark indices start after the pose ones
  // create the structure in Hpp, Hll and in Hpl
  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
    OptimizableGraph::Edge* e = *it;

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int indexV1Bak = ind1;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock){ // make sure, we allocate the upper triangle block
          std::swap(ind1, ind2);
        }
        if (! v1->marginalized() && !v2->marginalized()){
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
          if (_Hschur) {// assume this is only needed in case we solve with the schur complement
            schurMatrixLookup->addBlock(ind1, ind2);
          }
        } else if (v1->marginalized() && v2->marginalized()){
          // RAINER hmm.... should we ever reach this here????
          LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
        } else { 
          if (v1->marginalized()){ 
            PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(),v1->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
//......... part of the code is omitted here .........
Developer: Aerobota, Project: g2o, Lines of code: 101, Source file: block_solver.hpp


Example 18: computeSimpleStars

  void computeSimpleStars(StarSet& stars,
			  SparseOptimizer* optimizer,
			  EdgeLabeler* labeler,
			  EdgeCreator* creator,
			  OptimizableGraph::Vertex* gauge_,
			  std::string edgeTag,
			  std::string vertexTag,
			  int level,
			  int step,
			  int backboneIterations,
			  int starIterations,
			  double rejectionThreshold,
			  bool debug){

    cerr << "preforming the tree actions" << endl;
    HyperDijkstra d(optimizer);
    // compute a spanning tree based on the types of edges and vertices in the pool
    EdgeTypesCostFunction f(edgeTag, vertexTag, level);
    d.shortestPaths(gauge_,
        &f,
        std::numeric_limits< double >::max(),
        1e-6,
        false,
        std::numeric_limits< double >::max()/2);

    HyperDijkstra::computeTree(d.adjacencyMap());
    // constructs the stars on the backbone

    BackBoneTreeAction bact(optimizer, vertexTag, level, step);
    bact.init();

    cerr << "free edges size " << bact.freeEdges().size() << endl;

    // perform breadth-first visit of the visit tree and create the stars on the backbone
    d.visitAdjacencyMap(d.adjacencyMap(),&bact,true);
    stars.clear();

    for (VertexStarMultimap::iterator it=bact.vertexStarMultiMap().begin();
        it!=bact.vertexStarMultiMap().end(); it++){
      stars.insert(it->second);
    }
    cerr << "stars.size: " << stars.size() << endl;
    cerr << "size: " << bact.vertexStarMultiMap().size() << endl;


    //  for each star

    //    for all vertices in the backbone, select all edges leading/leaving from that vertex
    //    that are contained in freeEdges.

    //      mark the corresponding "open" vertices and add them to a multimap (vertex->star)

    //    select a gauge in the backbone

    //    push all vertices on the backbone

    //    compute an initial guess on the backbone

    //    one round of optimization backbone

    //    lock all vertices in the backbone

    //    push all "open" vertices

    //    for each open vertex,
    //      compute an initial guess given the backbone
    //      do some rounds of solveDirect
    //      if (fail)
    //        - remove the vertex and the edges in that vertex from the star
    //   - make the structures consistent

    //    pop all "open" vertices
    //    pop all "vertices" in the backbone
    //    unfix the vertices in the backbone

    int starNum=0;
    for (StarSet::iterator it=stars.begin(); it!=stars.end(); it++){
      Star* s =*it;
      HyperGraph::VertexSet backboneVertices = s->_lowLevelVertices;
      HyperGraph::EdgeSet backboneEdges = s->_lowLevelEdges;
      if (backboneEdges.empty())
	continue;


      // cerr << "optimizing backbone" << endl;
      // one of these should be the gauge; to keep it simple we select the first one in the backbone
      OptimizableGraph::VertexSet gauge;
      gauge.insert(*backboneVertices.begin());
      s->gauge()=gauge;
      s->optimizer()->push(backboneVertices);
      s->optimizer()->setFixed(gauge,true);
      s->optimizer()->initializeOptimization(backboneEdges);
      s->optimizer()->computeInitialGuess();
      s->optimizer()->optimize(backboneIterations);
      s->optimizer()->setFixed(backboneVertices, true);

      // cerr << "assignind edges.vertices not in bbone" << endl;
      HyperGraph::EdgeSet otherEdges;
      HyperGraph::VertexSet otherVertices;
      std::multimap<HyperGraph::Vertex*, HyperGraph::Edge*> vemap;
//......... part of the code is omitted here .........
Developer: MichaelRuhnke, Project: g2o, Lines of code: 101, Source file: simple_star_ops.cpp


Example 19: updateInitialization

  bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
  {
    if (batchStep) {
      return SparseOptimizerOnline::updateInitialization(vset, eset);
    }

    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      v->clearQuadraticForm(); // be sure that b is zero for this vertex
    }

    // get the touched vertices
    _touchedVertices.clear();
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
      OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
      if (! v1->fixed())
        _touchedVertices.insert(v1);
      if (! v2->fixed())
        _touchedVertices.insert(v2);
    }
    //cerr << PVAR(_touchedVertices.size()) << endl;

    // updating the internal structures
    std::vector<HyperGraph::Vertex*> newVertices;
    newVertices.reserve(vset.size());
    _activeVertices.reserve(_activeVertices.size() + vset.size());
    _activeEdges.reserve(_activeEdges.size() + eset.size());
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
      _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
    //cerr << "updating internal done." << endl;

    // update the index mapping
    size_t next = _ivMap.size();
    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
      if (! v->fixed()){
        if (! v->marginalized()){
          v->setHessianIndex(next);
          _ivMap.push_back(v);
          newVertices.push_back(v);
          _activeVertices.push_back(v);
          next++;
        } 
        else // not supported right now
          abort();
      }
      else {
        v->setHessianIndex(-1);
      }
    }
    //cerr << "updating index mapping done." << endl;

    // backup the tempindex and prepare sorting structure
    VertexBackup backupIdx[_touchedVertices.size()];
    memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
    int idx = 0;
    for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      backupIdx[idx].hessianIndex = v->hessianIndex();
      backupIdx[idx].vertex = v;
      backupIdx[idx].hessianData = v->hessianData();
      ++idx;
    }
    sort(backupIdx, backupIdx + _touchedVertices.size()); // sort according to the hessianIndex which is the same order as used later by the optimizer
    for (int i = 0; i < idx; ++i) {
      backupIdx[i].vertex->setHessianIndex(i);
    }
    //cerr << "backup tempindex done." << endl;

    // building the structure of the update
    _updateMat.clear(true); // get rid of the old matrix structure
    _updateMat.rowBlockIndices().clear();
    _updateMat.colBlockIndices().clear();
    _updateMat.blockCols().clear();

    // placing the current stuff in _updateMat
    MatrixXd* lastBlock = 0;
    int sizePoses = 0;
    for (int i = 0; i < idx; ++i) {
      OptimizableGraph::Vertex* v = backupIdx[i].vertex;
      int dim = v->dimension();
      sizePoses+=dim;
      _updateMat.rowBlockIndices().push_back(sizePoses);
      _updateMat.colBlockIndices().push_back(sizePoses);
      _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
      int ind = v->hessianIndex();
      //cerr << PVAR(ind) << endl;
      if (ind >= 0) {
        MatrixXd* m = _updateMat.block(ind, ind, true);
        v->mapHessianMemory(m->data());
        lastBlock = m;
      }
    }
    lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0


    for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
//......... part of the code is omitted here .........
Developer: ericperko, Project: g2o, Lines of code: 101, Source file: graph_optimizer_sparse_incremental.cpp


Example 20: initializeOptimization

  bool SparseOptimizer::initializeOptimization
     (HyperGraph::VertexSet& vset, int level)
  {

    // Walk over all vertices that were added to the optimizer.
    // For each vertex 'V', fetch the edges it is part of.
    // For each of those edges, check whether all of its vertices are in the
    // optimizer. If they are, the edge is added to _activeEdges.
    // If vertex 'V' has at least one edge whose other vertices are all in the
    // optimizer, 'V' is added to _activeVertices.

    // Finally, internal indices are assigned to the vertices:
    // -1: fixed vertices
    // 0..n: non-fixed vertices that are NOT marginalizable
    // n+1..m: non-fixed vertices that are marginalizable
    clearIndexMapping();
    _activeVertices.clear();
    _activeVertices.reserve(vset.size());
    _activeEdges.clear();

    set<Edge*> auxEdgeSet; // temporary structure to avoid duplicates

    for (HyperGraph::VertexSet::iterator
         it  = vset.begin();
         it != vset.end();
         it++)
    {
      OptimizableGraph::Vertex* v= (OptimizableGraph::Vertex*) *it;
      const OptimizableGraph::EdgeSet& vEdges=v->edges();
      // count if there are edges in that level. If not remove from the pool
      int levelEdges=0;
      for (OptimizableGraph::EdgeSet::const_iterator
           it  = vEdges.begin();
           it != vEdges.end();
           it++)
      {
        OptimizableGraph::Edge* e =
           reinterpret_cast<OptimizableGraph::Edge*>(*it);
        if (level < 0 || e->level() == level)
//......... part of the code is omitted here .........
