C++ vectord Class Code Examples


This article collects typical usage examples of the C++ vectord class. If you have been wondering how vectord is used in C++, what it is for, or what working examples look like, the curated class examples below may help.



Below are 20 code examples of the vectord class, sorted by popularity by default. Most are drawn from the bayesopt Bayesian-optimization library and related projects.

Example 1: run_nlopt

  double run_nlopt(nlopt::algorithm algo, eval_func fpointer,
		   vectord& Xnext, int maxf, const std::vector<double>& vd, 
		   const std::vector<double>& vu, void* objPointer)
  {
    double fmin = 0.0;
    size_t n = Xnext.size(); 
    nlopt::opt opt (algo,n);

    std::vector<double> xstd(n);
    opt.set_lower_bounds(vd);
    opt.set_upper_bounds(vu);
    opt.set_min_objective(fpointer, objPointer);
    opt.set_maxeval(maxf);
    
    // It seems BOBYQA can be unstable if the same point is tested
    // over and over. NLOPT bug?
    opt.set_ftol_rel(1e-12);	
    opt.set_ftol_abs(1e-12);

    std::copy(Xnext.begin(),Xnext.end(),xstd.begin());
      
    try 
      { 
	opt.optimize(xstd, fmin);  
      }
    catch (nlopt::roundoff_limited& e)
      {
	FILE_LOG(logDEBUG) << "NLOPT Warning: Potential roundoff error. " 
			   << "In general, this can be ignored.";
      }

    std::copy(xstd.begin(),xstd.end(),Xnext.begin());
    return fmin;
  }
Developer: rmcantin, Project: bayesopt, Lines: 34, Source: inneroptimization.cpp
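For orientation, here is a minimal sketch of a call site, assuming vectord is bayesopt's boost::numeric::ublas::vector<double> typedef and that my_objective (an eval_func-compatible callback) and my_data are caller-supplied stand-ins; both names are hypothetical, not part of the original example:

  // Minimize over the box [0,1]^2, starting from the center.
  vectord x0(2);
  x0(0) = 0.5;  x0(1) = 0.5;
  std::vector<double> lower(2, 0.0), upper(2, 1.0);
  double fmin = run_nlopt(nlopt::LN_BOBYQA, &my_objective, x0,
                          100, lower, upper, &my_data);
  // x0 is overwritten in place with the best point found; fmin is its value.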


Example 2: FILE_LOG

  void KernelModel::setKernel (const vectord &thetav, 
			      const vectord &stheta,
			      std::string k_name, 
			      size_t dim)
  {
    KernelFactory mKFactory;

    mKernel.reset(mKFactory.create(k_name, dim));

    if ((thetav.size() == 1) && (stheta.size() == 1) && (mKernel->nHyperParameters() != 1))
      {
	// We assume isotropic prior, so we replicate the vectors for all dimensions
	size_t n = mKernel->nHyperParameters();

	FILE_LOG(logINFO) << "Expected " << n << " hyperparameters."
			  << " Replicating parameters and prior.";

	vectord newthetav = svectord(n,thetav(0));
	vectord newstheta = svectord(n,stheta(0));

	setKernelPrior(newthetav,newstheta);
	mKernel->setHyperParameters(newthetav);
      }
    else
      {
	setKernelPrior(thetav,stheta);
	mKernel->setHyperParameters(thetav);
      }
  }
Developer: mathkann, Project: bayesopt, Lines: 29, Source: kernel_functors.cpp
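A note on the isotropic branch: svectord(n, v) is bayesopt's constant-vector typedef (apparently boost::numeric::ublas::scalar_vector<double>), so a single user-supplied value is broadcast to all n kernel dimensions:

  // Kernel expects n = 3 hyperparameters, user supplied a single 0.5:
  vectord newthetav = svectord(3, 0.5);   // yields (0.5, 0.5, 0.5)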


Example 3: evaluateSample

  double evaluateSample( const vectord &Xi ) 
  {
    double x[100];  // fixed-size buffer: assumes the input dimension never exceeds 100
    for (size_t i = 0; i < Xi.size(); ++i)
      x[i] = Xi(i);
    return testFunction(Xi.size(),x,NULL,NULL);
  };
Developer: NoviaDroid, Project: MarioDifficulty, Lines: 7, Source: bo_disc.cpp


Example 4: setHyperParameters

    void setHyperParameters(const vectord &theta) 
    {
      if(theta.size() != n_params)
	{
	  throw std::invalid_argument("Wrong number of kernel hyperparameters");
	}
      params = theta; // TODO: assignment only to presize the container; could be more efficient.
      std::transform(theta.begin(), theta.end(), params.begin(), (double (*)(double)) exp);
    };
Developer: mathkann, Project: bayesopt, Lines: 9, Source: kernel_atomic.hpp
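The std::transform call shows the convention at work: hyperparameters arrive in log space and are exponentiated on storage, which keeps length-scales positive under unconstrained optimization. A small sketch, where the kernel instance k is hypothetical:

  vectord theta(2);
  theta(0) = std::log(0.1);     // log length-scale, dimension 1
  theta(1) = std::log(2.0);     // log length-scale, dimension 2
  k.setHyperParameters(theta);  // internally stores params = (0.1, 2.0)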


Example 5: computeWeightedNorm2

    inline double computeWeightedNorm2(const vectord &x1, const vectord &x2)
    {
      assert(n_inputs == x1.size());
      assert(x1.size() == x2.size());
      assert(x1.size() == params.size());

      vectord xd = x1-x2;
      vectord r = utils::ublas_elementwise_div(xd, params);
      return norm_2(r);
    };
Developer: mathkann, Project: bayesopt, Lines: 10, Source: kernel_atomic.hpp
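Spelled out, computeWeightedNorm2 returns the length-scale-weighted Euclidean distance that ARD-style kernels are built on:

  r(x_1,x_2) = \left\| \frac{x_1 - x_2}{\theta} \right\|_2
             = \sqrt{\sum_{i=1}^{n} \left( \frac{x_{1,i} - x_{2,i}}{\theta_i} \right)^2 }

where θ = params holds one length-scale per input dimension (hence the three size asserts).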


Example 6: setParameters

    int setParameters(const vectord& params)
    { 
      if(params.size() != n_params)
	{
	  FILE_LOG(logERROR) << "Wrong number of mean function parameters"; 
	  return -1; 
	}

      mConstParam = params(0);
      mParameters = boost::numeric::ublas::project(params, 
						   boost::numeric::ublas::range(1, params.size())); 
      return 0;
    };
Developer: NoviaDroid, Project: MarioDifficulty, Lines: 13, Source: mean_atomic.hpp


Example 7: assert

/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: getSigmaPoints                                                                   *
 *  Class      : UnscentedExpectedImprovement                                                     *
 **************************************************************************************************/
void UnscentedExpectedImprovement::getSigmaPoints(const vectord&          x             ,
                                                  const double            scale         ,
                                                  const int               dim           ,
                                                  const matrixd&          matrix_noise  ,
                                                  std::vector<vectord>&   xx            ,
                                                  std::vector<double>&    w             ,
                                                  const bool              matrix_convert)
{
    const size_t n = dim;

    assert(matrix_noise.size1() == n);
    assert(matrix_noise.size2() == n);
    assert(x.size()             == n);

    matrixd px;
    if (matrix_convert) px = UnscentedExpectedImprovement::convertMatrixNoise(matrix_noise, scale, dim);
    else                px = matrix_noise;

    // Output variable initialization
    xx = std::vector<vectord>();
    w  = std::vector<double>();
    xx.push_back(x);
    w .push_back(scale / (dim + scale));

    // Calculate query_i
    for (size_t col = 0; col < n; col += 1)
    {
        xx.push_back(x - boost::numeric::ublas::column(px, col));
        xx.push_back(x + boost::numeric::ublas::column(px, col));
        w .push_back(0.5 / (dim + scale));
        w .push_back(0.5 / (dim + scale));
    }
}
Developer: josemscnogueira, Project: bayesopt, Lines: 39, Source: criteria_uei.cpp
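These are the standard unscented-transform sigma points. With scaling parameter κ (the scale argument) and dimension n, the weights and points are

  w_0 = \frac{\kappa}{n+\kappa}, \qquad
  w_i^{\pm} = \frac{1}{2(n+\kappa)}, \qquad
  x_i^{\pm} = x \pm \mathrm{col}_i(P), \quad i = 1,\dots,n

so the 2n+1 weights sum to one; P is the (possibly rescaled) matrix px above.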


Example 8: exit

/**************************************************************************************************
 *  Procedure                                                                                     *
 *                                                                                                *
 *  Description: chooseActiveVariables                                                            *
 *  Class      : iCubOptimizable                                                                  *
 **************************************************************************************************/
void iCubOptimizable::chooseActiveVariables(vectord& query)
{
    if (dim != query.size())
    {
        cout << endl << "[ERROR] Query size is not equal to mask active components. From: iCubOptimizable::chooseActiveVariables.";

        exit(-1);
    }

    uint    variables_updated = 0;
    vectord result            = vectord(_original_dim, 0.0);

    for (uint index = 0; index < _original_dim; index += 1)
    {
        if (_active_variables_mask[index] == true)
        {
            result[index]      = query[variables_updated];
            variables_updated += 1;
        }
        else
        {
            result[index]      = _icubparams.default_query[index];
        }
    }

    // Return new query
    query = result;
}
Developer: josemscnogueira, Project: icubgraspopt, Lines: 34, Source: iCubOptimizable.cpp


Example 9: setSamples

  void Dataset::setSamples(const vectord &y)
  {
    mY = y;
    for (size_t i=0; i<y.size(); ++i)
      {
	updateMinMax(i);
      } 
  };
Developer: MLDL, Project: bayesopt, Lines: 8, Source: dataset.cpp


Example 10: computeCrossCorrelation

  inline void KernelModel::computeCrossCorrelation(const vecOfvec& XX, 
						   const vectord &query,
						   vectord& knx)
  {
    std::vector<vectord>::const_iterator x_it  = XX.begin();
    vectord::iterator k_it = knx.begin();
    while(x_it != XX.end())
      {	*k_it++ = (*mKernel)(*x_it++, query); }
  }
Developer: mathkann, Project: bayesopt, Lines: 9, Source: kernel_functors.hpp


Example 11: getFeatures

 vectord getFeatures(const vectord& x) 
 {
   using boost::numeric::ublas::range;
   using boost::numeric::ublas::project;
   vectord res(x.size()+1);
   res(0) = 1;
   project(res,range(1,res.size())) = x;
   return res; 
 };  
Developer: NoviaDroid, Project: MarioDifficulty, Lines: 9, Source: mean_atomic.hpp
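The feature map simply prepends a constant bias term,

  \phi(x) = (1, x_1, \dots, x_n)^{\top}

the usual design vector for a linear-with-intercept mean function.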


Example 12: setKernelPrior

  inline void KernelModel::setKernelPrior (const vectord &theta, 
					   const vectord &s_theta)
  {
    for (size_t i = 0; i<theta.size(); ++i)
      {
	boost::math::normal n(theta(i),s_theta(i));
	priorKernel.push_back(n);
      }
  };
Developer: mathkann, Project: bayesopt, Lines: 9, Source: kernel_functors.hpp


Example 13: gauss

double gauss(const vectord& x, const vectord& mu, const matrixd& sigma)
{
  double n = static_cast<double>(x.size());
  const vectord vd = x-mu;
  matrixd invS = sigma;
  bayesopt::utils::inverse_cholesky(sigma,invS);
  matrixd sig = sigma;

  // The normalization constant divides, hence the negative exponents.
  return pow(2*M_PI,-n/2)*pow(determinant(sig),-0.5)
         *exp(-0.5*inner_prod(vd,prod(invS,vd)));
}
Developer: mathkann, Project: bayesopt, Lines: 10, Source: testmcmc.cpp
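The quantity being computed is the multivariate normal density

  p(x) = (2\pi)^{-n/2}\,|\Sigma|^{-1/2}\,
         \exp\!\left(-\tfrac{1}{2}(x-\mu)^{\top}\Sigma^{-1}(x-\mu)\right)

note the negative exponents: the (2π)^{n/2} |Σ|^{1/2} normalization constant divides the exponential rather than multiplying it.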


Example 14: operator

    double operator()( const vectord &x)
    {
      ++nCalls;
      size_t nDims = x.size();
    
      double beta = sqrt(2*log(static_cast<double>(nCalls*nCalls))*(nDims+1) 
			 + log(static_cast<double>(nDims))*nDims*mCoef);

      ProbabilityDistribution* d_ = mProc->prediction(x);
      return d_->lowerConfidenceBound(beta); 
    };
Developer: NoviaDroid, Project: MarioDifficulty, Lines: 11, Source: criteria_atomic.hpp
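With t = nCalls, d = nDims and c = mCoef, the exploration coefficient computed above is

  \beta_t = \sqrt{2\,(d+1)\,\log t^2 + c\,d\,\log d}

and, assuming lowerConfidenceBound(β) follows the usual definition, the criterion evaluates to LCB_t(x) = μ(x) − β_t σ(x) from the process prediction at x.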


Example 15: assert

  double NLOPT_Optimization::localTrialAround(vectord& Xnext)
  {
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());
    const size_t n = Xnext.size();

    for (size_t i = 0; i < n; ++i) 
      {
	if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
	  {
	    FILE_LOG(logDEBUG) << Xnext;
	    throw std::invalid_argument("Local trial withour proper"
					" initial point.");
	  }
      }

    nlopt::algorithm algo = nlopt::LN_BOBYQA;
    eval_func fpointer = &(NLOPT_Optimization::evaluate_nlopt);
    void* objPointer = static_cast<void *>(rbobj);
    const size_t nIter = 20;
    // std::vector<double> vd(n);
    // std::vector<double> vu(n);

    // for (size_t i = 0; i < n; ++i) 
    //   {
    // 	vd[i] = Xnext(i) - 0.01;
    // 	vu[i] = Xnext(i) + 0.01;
    //   }

    vectord start = Xnext;

    double fmin = run_nlopt(algo,fpointer,Xnext,nIter,
			    mDown,mUp,objPointer);

    FILE_LOG(logDEBUG) << "Near trial " << nIter << "|" 
		       << start << "-> " << Xnext << " f() ->" << fmin;
    
    return fmin;

  }
Developer: rmcantin, Project: bayesopt, Lines: 40, Source: inneroptimization.cpp


Example 16: result

  inline void KernelRegressor::setHyperParameters(const vectord &theta)
  { 
    using boost::numeric::ublas::subrange;
    if (mLearnAll)
      {
	size_t nk = mKernel.nHyperParameters();
	size_t nm = mMean.nParameters();

	mKernel.setHyperParameters(subrange(theta,0,nk));

	vectord result(nm);
	std::transform(theta.begin()+nk, theta.begin()+nk+nm, 
		       result.begin(), (double (*)(double)) log);
	mMean.setParameters(result);

	mSigma = std::exp(theta(nk+nm));
      }     
    else
      {
	mKernel.setHyperParameters(theta); 
      }
  };
Developer: mathkann, Project: bayesopt, Lines: 22, Source: kernelregressor.hpp
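When mLearnAll is set, the combined vector is apparently laid out as

  \theta = (\,\theta_{\text{kernel}},\; \log \theta_{\text{mean}},\; \log \sigma\,)

with nk kernel entries first, nm log-scaled mean parameters next, and the log signal variance last; the subrange/exp bookkeeping above unpacks exactly that.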


Example 17: setHyperParameters

    int setHyperParameters(const vectord &theta) 
    {
      using boost::numeric::ublas::subrange;

      size_t n_lhs = left->nHyperParameters();
      size_t n_rhs = right->nHyperParameters();
      if (theta.size() != n_lhs + n_rhs)
	{
	  FILE_LOG(logERROR) << "Wrong number of kernel hyperparameters"; 
	  return -1; 
	}
      left->setHyperParameters(subrange(theta,0,n_lhs));
      right->setHyperParameters(subrange(theta,n_lhs,n_lhs+n_rhs));
      return 0;
    };
Developer: NoviaDroid, Project: MarioDifficulty, Lines: 15, Source: kernel_combined.hpp


Example 18: sampleInitialPoints

  void DiscreteModel::sampleInitialPoints(matrixd& xPoints, vectord& yPoints)
  {

    vecOfvec perms = mInputSet;
    
    // By using random permutations, we guarantee that 
    // the same point is not selected twice
    utils::randomPerms(perms,mEngine);
    
    // vectord xPoint(mInputSet[0].size());
    for(size_t i = 0; i < yPoints.size(); i++)
      {
	const vectord xP = perms[i];
	row(xPoints,i) = xP;
	yPoints(i) = evaluateSample(xP);
      }
  }
Developer: mathkann, Project: bayesopt, Lines: 17, Source: bayesoptdisc.cpp


Example 19: evaluateSample

  double evaluateSample( const vectord& xin)
  {
     if (xin.size() != 2)
      {
	std::cout << "WARNING: This only works for 2D inputs." << std::endl
		  << "WARNING: Using only first two components." << std::endl;
      }

    float y = -1;
    double x1 = xin(0);
    double x2 = xin(1);
    
    bayesopt::utils::FileParser fp("results.txt");
    std::string call = "python ../examples/standalone_calls/eval_branin.py " + 
      fp.to_string(x1) + " " + fp.to_string(x2);
    int ret = system(call.c_str());
    
    fp.openInput();
    fp.read("y",y);
    fp.close();
    
    return y;
  }
Developer: MLDL, Project: bayesopt, Lines: 23, Source: branin_system_calls.cpp


Example 20: run

  double NLOPT_Optimization::run(vectord &Xnext)
  {   
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());

    eval_func fpointer;
    void *objPointer;

    size_t n = Xnext.size();
    double fmin = 1;
    int maxf1 = maxEvals*n;
    int maxf2 = 0;    // For a second pass
    const double coef_local = 0.1;
    //int ierror;

    // If Xnext is outside the bounding box, it may be uninitialized; reset it to the center.
    for (size_t i = 0; i < n; ++i) 
      {
	if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
	  {
	    Xnext(i)=(mDown[i]+mUp[i])/2.0;
	  }
      }

    //    nlopt_opt opt;
    nlopt::algorithm algo;
    switch(alg)
      {
      case DIRECT: // Pure global. No gradient
	algo = nlopt::GN_DIRECT_L;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case COMBINED: // Combined local-global (90% DIRECT -> 10% local pass). No gradient
	algo = nlopt::GN_DIRECT_L;
	maxf2 = static_cast<int>(static_cast<double>(maxf1)*coef_local);
	maxf1 -= maxf2;  // That way, the number of evaluations is the same in all methods.
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case BOBYQA:  // Pure local. No gradient
	algo = nlopt::LN_BOBYQA;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt);
	objPointer = static_cast<void *>(rbobj);
	break;
      case LBFGS:  // Pure local. Gradient based
	algo = nlopt::LD_LBFGS;
	fpointer = &(NLOPT_Optimization::evaluate_nlopt_grad);
	objPointer = static_cast<void *>(rgbobj);
	break;
      default: 
	throw std::invalid_argument("Inner optimization algorithm"
				    " not supported");
      }

    if (objPointer == NULL)
      {
	throw std::invalid_argument("Wrong object model "
				    "(gradient/no gradient)");
      }

    fmin = run_nlopt(algo,fpointer,Xnext,maxf1,
		     mDown,mUp,objPointer);

    FILE_LOG(logDEBUG) << "1st opt " << maxf1 << "-> " << Xnext 
		       << " f() ->" << fmin;
    if (maxf2)
      {
	//If the point is exactly at the limit, we may have trouble.
    	for (size_t i = 0; i < n; ++i) 
	  {
	    if (Xnext(i)-mDown[i] < 0.0001)
	      {
		Xnext(i) += 0.0001;
		FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
	      }
	    if (mUp[i] - Xnext(i) < 0.0001)
	      {
		Xnext(i) -= 0.0001;
		FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
	      }
	  }

	// BOBYQA may fail in this point. Could it be that EI is not twice differentiable?
	// fmin = run_nlopt(nlopt::LN_BOBYQA,fpointer,Xnext,maxf2,
	// 		 mDown,mUp,objPointer);
	fmin = run_nlopt(nlopt::LN_COBYLA,fpointer,Xnext,maxf2,
			 mDown,mUp,objPointer);
	FILE_LOG(logDEBUG) << "2nd opt " << maxf2 << "-> " << Xnext 
			   << " f() ->" << fmin;
      }

    return fmin;

  } // NLOPT_Optimization::run
Developer: rmcantin, Project: bayesopt, Lines: 95, Source: inneroptimization.cpp
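A quick budget check for the COMBINED branch: with, say, maxEvals = 500 and n = 2, the initial budget is maxf1 = 1000; coef_local = 0.1 then moves maxf2 = 100 evaluations to the local pass and leaves 900 for DIRECT, so the total number of evaluations matches the single-pass methods.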



Note: The vectord class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective authors; copyright remains with the original authors, and use or redistribution should follow each project's license. Do not reproduce without permission.

