This article collects and summarizes typical usage examples of the optimizer function in C++. If you have been struggling with questions such as what the C++ optimizer function actually does, how to use optimizer, or what real calls to optimizer look like, then the curated code examples here may help.
The following presents 20 code examples of the optimizer function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better C++ code examples.
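Before diving in, note that `optimizer` appears in two different shapes across these examples: in the OMR snippets (Examples 1, 13, and 17) it is an accessor method returning a pointer to the pass's optimizer object, while in most of the others it is a local object named `optimizer` whose constructor and member functions do the work. Below is a minimal sketch of both idioms; `Optimizer`, `Pass`, and `run()` are hypothetical stand-ins, not types from any project shown on this page.

// Hypothetical sketch of the two "optimizer" idioms seen below.
struct Optimizer {
    bool optimize() { return true; }  // stand-in for real optimization work
};

struct Pass {
    explicit Pass(Optimizer* opt) : _opt(opt) {}
    Optimizer* optimizer() { return _opt; }  // idiom 1: accessor method,
    void run() {                             // as in the OMR examples
        bool ok = optimizer()->optimize();
        (void)ok;
    }
private:
    Optimizer* _opt;
};

int main() {
    Optimizer backing;
    Pass pass(&backing);
    pass.run();

    Optimizer optimizer;   // idiom 2: a local object named "optimizer",
    optimizer.optimize();  // constructed and invoked directly
    return 0;
}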
Example 1: optimizer
void
OMR::Simplifier::prePerformOnBlocks()
   {
   _invalidateUseDefInfo = false;
   _alteredBlock = false;
   _blockRemoved = false;

   _useDefInfo = optimizer()->getUseDefInfo();
   _valueNumberInfo = optimizer()->getValueNumberInfo();
   _containingStructure = NULL;

   if (_reassociate)
      {
      _hashTable.reset();
      _hashTable.init(1000, true);

      TR_ASSERT(comp()->getFlowGraph()->getStructure(), "assertion failure");
      computeInvarianceOfAllStructures(comp(), comp()->getFlowGraph()->getStructure());
      }

   _ccHashTab.reset();
   _ccHashTab.init(64, true);

   if (trace())
      {
      comp()->dumpMethodTrees("Trees before simplification");
      }
   }
Developer ID: bjornvar, Project: omr, Lines of code: 28, Source file: OMRSimplifier.cpp
Example 2: optimizer
MDOUBLE ssrvDistanceSeqs2Tree::calcSideInfoGivenTreeAndAlpha(const sequenceContainer &sc, const tree &et, MDOUBLE alpha)
{
    _newAlpha = alpha;
    (static_cast<gammaDistribution*>(_spPtr->distr()))->setAlpha(alpha);

    // optimize only nu (and tamura92 params, if relevant)
    if (!dynamic_cast<tamura92*>(
            static_cast<replacementModelSSRV*>(_spPtr->getPijAccelerator()->getReplacementModel())
                ->getBaseRM()
        )
    ) {
        bestParamSSRV optimizer(false, true, false, false);
        optimizer(et, sc, *(static_cast<stochasticProcessSSRV*>(_spPtr)), _weights,
                  15, 15, _epsilonLikelihoodImprovement4alphaOptimiz, _epsilonLikelihoodImprovement,
                  _epsilonLikelihoodImprovement4BBL, _maxIterationsBBL, 5);
        _newNu = optimizer.getBestNu();
        return (optimizer.getBestL());
    } else {
        bestParamSSRV optimizer(false, true, true, false);
        optimizer(et, sc, *(static_cast<stochasticProcessSSRV*>(_spPtr)), _weights,
                  15, 15, _epsilonLikelihoodImprovement4alphaOptimiz, _epsilonLikelihoodImprovement,
                  _epsilonLikelihoodImprovement4BBL, _maxIterationsBBL, 5);
        _newNu = optimizer.getBestNu();
        return (optimizer.getBestL());
    }
}
Developer ID: AidanDelaney, Project: fastml2, Lines of code: 26, Source file: ssrvDistanceSeqs2Tree.cpp
Example 3: optimizer
void CtcPolytopeHull::contract(IntervalVector& box) {

    if (!(limit_diam_box.contains(box.max_diam()))) return;
    // is it necessary? YES (BNE) Soplex can give false infeasible results with large numbers

    // cout << " box before LR " << box << endl;

    try {
        // Update the bounds of the variables
        mylinearsolver->initBoundVar(box);

        // returns the number of constraints in the linearized system
        int cont = lr.linearization(box, mylinearsolver);

        if (cont < 1) return;

        optimizer(box);

        // mylinearsolver->writeFile("LP.lp");
        // system("cat LP.lp");
        // cout << " box after LR " << box << endl;

        mylinearsolver->cleanConst();
    }
    catch (EmptyBoxException&) {
        box.set_empty(); // empty the box before exiting in case of EmptyBoxException
        mylinearsolver->cleanConst();
        throw EmptyBoxException();
    }
}
Developer ID: nicolaje, Project: IBEX, Lines of code: 31, Source file: ibex_CtcPolytopeHull.cpp
Example 4: optimizer
void IdwInterpolator::_buildModel()
{
    _index.reset();

    if (_p < 0.0)
    {
        NelderMead optimizer(1, new IdwOptimizeFunction(*this), _stopDelta);

        Vector result;
        result.prepare(1);

        _p = 1.0;
        result[0] = _p;
        optimizer.step(result, -estimateError());

        _p = 4.0;
        result[0] = _p;
        optimizer.step(result, -estimateError());

        int count = 0;
        while (optimizer.done() == false)
        {
            double e = -estimateError();
            cout << "error: " << e << " count: " << count++ << endl;
            result = optimizer.step(result, e);
            _p = result[0];
        }
    }
}
Developer ID: msorenson, Project: hootenanny, Lines of code: 28, Source file: IdwInterpolator.cpp
Example 5: TEST
//*************************************************************************
TEST (EssentialMatrixFactor2, extraMinimization) {
  // Additional test with camera moving in positive X direction

  // We start with a factor graph and add constraints to it
  // Noise sigma is 1, assuming pixel measurements
  NonlinearFactorGraph graph;
  for (size_t i = 0; i < data.number_tracks(); i++)
    graph.add(EssentialMatrixFactor2(100, i, pA(i), pB(i), model2, K));

  // Check error at ground truth
  Values truth;
  truth.insert(100, trueE);
  for (size_t i = 0; i < data.number_tracks(); i++) {
    Point3 P1 = data.tracks[i].p;
    truth.insert(i, double(baseline / P1.z()));
  }
  EXPECT_DOUBLES_EQUAL(0, graph.error(truth), 1e-8);

  // Optimize
  LevenbergMarquardtParams parameters;
  // parameters.setVerbosity("ERROR");
  LevenbergMarquardtOptimizer optimizer(graph, truth, parameters);
  Values result = optimizer.optimize();

  // Check result
  EssentialMatrix actual = result.at<EssentialMatrix>(100);
  EXPECT(assert_equal(trueE, actual, 1e-1));
  for (size_t i = 0; i < data.number_tracks(); i++)
    EXPECT_DOUBLES_EQUAL(truth.at<double>(i), result.at<double>(i), 1e-1);

  // Check error at result
  EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-4);
}
Developer ID: exoter-rover, Project: slam-gtsam, Lines of code: 34, Source file: testEssentialMatrixFactor.cpp
Example 6: biasSVDFunc
void BiasSVD<OptimizerType>::Apply(const arma::mat& data,
                                   const size_t rank,
                                   arma::mat& u,
                                   arma::mat& v,
                                   arma::vec& p,
                                   arma::vec& q)
{
  // batchSize is 1 in our implementation of Bias SVD.
  // batchSize other than 1 has not been supported yet.
  const int batchSize = 1;
  Log::Warn << "The batch size for optimizing BiasSVD is 1."
      << std::endl;

  // Make the optimizer object using a BiasSVDFunction object.
  BiasSVDFunction<arma::mat> biasSVDFunc(data, rank, lambda);
  ens::StandardSGD optimizer(alpha, batchSize,
      iterations * data.n_cols);

  // Get optimized parameters.
  arma::mat parameters = biasSVDFunc.GetInitialPoint();
  optimizer.Optimize(biasSVDFunc, parameters);

  // Constants for extracting user and item matrices.
  const size_t numUsers = max(data.row(0)) + 1;
  const size_t numItems = max(data.row(1)) + 1;

  // Extract user and item matrices, user and item bias from the optimized
  // parameters.
  u = parameters.submat(0, numUsers, rank - 1, numUsers + numItems - 1).t();
  v = parameters.submat(0, 0, rank - 1, numUsers - 1);
  p = parameters.row(rank).subvec(numUsers, numUsers + numItems - 1).t();
  q = parameters.row(rank).subvec(0, numUsers - 1).t();
}
Developer ID: dasayan05, Project: mlpack, Lines of code: 33, Source file: bias_svd_impl.hpp
Example 7: visibleSize
SparseAutoencoder<OptimizerType>::SparseAutoencoder(const arma::mat& data,
                                                    const size_t visibleSize,
                                                    const size_t hiddenSize,
                                                    double lambda,
                                                    double beta,
                                                    double rho) :
    visibleSize(visibleSize),
    hiddenSize(hiddenSize),
    lambda(lambda),
    beta(beta),
    rho(rho)
{
  SparseAutoencoderFunction encoderFunction(data, visibleSize, hiddenSize,
                                            lambda, beta, rho);
  OptimizerType<SparseAutoencoderFunction> optimizer(encoderFunction);

  parameters = encoderFunction.GetInitialPoint();

  // Train the model.
  Timer::Start("sparse_autoencoder_optimization");
  const double out = optimizer.Optimize(parameters);
  Timer::Stop("sparse_autoencoder_optimization");

  Log::Info << "SparseAutoencoder::SparseAutoencoder(): final objective of "
      << "trained model is " << out << "." << std::endl;
}
Developer ID: Andrew-He, Project: mlpack, Lines of code: 26, Source file: sparse_autoencoder_impl.hpp
Example 8: optimize
/**
 * Entry point for an optimization pass.
 */
size_t optimize(
    std::unique_ptr<SingleImplAnalysis> analysis,
    const ClassHierarchy& ch,
    Scope& scope, const SingleImplConfig& config) {
  OptimizationImpl optimizer(std::move(analysis), ch);
  return optimizer.optimize(scope, config);
}
Developer ID: RyanFu, Project: redex, Lines of code: 10, Source file: SingleImplOptimize.cpp
Example 9: RELEASE_ASSERT
JSObject* ProgramExecutable::initializeGlobalProperties(VM& vm, CallFrame* callFrame, JSScope* scope)
{
    RELEASE_ASSERT(scope);
    JSGlobalObject* globalObject = scope->globalObject();
    RELEASE_ASSERT(globalObject);
    ASSERT(&globalObject->vm() == &vm);

    JSObject* exception = 0;
    UnlinkedProgramCodeBlock* unlinkedCodeBlock = globalObject->createProgramCodeBlock(callFrame, this, &exception);
    if (exception)
        return exception;

    m_unlinkedProgramCodeBlock.set(vm, this, unlinkedCodeBlock);

    BatchedTransitionOptimizer optimizer(vm, globalObject);

    for (size_t i = 0, numberOfFunctions = unlinkedCodeBlock->numberOfFunctionDecls(); i < numberOfFunctions; ++i) {
        UnlinkedFunctionExecutable* unlinkedFunctionExecutable = unlinkedCodeBlock->functionDecl(i);
        ASSERT(!unlinkedFunctionExecutable->name().isEmpty());
        globalObject->addFunction(callFrame, unlinkedFunctionExecutable->name());
        if (vm.typeProfiler() || vm.controlFlowProfiler()) {
            vm.functionHasExecutedCache()->insertUnexecutedRange(sourceID(),
                unlinkedFunctionExecutable->typeProfilingStartOffset(),
                unlinkedFunctionExecutable->typeProfilingEndOffset());
        }
    }

    const VariableEnvironment& variableDeclarations = unlinkedCodeBlock->variableDeclarations();
    for (auto& entry : variableDeclarations) {
        ASSERT(entry.value.isVar());
        globalObject->addVar(callFrame, Identifier::fromUid(&vm, entry.key.get()));
    }
    return 0;
}
Developer ID: buchongyu, Project: webkit, Lines of code: 34, Source file: Executable.cpp
Example 10: Q_ASSERT
// --------------------------------------------------------
void ClutchHandle::optimizePosition(float aspect_ratio)
{
    Q_ASSERT(_radius_mode == OriginalRadius);
    //QPointF c = _clutch_handle->posT();
    //QPointF a = _absolute_handle->posT();

    qDebug() << "pre src pos" << _mg->srcCenterT();
    qDebug() << "pre src radius" << _mg->srcRadiusT();
    qDebug() << "pre dst pos" << _mg->dstCenterT();
    qDebug() << "pre dst radius" << _mg->dstRadiusT();

    QPointF cma = _mg->dstCenterT() - _mg->srcCenterT();
    if (QLineF(_mg->dstCenterT(), _mg->srcCenterT()).length() < _mg->dstRadiusT() + _mg->srcRadiusT())
    {
        qDebug() << "old src pos" << _mg->srcCenterT();
        qDebug() << "old src radius" << _mg->srcRadiusT();
        qDebug() << "old dst pos" << _mg->dstCenterT();
        qDebug() << "old dst radius" << _mg->dstRadiusT();

        MagnifyingGlassOptimizer optimizer(
            _mg,
            aspect_ratio,
            atan2(-cma.y(), cma.x()) * 180 / 3.1415f
        );
        optimizer.optimizeLastGlassPosition();

        qDebug() << "new src pos" << _mg->srcCenterT();
        qDebug() << "new src radius" << _mg->srcRadiusT();
        qDebug() << "new dst pos" << _mg->dstCenterT();
        qDebug() << "new dst radius" << _mg->dstRadiusT();
        //_original_dst_radius = _mg->dstRadiusT(); // is this line required?
    }
}
Developer ID: dpkay, Project: fingerglass, Lines of code: 32, Source file: handle.cpp
Example 11: main
int main(int argc, char ** argv) {
  MPI_Init(&argc, &argv);

  QUESO::FullEnvironment env(MPI_COMM_WORLD, "", "", NULL);

  QUESO::VectorSpace<QUESO::GslVector, QUESO::GslMatrix> paramSpace(env,
      "space_", 3, NULL);

  QUESO::GslVector minBound(paramSpace.zeroVector());
  minBound[0] = -10.0;
  minBound[1] = -10.0;
  minBound[2] = -10.0;

  QUESO::GslVector maxBound(paramSpace.zeroVector());
  maxBound[0] = 10.0;
  maxBound[1] = 10.0;
  maxBound[2] = 10.0;

  QUESO::BoxSubset<QUESO::GslVector, QUESO::GslMatrix> domain("", paramSpace,
      minBound, maxBound);

  ObjectiveFunction<QUESO::GslVector, QUESO::GslMatrix> objectiveFunction(
      "", domain);

  QUESO::GslVector initialPoint(paramSpace.zeroVector());
  initialPoint[0] = 9.0;
  initialPoint[1] = -9.0;
  initialPoint[2] = -1.0;  // the scraped copy assigned initialPoint[1] twice; [2] matches the 3-D space

  QUESO::GslOptimizer optimizer(objectiveFunction);

  double tol = 1.0e-10;
  optimizer.setTolerance(tol);
  optimizer.set_solver_type(QUESO::GslOptimizer::STEEPEST_DESCENT);

  QUESO::OptimizerMonitor monitor(env);
  monitor.set_display_output(true, true);

  std::cout << "Solving with Steepest Descent" << std::endl;
  optimizer.minimize(&monitor);

  if (std::abs(optimizer.minimizer()[0] - 1.0) > tol) {
    std::cerr << "GslOptimize failed. Found minimizer at: " << optimizer.minimizer()[0]
              << std::endl;
    std::cerr << "Actual minimizer is 1.0" << std::endl;
    queso_error();
  }

  std::string nm = "nelder_mead2";
  optimizer.set_solver_type(nm);
  monitor.reset();
  monitor.set_display_output(true, true);

  std::cout << std::endl << "Solving with Nelder Mead" << std::endl;
  optimizer.minimize(&monitor);
  monitor.print(std::cout, false);

  return 0;
}
Developer ID: EricDoug, Project: queso, Lines of code: 60, Source file: test_gsloptimizer.C
Example 12: RELEASE_ASSERT
JSObject* ProgramExecutable::initializeGlobalProperties(VM& vm, CallFrame* callFrame, JSScope* scope)
{
    RELEASE_ASSERT(scope);
    JSGlobalObject* globalObject = scope->globalObject();
    RELEASE_ASSERT(globalObject);
    ASSERT(&globalObject->vm() == &vm);

    JSObject* exception = 0;
    UnlinkedProgramCodeBlock* unlinkedCodeBlock = globalObject->createProgramCodeBlock(callFrame, this, &exception);
    if (exception)
        return exception;

    m_unlinkedProgramCodeBlock.set(vm, this, unlinkedCodeBlock);

    BatchedTransitionOptimizer optimizer(vm, globalObject);

    const UnlinkedProgramCodeBlock::VariableDeclations& variableDeclarations = unlinkedCodeBlock->variableDeclarations();
    const UnlinkedProgramCodeBlock::FunctionDeclations& functionDeclarations = unlinkedCodeBlock->functionDeclarations();

    for (size_t i = 0; i < functionDeclarations.size(); ++i) {
        UnlinkedFunctionExecutable* unlinkedFunctionExecutable = functionDeclarations[i].second.get();
        JSValue value = JSFunction::create(vm, unlinkedFunctionExecutable->link(vm, m_source, lineNo(), 0), scope);
        globalObject->addFunction(callFrame, functionDeclarations[i].first, value);
    }

    for (size_t i = 0; i < variableDeclarations.size(); ++i) {
        if (variableDeclarations[i].second & DeclarationStacks::IsConstant)
            globalObject->addConst(callFrame, variableDeclarations[i].first);
        else
            globalObject->addVar(callFrame, variableDeclarations[i].first);
    }
    return 0;
}
Developer ID: kunalnaithani, Project: webkit, Lines of code: 33, Source file: Executable.cpp
Example 13: _hashTable
// Simplify all blocks
//
OMR::Simplifier::Simplifier(TR::OptimizationManager *manager)
   : TR::Optimization(manager),
     _hashTable(manager->trMemory(), stackAlloc),
     _ccHashTab(manager->trMemory(), stackAlloc)
   {
   _invalidateUseDefInfo = false;
   _alteredBlock = false;
   _blockRemoved = false;

   _useDefInfo = optimizer()->getUseDefInfo();
   _valueNumberInfo = optimizer()->getValueNumberInfo();

   _reassociate = comp()->getOption(TR_EnableReassociation);

   _containingStructure = NULL;
   }
Developer ID: bjornvar, Project: omr, Lines of code: 18, Source file: OMRSimplifier.cpp
Example 14: operator
param_type
operator()(const Range& samples, std::size_t labels, std::size_t features) {
  conjugate_gradient<param_type> optimizer(
      new slope_binary_search<param_type>(1e-6, wolfe<T>::conjugate_gradient()),
      {1e-6, false}
  );
  param_ll_objective<softmax_ll<T>, Range> objective(
      samples,
      regul_ ? new l2_regularization<param_type>(regul_) : nullptr
  );
  optimizer.objective(&objective);
  optimizer.solution(param_type(labels, features, T(0)));

  for (std::size_t it = 0; !optimizer.converged() && it < max_iter_; ++it) {
    line_search_result<T> value = optimizer.iterate();
    if (verbose_) {
      std::cout << "Iteration " << it << ", " << value << std::endl;
    }
  }

  if (!optimizer.converged()) {
    std::cerr << "Warning: failed to converge" << std::endl;
  }
  if (verbose_) {
    std::cout << "Number of calls: " << objective.calls() << std::endl;
  }
  return optimizer.solution();
}
Developer ID: chongbingbao, Project: libgm, Lines of code: 26, Source file: softmax_mle.hpp
Example 15: TEST_F
TEST_F(EmptyArrayToStringOptimizerTests,
       OptimizerHasNonEmptyID) {
    ShPtr<EmptyArrayToStringOptimizer> optimizer(
        new EmptyArrayToStringOptimizer(module));

    EXPECT_TRUE(!optimizer->getId().empty()) <<
        "the optimizer should have a non-empty ID";
}
Developer ID: SchuckBeta, Project: retdec, Lines of code: 8, Source file: empty_array_to_string_optimizer_tests.cpp
Example 16: TEST_F
TEST_F(VarDefLoopOptimizerTests,
       OptimizerHasNonEmptyID) {
    ShPtr<VarDefForLoopOptimizer> optimizer(
        new VarDefForLoopOptimizer(module));

    EXPECT_TRUE(!optimizer->getId().empty()) <<
        "the optimizer should have a non-empty ID";
}
Developer ID: SchuckBeta, Project: retdec, Lines of code: 8, Source file: var_def_for_loop_optimizer_tests.cpp
Example 17: comp
void
OMR::Simplifier::postPerformOnBlocks()
   {
   if (trace())
      comp()->dumpMethodTrees("Trees after simplification");

#ifdef DEBUG
   resetBlockVisitFlags(comp());
#endif

   // Invalidate usedef and value number information if necessary
   //
   if (_useDefInfo && _invalidateUseDefInfo)
      optimizer()->setUseDefInfo(NULL);
   if (_valueNumberInfo && _invalidateValueNumberInfo)
      optimizer()->setValueNumberInfo(NULL);
   }
Developer ID: bjornvar, Project: omr, Lines of code: 17, Source file: OMRSimplifier.cpp
Example 18: bayes_optimization_disc
int bayes_optimization_disc(int nDim, eval_func f, void* f_data,
                            double *valid_x, size_t n_points,
                            double *x, double *minf, bopt_params parameters)
{
  vectord result(nDim);
  vectord input(nDim);
  vecOfvec xSet;

  for (size_t i = 0; i < n_points; ++i)
  {
    for (int j = 0; j < nDim; ++j)
    {
      input(j) = valid_x[i * nDim + j];
    }
    xSet.push_back(input);
  }

  if (parameters.n_init_samples > n_points)
  {
    parameters.n_init_samples = n_points;
    parameters.n_iterations = 0;
  }

  try
  {
    CDiscreteModel optimizer(xSet, parameters);
    optimizer.set_eval_funct(f);
    optimizer.save_other_data(f_data);
    optimizer.optimize(result);

    std::copy(result.begin(), result.end(), x);

    *minf = optimizer.getValueAtMinimum();
  }
  catch (std::bad_alloc& e)
  {
    FILE_LOG(logERROR) << e.what();
    return BAYESOPT_OUT_OF_MEMORY;
  }
  catch (std::invalid_argument& e)
  {
    FILE_LOG(logERROR) << e.what();
    return BAYESOPT_INVALID_ARGS;
  }
  catch (std::runtime_error& e)
  {
    FILE_LOG(logERROR) << e.what();
    return BAYESOPT_RUNTIME_ERROR;
  }
  catch (...)
  {
    FILE_LOG(logERROR) << "Unknown error";
    return BAYESOPT_FAILURE;
  }

  return 0; /* everything ok */
}
Developer ID: mathkann, Project: bayesopt, Lines of code: 58, Source file: bayesoptwpr.cpp
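A hedged sketch of how a caller might drive the `bayes_optimization_disc` wrapper from Example 18. The `eval_func` signature used here and the `initialize_parameters_to_default()` helper are my assumptions about the BayesOpt C API, as is the header path; check bayesopt.h in your BayesOpt version before relying on either.

// Usage sketch only; eval_func signature, header path, and
// initialize_parameters_to_default() are assumptions about BayesOpt's C API.
#include "bayesopt/bayesopt.h"  // header location assumed
#include <cstdio>

static double sphere(unsigned int n, const double* x, double* grad, void* data)
{
    (void)grad;  // assumed: this API variant does not use gradients
    (void)data;
    double s = 0.0;
    for (unsigned int i = 0; i < n; ++i)
        s += x[i] * x[i];
    return s;  // minimum at the origin
}

int main()
{
    // Three candidate points in 2-D, flattened row-major to match the
    // valid_x[i*nDim+j] indexing in Example 18.
    double valid_x[] = { -1.0, -1.0,   0.0, 0.0,   1.0, 1.0 };
    double x[2];
    double minf = 0.0;

    bopt_params params = initialize_parameters_to_default();  // assumed helper
    int err = bayes_optimization_disc(2, sphere, NULL, valid_x, 3,
                                      x, &minf, params);
    if (err == 0)
        std::printf("min %g at (%g, %g)\n", minf, x[0], x[1]);
    return err;
}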
Example 19: main
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        puts("plPageOptimizer: wrong number of arguments");
        return 1;
    }

    plFileName filename = argv[1];
    plPrintf("Optimizing {}...", filename);

#ifndef _DEBUG
    try {
#endif
        plResManager* resMgr = new plResManager;
        hsgResMgr::Init(resMgr);
#ifndef _DEBUG
    } catch (...) {
        puts(" ***crashed on init");
        return 2;
    }
#endif

#ifndef _DEBUG
    try
#endif
    {
        plPageOptimizer optimizer(argv[1]);
        optimizer.Optimize();
    }
#ifndef _DEBUG
    catch (...) {
        puts(" ***crashed on optimizing");
        return 2;
    }
#endif

#ifndef _DEBUG
    try {
#endif
        // Reading in objects may have generated dirty state which we're obviously
        // not sending out. Clear it so that we don't have leaked keys before the
        // ResMgr goes away.
        std::vector<plSynchedObject::StateDefn> carryOvers;
        plSynchedObject::ClearDirtyState(carryOvers);

        hsgResMgr::Shutdown();
#ifndef _DEBUG
    } catch (...) {
        puts(" ***crashed on shutdown");
        return 2;
    }
#endif

    return 0;
}
Developer ID: Drakesinger, Project: Plasma, Lines of code: 56, Source file: main.cpp
Example 20: regressor
double SoftmaxRegression<OptimizerType>::Train(const arma::mat& data,
                                               const arma::Row<size_t>& labels,
                                               const size_t numClasses)
{
  SoftmaxRegressionFunction regressor(data, labels, numClasses,
                                      lambda, fitIntercept);
  OptimizerType<SoftmaxRegressionFunction> optimizer(regressor);

  return Train(optimizer);
}
Developer ID: AmesianX, Project: mlpack, Lines of code: 10, Source file: softmax_regression_impl.hpp
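The mlpack snippets above show two generations of optimizer plumbing: the older policy-template shape, where `OptimizerType<FunctionType>` wraps the objective at construction and `Optimize()` takes only the parameters (Examples 7 and 20), and the newer ensmallen shape, where the optimizer is built from hyperparameters alone and the objective is passed to `Optimize(function, parameters)` (Example 6). A minimal sketch contrasting the two shapes; `Objective`, `PolicyOptimizer`, and `EnsStyleOptimizer` are hypothetical stand-ins, not actual mlpack or ensmallen types.

// Hypothetical stand-ins illustrating the two optimizer shapes.
#include <iostream>

struct Objective {
    // Toy objective f(x) = (x - 3)^2, minimized at x = 3.
    double Evaluate(double x) const { return (x - 3.0) * (x - 3.0); }
    double Gradient(double x) const { return 2.0 * (x - 3.0); }
};

// Older policy-template shape: the objective is bound at construction
// and Optimize() takes only the parameters (cf. Examples 7 and 20).
template<typename FunctionType>
struct PolicyOptimizer {
    explicit PolicyOptimizer(FunctionType& fn) : fn(fn) {}
    double Optimize(double& x) {
        for (int i = 0; i < 200; ++i)
            x -= 0.1 * fn.Gradient(x);  // fixed-step gradient descent
        return fn.Evaluate(x);
    }
    FunctionType& fn;
};

// Newer ensmallen shape: the optimizer holds only hyperparameters and
// the objective is passed to Optimize() (cf. Example 6).
struct EnsStyleOptimizer {
    template<typename FunctionType>
    double Optimize(FunctionType& fn, double& x) {
        for (int i = 0; i < 200; ++i)
            x -= 0.1 * fn.Gradient(x);
        return fn.Evaluate(x);
    }
};

int main() {
    Objective fn;
    double x1 = 0.0, x2 = 0.0;

    PolicyOptimizer<Objective> oldStyle(fn);
    double f1 = oldStyle.Optimize(x1);
    std::cout << "policy style: f = " << f1 << " at x = " << x1 << '\n';

    EnsStyleOptimizer newStyle;
    double f2 = newStyle.Optimize(fn, x2);
    std::cout << "ensmallen style: f = " << f2 << " at x = " << x2 << '\n';
    return 0;
}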
Note: The optimizer function examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to each project's license; do not repost without permission.