This article collects typical usage examples of the C# mlpreport class, drawn from the ALGLIB data-analysis library (dataanalysis.cs). If you are wondering what the mlpreport class is for, how to use it, or where to find sample code, the curated class examples below may help.
20 code examples of the mlpreport class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code samples.
Example 1: _pexec_mlptrainensemblees
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
*************************************************************************/
public static void _pexec_mlptrainensemblees(mlptrainer s,
mlpe.mlpensemble ensemble,
int nrestarts,
mlpreport rep)
{
mlptrainensemblees(s,ensemble,nrestarts,rep);
}
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 10 | Source: dataanalysis.cs
Example 2: mlpetraines
/*************************************************************************
Training neural networks ensemble using early stopping.
INPUT PARAMETERS:
Ensemble - model with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay coefficient, >=0.001
Restarts - restarts, >0.
OUTPUT PARAMETERS:
Ensemble - trained model
Info - return code:
* -2, if there is a point with class number
outside of [0..NClasses-1].
* -1, if incorrect parameters were passed
(NPoints<0, Restarts<1).
* 6, if task has been solved.
Rep - training report.
OOBErrors - out-of-bag generalization error estimate
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
public static void mlpetraines(mlpe.mlpensemble ensemble,
double[,] xy,
int npoints,
double decay,
int restarts,
ref int info,
mlpreport rep)
{
int i = 0;
int k = 0;
int ccount = 0;
int pcount = 0;
double[,] trnxy = new double[0,0];
double[,] valxy = new double[0,0];
int trnsize = 0;
int valsize = 0;
int tmpinfo = 0;
mlpreport tmprep = new mlpreport();
mlpbase.modelerrors moderr = new mlpbase.modelerrors();
int nin = 0;
int nout = 0;
int wcount = 0;
int i_ = 0;
int i1_ = 0;
info = 0;
nin = mlpbase.mlpgetinputscount(ensemble.network);
nout = mlpbase.mlpgetoutputscount(ensemble.network);
wcount = mlpbase.mlpgetweightscount(ensemble.network);
if( (npoints<2 || restarts<1) || (double)(decay)<(double)(0) )
{
info = -1;
return;
}
if( mlpbase.mlpissoftmax(ensemble.network) )
{
for(i=0; i<=npoints-1; i++)
{
if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
{
info = -2;
return;
}
}
}
info = 6;
//
// allocate
//
if( mlpbase.mlpissoftmax(ensemble.network) )
{
ccount = nin+1;
pcount = nin;
}
else
{
ccount = nin+nout;
pcount = nin+nout;
}
trnxy = new double[npoints, ccount];
valxy = new double[npoints, ccount];
rep.ngrad = 0;
rep.nhess = 0;
rep.ncholesky = 0;
//
// train networks
//
for(k=0; k<=ensemble.ensemblesize-1; k++)
{
//
// Split set
//
// ......... (part of the code omitted here) .........
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 101 | Source: dataanalysis.cs
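Usage sketch (added for illustration, not part of the original listing): the internal routine above is normally reached through ALGLIB's public wrapper, which by convention returns Info and Rep as out-parameters. The wrapper names and signatures below (alglib.mlpecreate1, alglib.mlpetraines) are assumptions based on the standard ALGLIB C# API, so treat this as a sketch rather than a definitive reference.
using System;

public static class EtrainesDemo
{
    public static void Main()
    {
        // Toy regression set: one input, one output, y = 2x.
        double[,] xy = { { 0.0, 0.0 }, { 1.0, 2.0 }, { 2.0, 4.0 }, { 3.0, 6.0 } };

        // Ensemble of 5 networks, each 1-3-1 (assumed wrapper, see note above).
        alglib.mlpensemble ensemble;
        alglib.mlpecreate1(1, 3, 1, 5, out ensemble);

        int info;
        alglib.mlpreport rep;
        alglib.mlpetraines(ensemble, xy, 4, 0.001, 2, out info, out rep);

        // Info=6 means "task has been solved" (see the comment block above).
        Console.WriteLine("info={0}, gradient evaluations={1}", info, rep.ngrad);
    }
}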
Example 3: restarts
/*************************************************************************
This function trains neural network ensemble passed to this function using
current dataset and early stopping training algorithm. Each early stopping
round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
training rounds is performed in total).
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. The following
! operations can be executed in parallel:
! * EnsembleSize training sessions performed for each of ensemble
! members (always parallelized)
! * NRestarts training sessions performed within each of training
! sessions (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you will still be able to call the smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object;
Ensemble - neural network ensemble. It must have same number of
inputs and outputs/classes as was specified during
creation of the trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed during each ES round;
* NRestarts=0 is silently replaced by 1.
OUTPUT PARAMETERS:
Ensemble - trained ensemble;
Rep - it contains all types of errors.
NOTE: this training method uses BOTH early stopping and weight decay! So,
you should select weight decay before starting training just as you
select it before training "conventional" networks.
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or a single-point dataset was passed, the ensemble is filled with zero
values.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 22.08.2012 by Bochkanov Sergey
*************************************************************************/
public static void mlptrainensemblees(mlptrainer s,
mlpe.mlpensemble ensemble,
int nrestarts,
mlpreport rep)
{
int nin = 0;
int nout = 0;
int ntype = 0;
int ttype = 0;
alglib.smp.shared_pool esessions = new alglib.smp.shared_pool();
apserv.sinteger sgrad = new apserv.sinteger();
mlpbase.modelerrors tmprep = new mlpbase.modelerrors();
alglib.ap.assert(s.npoints>=0, "MLPTrainEnsembleES: parameter S is not initialized or is spoiled(S.NPoints<0)");
if( !mlpe.mlpeissoftmax(ensemble) )
{
ntype = 0;
}
else
{
ntype = 1;
}
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
alglib.ap.assert(ntype==ttype, "MLPTrainEnsembleES: internal error - type of input network is not similar to network type in trainer object");
nin = mlpbase.mlpgetinputscount(ensemble.network);
alglib.ap.assert(s.nin==nin, "MLPTrainEnsembleES: number of inputs in trainer is not equal to number of inputs in ensemble network");
nout = mlpbase.mlpgetoutputscount(ensemble.network);
alglib.ap.assert(s.nout==nout, "MLPTrainEnsembleES: number of outputs in trainer is not equal to number of outputs in ensemble network");
// ......... (part of the code omitted here) .........
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 101 | Source: dataanalysis.cs
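Usage sketch (illustrative, with assumed alglib.* wrapper signatures): training an ensemble through a trainer object might look as follows; per the note above, the smp_-prefixed variant requests the parallel code path and runs serially in the GPL edition.
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = { { 0.0, 0.1 }, { 0.3, 0.4 }, { 0.6, 0.7 }, { 0.9, 1.0 } };

alglib.mlptrainer trn;
alglib.mlpcreatetrainer(1, 1, out trn);        // 1 input, 1 output (regression)
alglib.mlpsetdataset(trn, xy, 4);

alglib.mlpensemble ensemble;
alglib.mlpecreate1(1, 5, 1, 10, out ensemble); // ten 1-5-1 networks

alglib.mlpreport rep;
alglib.mlptrainensemblees(trn, ensemble, 3, out rep);     // 3 restarts per ES round
// Parallel variant (serial in the GPL edition, see the note above):
// alglib.smp_mlptrainensemblees(trn, ensemble, 3, out rep);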
Example 4: mlpkfoldcvlm
/*************************************************************************
Cross-validation estimate of generalization error.
Base algorithm - Levenberg-Marquardt.
INPUT PARAMETERS:
Network - neural network with initialized geometry. Network is
not changed during cross-validation - it is used only
as a representative of its architecture.
XY - training set.
SSize - training set size
Decay - weight decay, same as in MLPTrainLBFGS
Restarts - number of restarts, >0.
restarts are counted for each partition separately, so
total number of restarts will be Restarts*FoldsCount.
FoldsCount - number of folds in k-fold cross-validation,
2<=FoldsCount<=SSize.
recommended value: 10.
OUTPUT PARAMETERS:
Info - return code, same as in MLPTrainLBFGS
Rep - report, same as in MLPTrainLM/MLPTrainLBFGS
CVRep - generalization error estimates
-- ALGLIB --
Copyright 09.12.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpkfoldcvlm(mlpbase.multilayerperceptron network,
double[,] xy,
int npoints,
double decay,
int restarts,
int foldscount,
ref int info,
mlpreport rep,
mlpcvreport cvrep)
{
info = 0;
mlpkfoldcvgeneral(network, xy, npoints, decay, restarts, foldscount, true, 0.0, 0, ref info, rep, cvrep);
}
Developer: lgatto | Project: proteowizard | Lines: 41 | Source: dataanalysis.cs
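A hedged sketch of the corresponding call (the public wrapper signature is assumed; the 10-fold value follows the recommendation in the comment block above):
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = new double[20, 2];
for (int i = 0; i < 20; i++) { xy[i, 0] = 0.1 * i; xy[i, 1] = Math.Sin(0.1 * i); }

alglib.multilayerperceptron net;
alglib.mlpcreate1(1, 6, 1, out net);  // architecture template only; net itself is not trained

int info;
alglib.mlpreport rep;
alglib.mlpcvreport cvrep;
alglib.mlpkfoldcvlm(net, xy, 20, 0.001, 2, 10, out info, out rep, out cvrep);
Console.WriteLine("CV RMS error: {0}", cvrep.rmserror);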
Example 5: dataset
/*************************************************************************
This function trains neural network passed to this function, using current
dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
and current training settings. Training from NRestarts random starting
positions is performed, best network is chosen.
Training is performed using current training algorithm.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed, best network is chosen after
training
* NRestarts=0 means that current state of the network
is used for training.
OUTPUT PARAMETERS:
Network - trained network
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
the network is filled with zero values. The same behavior applies to
MLPStartTraining and MLPContinueTraining.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
public static void mlptrainnetwork(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
mlpreport rep)
{
int nin = 0;
int nout = 0;
int wcount = 0;
int ntype = 0;
int ttype = 0;
alglib.ap.assert(s.npoints>=0, "MLPTrainNetwork: parameter S is not initialized or is spoiled(S.NPoints<0)");
if( !mlpbase.mlpissoftmax(network) )
{
ntype = 0;
}
else
{
ntype = 1;
}
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
alglib.ap.assert(ntype==ttype, "MLPTrainNetwork: type of input network is not similar to network type in trainer object");
mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
alglib.ap.assert(s.nin==nin, "MLPTrainNetwork: number of inputs in trainer is not equal to number of inputs in network");
alglib.ap.assert(s.nout==nout, "MLPTrainNetwork: number of outputs in trainer is not equal to number of outputs in network");
alglib.ap.assert(nrestarts>=0, "MLPTrainNetwork: NRestarts<0.");
apserv.rvectorsetlengthatleast(ref s.wbest, wcount);
apserv.rvectorsetlengthatleast(ref s.wfinal, wcount);
//
// Create LBFGS optimizer
//
minlbfgs.minlbfgscreate(wcount, Math.Min(wcount, s.lbfgsfactor), network.weights, s.tstate);
minlbfgs.minlbfgssetcond(s.tstate, 0.0, 0.0, s.wstep, s.maxits);
minlbfgs.minlbfgssetxrep(s.tstate, true);
//
// Create duplicate of the network
//
mlpbase.mlpcopy(network, s.tnetwork);
//
// Train
//
mlptrainnetworkx(s, network, s.tnetwork, s.tstate, nrestarts, s.subset, -1, s.subset, 0, s.wbest, s.wfinal, rep);
}
Developer: thunder176 | Project: HeuristicLab | Lines: 85 | Source: dataanalysis.cs
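Usage sketch (assumed alglib.* wrappers): a complete trainer-based run of mlptrainnetwork might look like this.
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = { { 0.0, 0.0 }, { 0.5, 0.25 }, { 1.0, 1.0 } };  // samples of y = x^2

alglib.mlptrainer trn;
alglib.mlpcreatetrainer(1, 1, out trn);
alglib.mlpsetdataset(trn, xy, 3);
alglib.mlpsetdecay(trn, 0.001);
alglib.mlpsetcond(trn, 0.01, 0);     // WStep=0.01, MaxIts=0 (no iteration limit)

alglib.multilayerperceptron net;
alglib.mlpcreate1(1, 4, 1, out net);

alglib.mlpreport rep;
alglib.mlptrainnetwork(trn, net, 5, out rep);  // 5 random restarts, best network kept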
Example 6: problems
/*************************************************************************
Neural network training using modified Levenberg-Marquardt with exact
Hessian calculation and regularization. Subroutine trains neural network
with restarts from random positions. Algorithm is well suited for small
and medium scale problems (hundreds of weights).
INPUT PARAMETERS:
Network - neural network with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay constant, >=0.001
Decay term 'Decay*||Weights||^2' is added to error
function.
If you don't know what Decay to choose, use 0.001.
Restarts - number of restarts from random position, >0.
If you don't know what Restarts to choose, use 2.
OUTPUT PARAMETERS:
Network - trained neural network.
Info - return code:
* -9, if internal matrix inverse subroutine failed
* -2, if there is a point with class number
outside of [0..NOut-1].
* -1, if wrong parameters were specified
(NPoints<0, Restarts<1).
* 2, if task has been solved.
Rep - training report
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
public static void mlptrainlm(mlpbase.multilayerperceptron network,
double[,] xy,
int npoints,
double decay,
int restarts,
ref int info,
mlpreport rep)
{
int nin = 0;
int nout = 0;
int wcount = 0;
double lmftol = 0;
double lmsteptol = 0;
int i = 0;
int k = 0;
double v = 0;
double e = 0;
double enew = 0;
double xnorm2 = 0;
double stepnorm = 0;
double[] g = new double[0];
double[] d = new double[0];
double[,] h = new double[0,0];
double[,] hmod = new double[0,0];
double[,] z = new double[0,0];
bool spd = new bool();
double nu = 0;
double lambdav = 0;
double lambdaup = 0;
double lambdadown = 0;
minlbfgs.minlbfgsreport internalrep = new minlbfgs.minlbfgsreport();
minlbfgs.minlbfgsstate state = new minlbfgs.minlbfgsstate();
double[] x = new double[0];
double[] y = new double[0];
double[] wbase = new double[0];
double[] wdir = new double[0];
double[] wt = new double[0];
double[] wx = new double[0];
int pass = 0;
double[] wbest = new double[0];
double ebest = 0;
int invinfo = 0;
matinv.matinvreport invrep = new matinv.matinvreport();
int solverinfo = 0;
densesolver.densesolverreport solverrep = new densesolver.densesolverreport();
int i_ = 0;
info = 0;
mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
lambdaup = 10;
lambdadown = 0.3;
lmftol = 0.001;
lmsteptol = 0.001;
//
// Test for inputs
//
if( npoints<=0 || restarts<1 )
{
info = -1;
return;
}
if( mlpbase.mlpissoftmax(network) )
{
for(i=0; i<=npoints-1; i++)
{
if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
{
// ......... (part of the code omitted here) .........
Developer: lgatto | Project: proteowizard | Lines: 101 | Source: dataanalysis.cs
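Usage sketch (assumed alglib.mlptrainlm wrapper; Decay and Restarts use the defaults recommended in the comment block above):
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = { { 0.0, 1.0 }, { 1.0, 3.0 }, { 2.0, 5.0 } };  // samples of y = 2x + 1

alglib.multilayerperceptron net;
alglib.mlpcreate1(1, 3, 1, out net);

int info;
alglib.mlpreport rep;
alglib.mlptrainlm(net, xy, 3, 0.001, 2, out info, out rep);

// Info=2 signals success; rep.nhess counts the exact Hessian evaluations.
Console.WriteLine("info={0}, nhess={1}", info, rep.nhess);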
Example 7: stopping
/*************************************************************************
Neural network training using early stopping (base algorithm - L-BFGS with
regularization).
INPUT PARAMETERS:
Network - neural network with initialized geometry
TrnXY - training set
TrnSize - training set size, TrnSize>0
ValXY - validation set
ValSize - validation set size, ValSize>0
Decay - weight decay constant, >=0.001
Decay term 'Decay*||Weights||^2' is added to error
function.
If you don't know what Decay to choose, use 0.001.
Restarts - number of restarts, either:
* strictly positive number - the algorithm makes the specified
number of restarts from random position.
* -1, in which case algorithm makes exactly one run
from the initial state of the network (no randomization).
If you don't know what Restarts to choose, choose one
of the following:
* -1 (deterministic start)
* +1 (one random restart)
* +5 (moderate amount of random restarts)
OUTPUT PARAMETERS:
Network - trained neural network.
Info - return code:
* -2, if there is a point with class number
outside of [0..NOut-1].
* -1, if wrong parameters were specified
(NPoints<0, Restarts<1, ...).
* 2, task has been solved, stopping criterion met -
sufficiently small step size. Not expected (we
use EARLY stopping) but possible and not an
error.
* 6, task has been solved, stopping criterion met -
increasing of validation set error.
Rep - training report
NOTE:
The algorithm stops if the validation set error increases for long
enough, or if the step size becomes small enough (there are tasks where
the validation set error may keep decreasing forever). In any case, the
returned solution corresponds to the minimum of the validation set error.
-- ALGLIB --
Copyright 10.03.2009 by Bochkanov Sergey
*************************************************************************/
public static void mlptraines(mlpbase.multilayerperceptron network,
double[,] trnxy,
int trnsize,
double[,] valxy,
int valsize,
double decay,
int restarts,
ref int info,
mlpreport rep)
{
int i = 0;
int pass = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
double[] w = new double[0];
double[] wbest = new double[0];
double e = 0;
double v = 0;
double ebest = 0;
double[] wfinal = new double[0];
double efinal = 0;
int itcnt = 0;
int itbest = 0;
minlbfgs.minlbfgsreport internalrep = new minlbfgs.minlbfgsreport();
minlbfgs.minlbfgsstate state = new minlbfgs.minlbfgsstate();
double wstep = 0;
bool needrandomization = new bool();
int i_ = 0;
info = 0;
wstep = 0.001;
//
// Test inputs, parse flags, read network geometry
//
if( ((trnsize<=0 || valsize<=0) || (restarts<1 && restarts!=-1)) || (double)(decay)<(double)(0) )
{
info = -1;
return;
}
if( restarts==-1 )
{
needrandomization = false;
restarts = 1;
}
else
{
needrandomization = true;
// ......... (part of the code omitted here) .........
Developer: lgatto | Project: proteowizard | Lines: 101 | Source: dataanalysis.cs
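Usage sketch (assumed alglib.mlptraines wrapper) with an explicit train/validation split; Restarts=-1 requests the deterministic single run described above.
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] trnxy = { { 0.0, 0.0 }, { 0.4, 0.16 }, { 0.8, 0.64 } };  // training set
double[,] valxy = { { 0.2, 0.04 }, { 0.6, 0.36 } };                // validation set

alglib.multilayerperceptron net;
alglib.mlpcreate1(1, 4, 1, out net);

int info;
alglib.mlpreport rep;
alglib.mlptraines(net, trnxy, 3, valxy, 2, 0.001, -1, out info, out rep);
// Info=2 or 6 means solved; the value tells you which stopping criterion fired.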
Example 8: support
/*************************************************************************
This function estimates generalization error using cross-validation on the
current dataset with current training settings.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. The following
! operations can be executed in parallel:
! * FoldsCount cross-validation rounds (always)
! * NRestarts training sessions performed within each of
! cross-validation rounds (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you will still be able to call the smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object. Network is not changed during cross-
validation and is not trained - it is used only as
a representative of its architecture. I.e., we estimate
generalization properties of ARCHITECTURE, not some
specific network.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that for each cross-validation
round specified number of random restarts is
performed, with best network being chosen after
training.
* NRestarts=0 is same as NRestarts=1
FoldsCount - number of folds in k-fold cross-validation:
* 2<=FoldsCount<=size of dataset
* recommended value: 10.
* values larger than dataset size will be silently
truncated down to dataset size
OUTPUT PARAMETERS:
Rep - structure which contains cross-validation estimates:
* Rep.RelCLSError - fraction of misclassified cases.
* Rep.AvgCE - average cross-entropy
* Rep.RMSError - root-mean-square error
* Rep.AvgError - average error
* Rep.AvgRelError - average relative error
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or a subset with only one point was given, zeros are returned as
estimates.
NOTE: this method performs FoldsCount cross-validation rounds, each one
with NRestarts random starts. Thus, FoldsCount*NRestarts networks
are trained in total.
NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
contain errors in prediction of posterior probabilities.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
public static void mlpkfoldcv(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
int foldscount,
mlpreport rep)
{
alglib.smp.shared_pool pooldatacv = new alglib.smp.shared_pool();
mlpparallelizationcv datacv = new mlpparallelizationcv();
mlpparallelizationcv sdatacv = null;
double[,] cvy = new double[0,0];
int[] folds = new int[0];
double[] buf = new double[0];
double[] dy = new double[0];
int nin = 0;
int nout = 0;
int wcount = 0;
int rowsize = 0;
int ntype = 0;
int ttype = 0;
int i = 0;
// ......... (part of the code omitted here) .........
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 101 | Source: dataanalysis.cs
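Usage sketch (assumed alglib.* wrappers) for a small classification problem; note that for a classifier the last column of the dataset holds the class label.
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = { { 0.1, 0 }, { 0.2, 0 }, { 0.8, 1 }, { 0.9, 1 } };

alglib.mlptrainer trn;
alglib.mlpcreatetrainercls(1, 2, out trn);   // 1 input, 2 classes
alglib.mlpsetdataset(trn, xy, 4);

alglib.multilayerperceptron net;
alglib.mlpcreatec1(1, 3, 2, out net);        // softmax classifier, architecture only

alglib.mlpreport rep;
alglib.mlpkfoldcv(trn, net, 1, 4, out rep);  // 4 folds over 4 points (leave-one-out)
Console.WriteLine("misclassified fraction: {0}", rep.relclserror);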
Example 9: _pexec_mlpkfoldcv
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
*************************************************************************/
public static void _pexec_mlpkfoldcv(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
int foldscount,
mlpreport rep)
{
mlpkfoldcv(s,network,nrestarts,foldscount,rep);
}
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 11 | Source: dataanalysis.cs
Example 10: init
public override void init()
{
trnsubset = new int[0];
valsubset = new int[0];
mlpsessions = new alglib.smp.shared_pool();
mlprep = new mlpreport();
network = new mlpbase.multilayerperceptron();
}
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 8 | Source: dataanalysis.cs
Example 11: make_copy
public override alglib.apobject make_copy()
{
mlpreport _result = new mlpreport();
_result.relclserror = relclserror;
_result.avgce = avgce;
_result.rmserror = rmserror;
_result.avgerror = avgerror;
_result.avgrelerror = avgrelerror;
_result.ngrad = ngrad;
_result.nhess = nhess;
_result.ncholesky = ncholesky;
return _result;
}
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 13 | Source: dataanalysis.cs
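The copy above enumerates the full surface of mlpreport: five error metrics plus three effort counters. As a small illustrative helper (field names assumed to match the public wrapper), dumping a report after training could look like this:
using System;

static class ReportPrinter
{
    // Prints every field that make_copy() duplicates above.
    public static void Print(alglib.mlpreport rep)
    {
        Console.WriteLine("relclserror = {0}", rep.relclserror); // classification only
        Console.WriteLine("avgce       = {0}", rep.avgce);       // classification only
        Console.WriteLine("rmserror    = {0}", rep.rmserror);
        Console.WriteLine("avgerror    = {0}", rep.avgerror);
        Console.WriteLine("avgrelerror = {0}", rep.avgrelerror);
        Console.WriteLine("ngrad/nhess/ncholesky = {0}/{1}/{2}",
            rep.ngrad, rep.nhess, rep.ncholesky);
    }
}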
Example 12: restarts
/*************************************************************************
This function trains neural network ensemble passed to this function using
current dataset and early stopping training algorithm. Each early stopping
round performs NRestarts random restarts (thus, EnsembleSize*NRestarts
training rounds is performed in total).
INPUT PARAMETERS:
S - trainer object;
Ensemble - neural network ensemble. It must have same number of
inputs and outputs/classes as was specified during
creation of the trainer object.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed during each ES round;
* NRestarts=0 is silently replaced by 1.
OUTPUT PARAMETERS:
Ensemble - trained ensemble;
Rep - it contains all types of errors.
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or a single-point dataset was passed, the ensemble is filled with zero
values.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 22.08.2012 by Bochkanov Sergey
*************************************************************************/
public static void mlptrainensemblees(mlptrainer s,
mlpe.mlpensemble ensemble,
int nrestarts,
mlpreport rep)
{
int pcount = 0;
mlpreport tmprep = new mlpreport();
int nin = 0;
int nout = 0;
int wcount = 0;
int ntype = 0;
int ttype = 0;
int i = 0;
int k = 0;
int i_ = 0;
int i1_ = 0;
alglib.ap.assert(s.npoints>=0, "MLPTrainEnsembleES: parameter S is not initialized or is spoiled(S.NPoints<0)");
if( !mlpe.mlpeissoftmax(ensemble) )
{
ntype = 0;
}
else
{
ntype = 1;
}
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
alglib.ap.assert(ntype==ttype, "MLPTrainEnsembleES: internal error - type of input network is not similar to network type in trainer object");
nin = mlpbase.mlpgetinputscount(ensemble.network);
alglib.ap.assert(s.nin==nin, "MLPTrainEnsembleES: number of inputs in trainer is not equal to number of inputs in ensemble network");
nout = mlpbase.mlpgetoutputscount(ensemble.network);
alglib.ap.assert(s.nout==nout, "MLPTrainEnsembleES: number of outputs in trainer is not equal to number of outputs in ensemble network");
alglib.ap.assert(nrestarts>=0, "MLPTrainEnsembleES: NRestarts<0.");
wcount = mlpbase.mlpgetweightscount(ensemble.network);
//
// Initialize parameter Rep
//
rep.relclserror = 0;
rep.avgce = 0;
rep.rmserror = 0;
rep.avgerror = 0;
rep.avgrelerror = 0;
rep.ngrad = 0;
rep.nhess = 0;
rep.ncholesky = 0;
//
// Allocate
//
if( mlpbase.mlpissoftmax(ensemble.network) )
{
pcount = nin;
}
else
{
pcount = nin+nout;
}
apserv.ivectorsetlengthatleast(ref s.subset, s.npoints);
apserv.ivectorsetlengthatleast(ref s.valsubset, s.npoints);
apserv.rvectorsetlengthatleast(ref s.wbest, wcount);
apserv.rvectorsetlengthatleast(ref s.wfinal, wcount);
//
// ......... (part of the code omitted here) .........
Developer: thunder176 | Project: HeuristicLab | Lines: 101 | Source: dataanalysis.cs
Example 13: dataset
/*************************************************************************
This function trains neural network passed to this function, using current
dataset (one which was passed to MLPSetDataset() or MLPSetSparseDataset())
and current training settings. Training from NRestarts random starting
positions is performed, best network is chosen.
This function is intended to be used internally. It may be used in several
settings:
* training with ValSubsetSize=0, corresponds to "normal" training with
termination criteria based on S.MaxIts (steps count) and S.WStep (step
size). Training sample is given by TrnSubset/TrnSubsetSize.
* training with ValSubsetSize>0, corresponds to early stopping training
with additional MaxIts/WStep stopping criteria. Training sample is given
by TrnSubset/TrnSubsetSize, validation sample is given by ValSubset/
ValSubsetSize.
-- ALGLIB --
Copyright 13.08.2012 by Bochkanov Sergey
*************************************************************************/
private static void mlptrainnetworkx(mlptrainer s,
int nrestarts,
int algokind,
int[] trnsubset,
int trnsubsetsize,
int[] valsubset,
int valsubsetsize,
mlpbase.multilayerperceptron network,
mlpreport rep,
bool isrootcall,
alglib.smp.shared_pool sessions)
{
mlpbase.modelerrors modrep = new mlpbase.modelerrors();
double eval = 0;
double ebest = 0;
int ngradbatch = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int pcount = 0;
int itbest = 0;
int itcnt = 0;
int ntype = 0;
int ttype = 0;
bool rndstart = new bool();
int i = 0;
int nr0 = 0;
int nr1 = 0;
mlpreport rep0 = new mlpreport();
mlpreport rep1 = new mlpreport();
bool randomizenetwork = new bool();
double bestrmserror = 0;
smlptrnsession psession = null;
int i_ = 0;
mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
//
// Process root call
//
if( isrootcall )
{
//
// Check correctness of parameters
//
alglib.ap.assert(algokind==0 || algokind==-1, "MLPTrainNetworkX: unexpected AlgoKind");
alglib.ap.assert(s.npoints>=0, "MLPTrainNetworkX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0)");
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
if( !mlpbase.mlpissoftmax(network) )
{
ntype = 0;
}
else
{
ntype = 1;
}
alglib.ap.assert(ntype==ttype, "MLPTrainNetworkX: internal error - type of the training network is not similar to network type in trainer object");
alglib.ap.assert(s.nin==nin, "MLPTrainNetworkX: internal error - number of inputs in trainer is not equal to number of inputs in the training network.");
alglib.ap.assert(s.nout==nout, "MLPTrainNetworkX: internal error - number of outputs in trainer is not equal to number of outputs in the training network.");
alglib.ap.assert(nrestarts>=0, "MLPTrainNetworkX: internal error - NRestarts<0.");
alglib.ap.assert(alglib.ap.len(trnsubset)>=trnsubsetsize, "MLPTrainNetworkX: internal error - parameter TrnSubsetSize more than input subset size(Length(TrnSubset)<TrnSubsetSize)");
for(i=0; i<=trnsubsetsize-1; i++)
{
alglib.ap.assert(trnsubset[i]>=0 && trnsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter TrnSubset contains incorrect index(TrnSubset[I]<0 or TrnSubset[I]>S.NPoints-1)");
}
alglib.ap.assert(alglib.ap.len(valsubset)>=valsubsetsize, "MLPTrainNetworkX: internal error - parameter ValSubsetSize more than input subset size(Length(ValSubset)<ValSubsetSize)");
for(i=0; i<=valsubsetsize-1; i++)
{
alglib.ap.assert(valsubset[i]>=0 && valsubset[i]<=s.npoints-1, "MLPTrainNetworkX: internal error - parameter ValSubset contains incorrect index(ValSubset[I]<0 or ValSubset[I]>S.NPoints-1)");
}
//
// Train
// ......... (part of the code omitted here) .........
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 101 | Source: dataanalysis.cs
Example 14: mlpebagginginternal
/*************************************************************************
Internal bagging subroutine.
-- ALGLIB --
Copyright 19.02.2009 by Bochkanov Sergey
*************************************************************************/
private static void mlpebagginginternal(mlpe.mlpensemble ensemble,
double[,] xy,
int npoints,
double decay,
int restarts,
double wstep,
int maxits,
bool lmalgorithm,
ref int info,
mlpreport rep,
mlpcvreport ooberrors)
{
double[,] xys = new double[0,0];
bool[] s = new bool[0];
double[,] oobbuf = new double[0,0];
int[] oobcntbuf = new int[0];
double[] x = new double[0];
double[] y = new double[0];
double[] dy = new double[0];
double[] dsbuf = new double[0];
int ccnt = 0;
int pcnt = 0;
int i = 0;
int j = 0;
int k = 0;
double v = 0;
mlpreport tmprep = new mlpreport();
int nin = 0;
int nout = 0;
int wcount = 0;
hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
int i_ = 0;
int i1_ = 0;
info = 0;
nin = mlpbase.mlpgetinputscount(ensemble.network);
nout = mlpbase.mlpgetoutputscount(ensemble.network);
wcount = mlpbase.mlpgetweightscount(ensemble.network);
//
// Test for inputs
//
if( (!lmalgorithm && (double)(wstep)==(double)(0)) && maxits==0 )
{
info = -8;
return;
}
if( ((npoints<=0 || restarts<1) || (double)(wstep)<(double)(0)) || maxits<0 )
{
info = -1;
return;
}
if( mlpbase.mlpissoftmax(ensemble.network) )
{
for(i=0; i<=npoints-1; i++)
{
if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
{
info = -2;
return;
}
}
}
//
// allocate temporaries
//
info = 2;
rep.ngrad = 0;
rep.nhess = 0;
rep.ncholesky = 0;
ooberrors.relclserror = 0;
ooberrors.avgce = 0;
ooberrors.rmserror = 0;
ooberrors.avgerror = 0;
ooberrors.avgrelerror = 0;
if( mlpbase.mlpissoftmax(ensemble.network) )
{
ccnt = nin+1;
pcnt = nin;
}
else
{
ccnt = nin+nout;
pcnt = nin+nout;
}
xys = new double[npoints, ccnt];
s = new bool[npoints];
oobbuf = new double[npoints, nout];
oobcntbuf = new int[npoints];
x = new double[nin];
y = new double[nout];
if( mlpbase.mlpissoftmax(ensemble.network) )
// ......... (part of the code omitted here) .........
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 101 | Source: dataanalysis.cs
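This internal routine backs ALGLIB's public bagging entry points. A hedged sketch of the Levenberg-Marquardt flavor follows (mlpebagginglm is an assumed wrapper name; OOBErrors receives the out-of-bag estimates mentioned in Example 2):
// Inside a Main() method; assumes `using System;` and a reference to the ALGLIB assembly.
double[,] xy = { { 0.0, 0.0 }, { 1.0, 2.0 }, { 2.0, 4.0 }, { 3.0, 6.0 } };

alglib.mlpensemble ensemble;
alglib.mlpecreate1(1, 3, 1, 10, out ensemble);  // 10 bagged networks

int info;
alglib.mlpreport rep;
alglib.mlpcvreport ooberrors;
alglib.mlpebagginglm(ensemble, xy, 4, 0.001, 2, out info, out rep, out ooberrors);
Console.WriteLine("OOB RMS error: {0}", ooberrors.rmserror);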
Example 15: _pexec_mlptrainnetwork
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it with multithreaded code.
*************************************************************************/
public static void _pexec_mlptrainnetwork(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
mlpreport rep)
{
mlptrainnetwork(s,network,nrestarts,rep);
}
Developer: Kerbas-ad-astra | Project: MechJeb2 | Lines: 10 | Source: dataanalysis.cs