This page collects typical usage examples of the multilayerperceptron class in C#. If you are wondering what the C# multilayerperceptron class does, how to use it, or where to find working examples of it, the curated code samples below may help.
The multilayerperceptron class belongs to a namespace (not specified on the original page). 20 code examples of the multilayerperceptron class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
Example 1: mlpcreate0
/*************************************************************************
Creates neural network with NIn inputs, NOut outputs, without hidden
layers, with linear output layer. Network weights are filled with small
random values.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpcreate0(int nin,
int nout,
ref multilayerperceptron network)
{
int[] lsizes = new int[0];
int[] ltypes = new int[0];
int[] lconnfirst = new int[0];
int[] lconnlast = new int[0];
int layerscount = 0;
int lastproc = 0;
layerscount = 1+2;
//
// Allocate arrays
//
lsizes = new int[layerscount-1+1];
ltypes = new int[layerscount-1+1];
lconnfirst = new int[layerscount-1+1];
lconnlast = new int[layerscount-1+1];
//
// Layers
//
addinputlayer(nin, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nout, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
//
// Create
//
mlpcreate(nin, nout, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, layerscount, false, ref network);
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 40, Source: mlpbase.cs
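A minimal usage sketch for the example above: create a 2-input, 1-output network with no hidden layers and run one input vector through it with mlpprocess (Example 11). The parameterless constructor of multilayerperceptron and the ability to call these static methods without a class qualifier are assumptions; adjust to the class/namespace of your build.
multilayerperceptron network = new multilayerperceptron();  // assumed parameterless constructor
mlpcreate0(2, 1, ref network);                              // 2 inputs, 1 linear output, no hidden layers
double[] x = new double[] { 0.5, -1.0 };                    // one input sample, length NIn
double[] y = new double[1];                                 // output buffer must be allocated by the caller (see Example 11)
mlpprocess(ref network, ref x, ref y);
System.Console.WriteLine(y[0]);                             // regression estimate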
Example 2: mlpcreate2
/*************************************************************************
Same as MLPCreate0, but with two hidden layers (NHid1 and NHid2 neurons)
with non-linear activation function. Output layer is linear.
$ALL
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpcreate2(int nin,
int nhid1,
int nhid2,
int nout,
multilayerperceptron network)
{
int[] lsizes = new int[0];
int[] ltypes = new int[0];
int[] lconnfirst = new int[0];
int[] lconnlast = new int[0];
int layerscount = 0;
int lastproc = 0;
layerscount = 1+3+3+3;
//
// Allocate arrays
//
lsizes = new int[layerscount-1+1];
ltypes = new int[layerscount-1+1];
lconnfirst = new int[layerscount-1+1];
lconnlast = new int[layerscount-1+1];
//
// Layers
//
addinputlayer(nin, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nhid1, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addactivationlayer(1, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nhid2, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addactivationlayer(1, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nout, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addactivationlayer(-5, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
//
// Create
//
mlpcreate(nin, nout, lsizes, ltypes, lconnfirst, lconnlast, layerscount, false, network);
fillhighlevelinformation(network, nin, nhid1, nhid2, nout, false, true);
}
Developer ID: lgatto, Project: proteowizard, Lines: 48, Source: dataanalysis.cs
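A hypothetical call for the variant above. Note that this example comes from the dataanalysis.cs translation, where the network is a reference type passed without ref (unlike the mlpbase.cs examples elsewhere on this page); the constructor and any class qualification are assumptions.
multilayerperceptron network = new multilayerperceptron();  // assumed parameterless constructor
mlpcreate2(4, 10, 5, 2, network);                           // 4 inputs, hidden layers of 10 and 5 neurons, 2 linear outputs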
Example 3: mlpunserialize
/*************************************************************************
Unserialization of MultiLayerPerceptron structure
INPUT PARAMETERS:
RA - real array which stores network
OUTPUT PARAMETERS:
Network - restored network
-- ALGLIB --
Copyright 29.03.2008 by Bochkanov Sergey
*************************************************************************/
public static void mlpunserialize(ref double[] ra,
ref multilayerperceptron network)
{
int i = 0;
int ssize = 0;
int ntotal = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int sigmalen = 0;
int offs = 0;
int i_ = 0;
int i1_ = 0;
System.Diagnostics.Debug.Assert((int)Math.Round(ra[1])==mlpvnum, "MLPUnserialize: incorrect array!");
//
// Unload StructInfo from IA
//
offs = 3;
ssize = (int)Math.Round(ra[2]);
network.structinfo = new int[ssize-1+1];
for(i=0; i<=ssize-1; i++)
{
network.structinfo[i] = (int)Math.Round(ra[offs+i]);
}
offs = offs+ssize;
//
// Unload info from StructInfo
//
ssize = network.structinfo[0];
nin = network.structinfo[1];
nout = network.structinfo[2];
ntotal = network.structinfo[3];
wcount = network.structinfo[4];
if( network.structinfo[6]==0 )
{
sigmalen = nin+nout;
}
else
{
sigmalen = nin;
}
//
// Allocate space for other fields
//
network.weights = new double[wcount-1+1];
network.columnmeans = new double[sigmalen-1+1];
network.columnsigmas = new double[sigmalen-1+1];
network.neurons = new double[ntotal-1+1];
network.chunks = new double[3*ntotal+1, chunksize-1+1];
network.nwbuf = new double[Math.Max(wcount, 2*nout)-1+1];
network.dfdnet = new double[ntotal-1+1];
network.x = new double[nin-1+1];
network.y = new double[nout-1+1];
network.derror = new double[ntotal-1+1];
//
// Copy parameters from RA
//
i1_ = (offs) - (0);
for(i_=0; i_<=wcount-1;i_++)
{
network.weights[i_] = ra[i_+i1_];
}
offs = offs+wcount;
i1_ = (offs) - (0);
for(i_=0; i_<=sigmalen-1;i_++)
{
network.columnmeans[i_] = ra[i_+i1_];
}
offs = offs+sigmalen;
i1_ = (offs) - (0);
for(i_=0; i_<=sigmalen-1;i_++)
{
network.columnsigmas[i_] = ra[i_+i1_];
}
offs = offs+sigmalen;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 93, Source: mlpbase.cs
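A sketch of restoring a network from its serialized representation. Only mlpunserialize above is confirmed by this page; the array is assumed to have been produced earlier by the matching serializer, and LoadSavedNetworkArray is a hypothetical helper standing in for however that array was stored.
double[] ra = LoadSavedNetworkArray();                      // hypothetical helper returning the saved real array
multilayerperceptron network = new multilayerperceptron();  // assumed parameterless constructor
mlpunserialize(ref ra, ref network);                        // rebuilds structinfo, weights, column means/sigmas and work buffers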
Example 4: mlpcopy
/*************************************************************************
Copying of neural network
INPUT PARAMETERS:
Network1 - original
OUTPUT PARAMETERS:
Network2 - copy
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpcopy(ref multilayerperceptron network1,
ref multilayerperceptron network2)
{
int i = 0;
int ssize = 0;
int ntotal = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int i_ = 0;
//
// Unload info
//
ssize = network1.structinfo[0];
nin = network1.structinfo[1];
nout = network1.structinfo[2];
ntotal = network1.structinfo[3];
wcount = network1.structinfo[4];
//
// Allocate space
//
network2.structinfo = new int[ssize-1+1];
network2.weights = new double[wcount-1+1];
if( mlpissoftmax(ref network1) )
{
network2.columnmeans = new double[nin-1+1];
network2.columnsigmas = new double[nin-1+1];
}
else
{
network2.columnmeans = new double[nin+nout-1+1];
network2.columnsigmas = new double[nin+nout-1+1];
}
network2.neurons = new double[ntotal-1+1];
network2.chunks = new double[3*ntotal+1, chunksize-1+1];
network2.nwbuf = new double[Math.Max(wcount, 2*nout)-1+1];
network2.dfdnet = new double[ntotal-1+1];
network2.x = new double[nin-1+1];
network2.y = new double[nout-1+1];
network2.derror = new double[ntotal-1+1];
//
// Copy
//
for(i=0; i<=ssize-1; i++)
{
network2.structinfo[i] = network1.structinfo[i];
}
for(i_=0; i_<=wcount-1;i_++)
{
network2.weights[i_] = network1.weights[i_];
}
if( mlpissoftmax(ref network1) )
{
for(i_=0; i_<=nin-1;i_++)
{
network2.columnmeans[i_] = network1.columnmeans[i_];
}
for(i_=0; i_<=nin-1;i_++)
{
network2.columnsigmas[i_] = network1.columnsigmas[i_];
}
}
else
{
for(i_=0; i_<=nin+nout-1;i_++)
{
network2.columnmeans[i_] = network1.columnmeans[i_];
}
for(i_=0; i_<=nin+nout-1;i_++)
{
network2.columnsigmas[i_] = network1.columnsigmas[i_];
}
}
for(i_=0; i_<=ntotal-1;i_++)
{
network2.neurons[i_] = network1.neurons[i_];
}
for(i_=0; i_<=ntotal-1;i_++)
{
network2.dfdnet[i_] = network1.dfdnet[i_];
}
for(i_=0; i_<=nin-1;i_++)
{
network2.x[i_] = network1.x[i_];
//.........remainder of the code omitted.........
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 101, Source: mlpbase.cs
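A short sketch of making an independent copy of an initialized network (constructor and class qualification assumed as before); the copy receives its own weight and standardization arrays, so later changes to one network do not affect the other.
multilayerperceptron original = new multilayerperceptron();
mlpcreate0(3, 1, ref original);                             // any initialized network, e.g. as in Example 1
multilayerperceptron copy = new multilayerperceptron();
mlpcopy(ref original, ref copy);                            // deep copy of structure, weights and column statistics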
Example 5: mlpchunkedgradient
/*************************************************************************
Internal subroutine, chunked gradient
*************************************************************************/
private static void mlpchunkedgradient(ref multilayerperceptron network,
ref double[,] xy,
int cstart,
int csize,
ref double e,
ref double[] grad,
bool naturalerrorfunc)
{
int i = 0;
int j = 0;
int k = 0;
int kl = 0;
int n1 = 0;
int n2 = 0;
int w1 = 0;
int w2 = 0;
int c1 = 0;
int c2 = 0;
int ntotal = 0;
int nin = 0;
int nout = 0;
int offs = 0;
double f = 0;
double df = 0;
double d2f = 0;
double v = 0;
double s = 0;
double fown = 0;
double deown = 0;
double net = 0;
double lnnet = 0;
double mx = 0;
bool bflag = new bool();
int istart = 0;
int ineurons = 0;
int idfdnet = 0;
int iderror = 0;
int izeros = 0;
int i_ = 0;
int i1_ = 0;
//
// Read network geometry, prepare data
//
nin = network.structinfo[1];
nout = network.structinfo[2];
ntotal = network.structinfo[3];
istart = network.structinfo[5];
c1 = cstart;
c2 = cstart+csize-1;
ineurons = 0;
idfdnet = ntotal;
iderror = 2*ntotal;
izeros = 3*ntotal;
for(j=0; j<=csize-1; j++)
{
network.chunks[izeros,j] = 0;
}
//
// Forward pass:
// 1. Load inputs from XY to Chunks[0:NIn-1,0:CSize-1]
// 2. Forward pass
//
for(i=0; i<=nin-1; i++)
{
for(j=0; j<=csize-1; j++)
{
if( (double)(network.columnsigmas[i])!=(double)(0) )
{
network.chunks[i,j] = (xy[c1+j,i]-network.columnmeans[i])/network.columnsigmas[i];
}
else
{
network.chunks[i,j] = xy[c1+j,i]-network.columnmeans[i];
}
}
}
for(i=0; i<=ntotal-1; i++)
{
offs = istart+i*nfieldwidth;
if( network.structinfo[offs+0]>0 )
{
//
// Activation function:
// * calculate F vector, F(i) = F(NET(i))
//
n1 = network.structinfo[offs+2];
for(i_=0; i_<=csize-1;i_++)
{
network.chunks[i,i_] = network.chunks[n1,i_];
}
for(j=0; j<=csize-1; j++)
{
mlpactivationfunction(network.chunks[i,j], network.structinfo[offs+0], ref f, ref df, ref d2f);
//.........remainder of the code omitted.........
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 101, Source: mlpbase.cs
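The routine above works on column blocks of up to ChunkSize samples and, before the forward pass, standardizes each input row of the chunk buffer. Below is a standalone sketch of that standardization rule (mirroring the loop over the first NIn rows); the helper name is illustrative only.
// Standardize one input value: subtract the stored column mean and divide by the
// column sigma, falling back to a plain shift when sigma is zero, as in the code above.
static double Standardize(double value, double mean, double sigma)
{
    if (sigma != 0)
    {
        return (value - mean) / sigma;
    }
    return value - mean;
}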
Example 6: mlpcreate
/*************************************************************************
Internal subroutine.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
private static void mlpcreate(int nin,
int nout,
ref int[] lsizes,
ref int[] ltypes,
ref int[] lconnfirst,
ref int[] lconnlast,
int layerscount,
bool isclsnet,
ref multilayerperceptron network)
{
int i = 0;
int j = 0;
int ssize = 0;
int ntotal = 0;
int wcount = 0;
int offs = 0;
int nprocessed = 0;
int wallocated = 0;
int[] localtemp = new int[0];
int[] lnfirst = new int[0];
int[] lnsyn = new int[0];
//
// Check
//
System.Diagnostics.Debug.Assert(layerscount>0, "MLPCreate: wrong parameters!");
System.Diagnostics.Debug.Assert(ltypes[0]==-2, "MLPCreate: wrong LTypes[0] (must be -2)!");
for(i=0; i<=layerscount-1; i++)
{
System.Diagnostics.Debug.Assert(lsizes[i]>0, "MLPCreate: wrong LSizes!");
System.Diagnostics.Debug.Assert(lconnfirst[i]>=0 & (lconnfirst[i]<i | i==0), "MLPCreate: wrong LConnFirst!");
System.Diagnostics.Debug.Assert(lconnlast[i]>=lconnfirst[i] & (lconnlast[i]<i | i==0), "MLPCreate: wrong LConnLast!");
}
//
// Build network geometry
//
lnfirst = new int[layerscount-1+1];
lnsyn = new int[layerscount-1+1];
ntotal = 0;
wcount = 0;
for(i=0; i<=layerscount-1; i++)
{
//
// Analyze connections.
// This code must throw an assertion in case of unknown LTypes[I]
//
lnsyn[i] = -1;
if( ltypes[i]>=0 )
{
lnsyn[i] = 0;
for(j=lconnfirst[i]; j<=lconnlast[i]; j++)
{
lnsyn[i] = lnsyn[i]+lsizes[j];
}
}
else
{
if( ltypes[i]==-2 | ltypes[i]==-3 | ltypes[i]==-4 )
{
lnsyn[i] = 0;
}
}
System.Diagnostics.Debug.Assert(lnsyn[i]>=0, "MLPCreate: internal error #0!");
//
// Other info
//
lnfirst[i] = ntotal;
ntotal = ntotal+lsizes[i];
if( ltypes[i]==0 )
{
wcount = wcount+lnsyn[i]*lsizes[i];
}
}
ssize = 7+ntotal*nfieldwidth;
//
// Allocate
//
network.structinfo = new int[ssize-1+1];
network.weights = new double[wcount-1+1];
if( isclsnet )
{
network.columnmeans = new double[nin-1+1];
network.columnsigmas = new double[nin-1+1];
}
else
{
network.columnmeans = new double[nin+nout-1+1];
network.columnsigmas = new double[nin+nout-1+1];
}
//.........remainder of the code omitted.........
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 101, Source: mlpbase.cs
Example 7: mlpcreateb1
/*************************************************************************
Same as MLPCreateB0 but with non-linear hidden layer.
-- ALGLIB --
Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
public static void mlpcreateb1(int nin,
int nhid,
int nout,
double b,
double d,
ref multilayerperceptron network)
{
int[] lsizes = new int[0];
int[] ltypes = new int[0];
int[] lconnfirst = new int[0];
int[] lconnlast = new int[0];
int layerscount = 0;
int lastproc = 0;
int i = 0;
layerscount = 1+3+3;
if( (double)(d)>=(double)(0) )
{
d = 1;
}
else
{
d = -1;
}
//
// Allocate arrays
//
lsizes = new int[layerscount-1+1];
ltypes = new int[layerscount-1+1];
lconnfirst = new int[layerscount-1+1];
lconnlast = new int[layerscount-1+1];
//
// Layers
//
addinputlayer(nin, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nhid, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addactivationlayer(1, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addbiasedsummatorlayer(nout, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
addactivationlayer(3, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, ref lastproc);
//
// Create
//
mlpcreate(nin, nout, ref lsizes, ref ltypes, ref lconnfirst, ref lconnlast, layerscount, false, ref network);
//
// Turn on outputs shift/scaling.
//
for(i=nin; i<=nin+nout-1; i++)
{
network.columnmeans[i] = b;
network.columnsigmas[i] = d;
}
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 62, Source: mlpbase.cs
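A hypothetical call for the routine above: B and D set the shift and scale of the bounded output layer, and D is clamped to +1 or -1 before being stored as the output column sigma (see the loop at the end of the example). Constructor and class qualification are assumptions.
multilayerperceptron network = new multilayerperceptron();
mlpcreateb1(3, 8, 1, 0.0, 1.0, ref network);                // 3 inputs, 8 hidden neurons, 1 bounded output; B = 0.0, D clamped to +1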
Example 8: mlperror
/*************************************************************************
Error function for neural network, internal subroutine.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static double mlperror(ref multilayerperceptron network,
ref double[,] xy,
int ssize)
{
double result = 0;
int i = 0;
int k = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
double e = 0;
int i_ = 0;
int i1_ = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
result = 0;
for(i=0; i<=ssize-1; i++)
{
for(i_=0; i_<=nin-1;i_++)
{
network.x[i_] = xy[i,i_];
}
mlpprocess(ref network, ref network.x, ref network.y);
if( mlpissoftmax(ref network) )
{
//
// class labels outputs
//
k = (int)Math.Round(xy[i,nin]);
if( k>=0 & k<nout )
{
network.y[k] = network.y[k]-1;
}
}
else
{
//
// real outputs
//
i1_ = (nin) - (0);
for(i_=0; i_<=nout-1;i_++)
{
network.y[i_] = network.y[i_] - xy[i,i_+i1_];
}
}
e = 0.0;
for(i_=0; i_<=nout-1;i_++)
{
e += network.y[i_]*network.y[i_];
}
result = result+e/2;
}
return result;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 62, Source: mlpbase.cs
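A sketch of evaluating this error on a tiny regression set. From the loop above, each row of XY holds the NIn inputs followed by the NOut targets (or a single class label for softmax networks), and the result is one half of the summed squared residuals. The network is assumed to be a 2-input, 1-output regression network such as the one created in the sketch after Example 1.
double[,] xy = new double[,]
{
    { 0.0, 0.0, 0.0 },                                      // two inputs followed by one target per row
    { 1.0, 1.0, 2.0 },
};
double sse = mlperror(ref network, ref xy, 2);              // 0.5 * sum of squared residuals over both rows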
Example 9: mlperrorn
/*************************************************************************
Natural error function for neural network, internal subroutine.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static double mlperrorn(ref multilayerperceptron network,
ref double[,] xy,
int ssize)
{
double result = 0;
int i = 0;
int k = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
double e = 0;
int i_ = 0;
int i1_ = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
result = 0;
for(i=0; i<=ssize-1; i++)
{
//
// Process vector
//
for(i_=0; i_<=nin-1;i_++)
{
network.x[i_] = xy[i,i_];
}
mlpprocess(ref network, ref network.x, ref network.y);
//
// Update error function
//
if( network.structinfo[6]==0 )
{
//
// Least squares error function
//
i1_ = (nin) - (0);
for(i_=0; i_<=nout-1;i_++)
{
network.y[i_] = network.y[i_] - xy[i,i_+i1_];
}
e = 0.0;
for(i_=0; i_<=nout-1;i_++)
{
e += network.y[i_]*network.y[i_];
}
result = result+e/2;
}
else
{
//
// Cross-entropy error function
//
k = (int)Math.Round(xy[i,nin]);
if( k>=0 & k<nout )
{
result = result+safecrossentropy(1, network.y[k]);
}
}
}
return result;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 70, Source: mlpbase.cs
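A usage sketch only: for regression networks the routine above reduces to the sum-of-squares error of Example 8, while for softmax classifiers it accumulates a cross-entropy term for the true-class output, reading the class label from column NIn of XY. The variables network, xy and npoints are placeholders for an initialized network and its dataset.
double enat = mlperrorn(ref network, ref xy, npoints);      // natural error: least squares or cross-entropy, depending on the network type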
Example 10: mlpissoftmax
/*************************************************************************
Tells whether network is SOFTMAX-normalized (i.e. classifier) or not.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static bool mlpissoftmax(ref multilayerperceptron network)
{
bool result = new bool();
result = network.structinfo[6]==1;
return result;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 13, Source: mlpbase.cs
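A small sketch branching on the network type, e.g. to decide how the trailing columns of a dataset row should be interpreted; this matches how the error routines above read XY.
if (mlpissoftmax(ref network))
{
    // classifier: each XY row ends with a single class label in column NIn
}
else
{
    // regression: each XY row ends with NOut real-valued targets
}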
Example 11: mlpprocess
/*************************************************************************
Processing
INPUT PARAMETERS:
Network - neural network
X - input vector, array[0..NIn-1].
OUTPUT PARAMETERS:
Y - result. Regression estimate when solving regression task,
vector of posterior probabilities for classification task.
Subroutine does not allocate memory for this vector, it is
responsibility of a caller to allocate it. Array must be
at least [0..NOut-1].
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpprocess(ref multilayerperceptron network,
ref double[] x,
ref double[] y)
{
mlpinternalprocessvector(ref network.structinfo, ref network.weights, ref network.columnmeans, ref network.columnsigmas, ref network.neurons, ref network.dfdnet, ref x, ref y);
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 23, Source: mlpbase.cs
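The comment above stresses that Y is not allocated by the subroutine. A minimal sketch, with nin and nout obtained from mlpproperties (Example 12) for an already initialized network.
double[] x = new double[nin];                               // fill with one input sample
double[] y = new double[nout];                              // caller-allocated, at least NOut elements
mlpprocess(ref network, ref x, ref y);                      // y now holds the regression estimate or posterior probabilities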
Example 12: mlpproperties
/*************************************************************************
Returns information about initialized network: number of inputs, outputs,
weights.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpproperties(ref multilayerperceptron network,
ref int nin,
ref int nout,
ref int wcount)
{
nin = network.structinfo[1];
nout = network.structinfo[2];
wcount = network.structinfo[4];
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 16, Source: mlpbase.cs
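A sketch of querying an initialized network, for instance to size a gradient buffer before calling the batch-gradient routines further down this page.
int nin = 0, nout = 0, wcount = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
double[] grad = new double[wcount];                         // one element per network weight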
Example 13: mlpinitpreprocessor
/*************************************************************************
Internal subroutine.
-- ALGLIB --
Copyright 30.03.2008 by Bochkanov Sergey
*************************************************************************/
public static void mlpinitpreprocessor(ref multilayerperceptron network,
ref double[,] xy,
int ssize)
{
int i = 0;
int j = 0;
int jmax = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int ntotal = 0;
int istart = 0;
int offs = 0;
int ntype = 0;
double[] means = new double[0];
double[] sigmas = new double[0];
double s = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
ntotal = network.structinfo[3];
istart = network.structinfo[5];
//
// Means/Sigmas
//
if( mlpissoftmax(ref network) )
{
jmax = nin-1;
}
else
{
jmax = nin+nout-1;
}
means = new double[jmax+1];
sigmas = new double[jmax+1];
for(j=0; j<=jmax; j++)
{
means[j] = 0;
for(i=0; i<=ssize-1; i++)
{
means[j] = means[j]+xy[i,j];
}
means[j] = means[j]/ssize;
sigmas[j] = 0;
for(i=0; i<=ssize-1; i++)
{
sigmas[j] = sigmas[j]+AP.Math.Sqr(xy[i,j]-means[j]);
}
sigmas[j] = Math.Sqrt(sigmas[j]/ssize);
}
//
// Inputs
//
for(i=0; i<=nin-1; i++)
{
network.columnmeans[i] = means[i];
network.columnsigmas[i] = sigmas[i];
if( (double)(network.columnsigmas[i])==(double)(0) )
{
network.columnsigmas[i] = 1;
}
}
//
// Outputs
//
if( !mlpissoftmax(ref network) )
{
for(i=0; i<=nout-1; i++)
{
offs = istart+(ntotal-nout+i)*nfieldwidth;
ntype = network.structinfo[offs+0];
//
// Linear outputs
//
if( ntype==0 )
{
network.columnmeans[nin+i] = means[nin+i];
network.columnsigmas[nin+i] = sigmas[nin+i];
if( (double)(network.columnsigmas[nin+i])==(double)(0) )
{
network.columnsigmas[nin+i] = 1;
}
}
//
// Bounded outputs (half-interval)
//
if( ntype==3 )
{
s = means[nin+i]-network.columnmeans[nin+i];
if( (double)(s)==(double)(0) )
//.........remainder of the code omitted.........
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 101, Source: mlpbase.cs
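A sketch of the typical call order: construct the network, then let the routine above estimate per-column means and sigmas from the training set. LoadTrainingSet is a hypothetical helper returning npoints rows laid out as in Example 8.
double[,] xy = LoadTrainingSet();                           // hypothetical helper: npoints rows of [inputs, targets]
mlpinitpreprocessor(ref network, ref xy, npoints);          // recomputes columnmeans/columnsigmas from the data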
Example 14: mlprandomizefull
/*************************************************************************
Randomization of neural network weights and standardizer
-- ALGLIB --
Copyright 10.03.2008 by Bochkanov Sergey
*************************************************************************/
public static void mlprandomizefull(ref multilayerperceptron network)
{
int i = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int ntotal = 0;
int istart = 0;
int offs = 0;
int ntype = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
ntotal = network.structinfo[3];
istart = network.structinfo[5];
//
// Process network
//
for(i=0; i<=wcount-1; i++)
{
network.weights[i] = AP.Math.RandomReal()-0.5;
}
for(i=0; i<=nin-1; i++)
{
network.columnmeans[i] = 2*AP.Math.RandomReal()-1;
network.columnsigmas[i] = 1.5*AP.Math.RandomReal()+0.5;
}
if( !mlpissoftmax(ref network) )
{
for(i=0; i<=nout-1; i++)
{
offs = istart+(ntotal-nout+i)*nfieldwidth;
ntype = network.structinfo[offs+0];
if( ntype==0 )
{
//
// Shifts are changed only for linear outputs neurons
//
network.columnmeans[nin+i] = 2*AP.Math.RandomReal()-1;
}
if( ntype==0 | ntype==3 )
{
//
// Scales are changed only for linear or bounded outputs neurons.
// Note that scale randomization preserves sign.
//
network.columnsigmas[nin+i] = Math.Sign(network.columnsigmas[nin+i])*(1.5*AP.Math.RandomReal()+0.5);
}
}
}
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 59, Source: mlpbase.cs
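A sketch combining the routine above with mlperror (Example 8) and mlpcopy (Example 4) into a crude multi-restart initialization; network, xy and npoints are placeholders for an initialized network and its training set.
double best = double.MaxValue;
multilayerperceptron bestnet = new multilayerperceptron();
for (int restart = 0; restart < 5; restart++)
{
    mlprandomizefull(ref network);                          // new random weights and standardization parameters
    double err = mlperror(ref network, ref xy, npoints);
    if (err < best)
    {
        best = err;
        mlpcopy(ref network, ref bestnet);                  // remember the best random start
    }
}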
Example 15: mlpgradnbatch
/*************************************************************************
Batch gradient calculation (natural error function). Internal subroutine.
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpgradnbatch(ref multilayerperceptron network,
ref double[,] xy,
int ssize,
ref double e,
ref double[] grad)
{
int i = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
for(i=0; i<=wcount-1; i++)
{
grad[i] = 0;
}
e = 0;
i = 0;
while( i<=ssize-1 )
{
mlpchunkedgradient(ref network, ref xy, i, Math.Min(ssize, i+chunksize)-i, ref e, ref grad, true);
i = i+chunksize;
}
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 30, Source: mlpbase.cs
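A sketch of a batch gradient call. The gradient needs one element per weight (WCount from Example 12); E and Grad are reset inside the routine and then accumulated chunk by chunk, as shown above. network, xy and npoints are placeholders.
int nin = 0, nout = 0, wcount = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
double e = 0;
double[] grad = new double[wcount];
mlpgradnbatch(ref network, ref xy, npoints, ref e, ref grad);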
Example 16: mlpclserror
/*************************************************************************
Classification error
-- ALGLIB --
Copyright 04.11.2007 by Bochkanov Sergey
*************************************************************************/
public static int mlpclserror(ref multilayerperceptron network,
ref double[,] xy,
int ssize)
{
int result = 0;
int i = 0;
int j = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
double[] workx = new double[0];
double[] worky = new double[0];
int nn = 0;
int ns = 0;
int nmax = 0;
int i_ = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
workx = new double[nin-1+1];
worky = new double[nout-1+1];
result = 0;
for(i=0; i<=ssize-1; i++)
{
//
// Process
//
for(i_=0; i_<=nin-1;i_++)
{
workx[i_] = xy[i,i_];
}
mlpprocess(ref network, ref workx, ref worky);
//
// Network version of the answer
//
nmax = 0;
for(j=0; j<=nout-1; j++)
{
if( (double)(worky[j])>(double)(worky[nmax]) )
{
nmax = j;
}
}
nn = nmax;
//
// Right answer
//
if( mlpissoftmax(ref network) )
{
ns = (int)Math.Round(xy[i,nin]);
}
else
{
nmax = 0;
for(j=0; j<=nout-1; j++)
{
if( (double)(xy[i,nin+j])>(double)(xy[i,nin+nmax]) )
{
nmax = j;
}
}
ns = nmax;
}
//
// compare
//
if( nn!=ns )
{
result = result+1;
}
}
return result;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 82, Source: mlpbase.cs
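A sketch of reporting the misclassification count together with the rate; dividing by the number of points gives the same value that mlprelclserror (Example 18) returns. network, xy and npoints are placeholders.
int wrong = mlpclserror(ref network, ref xy, npoints);      // number of misclassified samples
double rate = (double)wrong / npoints;
System.Console.WriteLine("misclassified: " + wrong + " (" + rate + ")");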
Example 17: mlphessianbatch
/*************************************************************************
Batch Hessian calculation using R-algorithm.
Internal subroutine.
-- ALGLIB --
Copyright 26.01.2008 by Bochkanov Sergey.
Hessian calculation based on R-algorithm described in
"Fast Exact Multiplication by the Hessian",
B. A. Pearlmutter,
Neural Computation, 1994.
*************************************************************************/
public static void mlphessianbatch(ref multilayerperceptron network,
ref double[,] xy,
int ssize,
ref double e,
ref double[] grad,
ref double[,] h)
{
mlphessianbatchinternal(ref network, ref xy, ssize, false, ref e, ref grad, ref h);
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 21, Source: mlpbase.cs
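A sketch of allocating the outputs for the Hessian call; the gradient has WCount elements and the Hessian is assumed to be WCount x WCount (WCount queried as in Example 12). network, xy and npoints are placeholders.
int nin = 0, nout = 0, wcount = 0;
mlpproperties(ref network, ref nin, ref nout, ref wcount);
double e = 0;
double[] grad = new double[wcount];
double[,] h = new double[wcount, wcount];
mlphessianbatch(ref network, ref xy, npoints, ref e, ref grad, ref h);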
Example 18: mlprelclserror
/*************************************************************************
Relative classification error on the test set
INPUT PARAMETERS:
Network - network
XY - test set
NPoints - test set size
RESULT:
percent of incorrectly classified cases. Works both for
classifier networks and general purpose networks used as
classifiers.
-- ALGLIB --
Copyright 25.12.2008 by Bochkanov Sergey
*************************************************************************/
public static double mlprelclserror(ref multilayerperceptron network,
ref double[,] xy,
int npoints)
{
double result = 0;
result = (double)(mlpclserror(ref network, ref xy, npoints))/(double)(npoints);
return result;
}
Developer ID: palefacer, Project: TelescopeOrientation, Lines: 25, Source: mlpbase.cs
Example 19: mlphessianbatchinternal
/*************************************************************************
Internal subroutine for Hessian calculation.
WARNING!!! Unspeakable math far beyond human capabilities :)
*************************************************************************/
private static void mlphessianbatchinternal(ref multilayerperceptron network,
ref double[,] xy,
int ssize,
bool naturalerr,
ref double e,
ref double[] grad,
ref double[,] h)
{
int nin = 0;
int nout = 0;
int wcount = 0;
int ntotal = 0;
int istart = 0;
int i = 0;
int j = 0;
int k = 0;
int kl = 0;
int offs = 0;
int n1 = 0;
int n2 = 0;
//.........remainder of the code omitted.........