//.........some code omitted here.........
Info - error code:
* -4 internal SVD decomposition subroutine failed (very
rare and for degenerate systems only)
* -3 either too many constraints (M or more),
degenerate constraints (some constraints are
repeated twice) or inconsistent constraints were
specified.
* 1 task is solved
C - decomposition coefficients, array[0..M-1]
Rep - fitting report. Following fields are set:
* R2 non-adjusted coefficient of determination
(non-weighted)
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
IMPORTANT:
this subroutine doesn't calculate the task's condition number for K<>0.
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(F*CovPar*F')),
where F is functions matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
IMPORTANT: errors in parameters are calculated without taking into
account boundary/linear constraints! Presence of constraints
changes distribution of errors, but there is no easy way to
account for constraints when you calculate covariance matrix.
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have same level of noise, which is estimated from
the data
* for fitting with user-supplied weights we assume that the noise
level in the I-th point is inversely proportional to the I-th weight.
The coefficient of proportionality is estimated from the data.
NOTE: we apply small amount of regularization when we invert squared
Jacobian and calculate covariance matrix. It guarantees that
algorithm won't divide by zero during inversion, but skews
error estimates a bit (fractional error is about 10^-9).
However, we believe that this difference is insignificant for
all practical purposes except for the situation when you want
to compare ALGLIB results with "reference" implementation up
to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
-- ALGLIB --
Copyright 07.09.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitlinearc(double[] y,
double[,] fmatrix,
double[,] cmatrix,
int n,
int m,
int k,
ref int info,
ref double[] c,
lsfitreport rep)
{
double[] w = new double[0];
int i = 0;
y = (double[])y.Clone();
info = 0;
c = new double[0];
alglib.ap.assert(n>=1, "LSFitLinearC: N<1!");
alglib.ap.assert(m>=1, "LSFitLinearC: M<1!");
alglib.ap.assert(k>=0, "LSFitLinearC: K<0!");
alglib.ap.assert(alglib.ap.len(y)>=n, "LSFitLinearC: length(Y)<N!");
alglib.ap.assert(apserv.isfinitevector(y, n), "LSFitLinearC: Y contains infinite or NaN values!");
alglib.ap.assert(alglib.ap.rows(fmatrix)>=n, "LSFitLinearC: rows(FMatrix)<N!");
alglib.ap.assert(alglib.ap.cols(fmatrix)>=m, "LSFitLinearC: cols(FMatrix)<M!");
alglib.ap.assert(apserv.apservisfinitematrix(fmatrix, n, m), "LSFitLinearC: FMatrix contains infinite or NaN values!");
alglib.ap.assert(alglib.ap.rows(cmatrix)>=k, "LSFitLinearC: rows(CMatrix)<K!");
alglib.ap.assert(alglib.ap.cols(cmatrix)>=m+1 || k==0, "LSFitLinearC: cols(CMatrix)<M+1!");
alglib.ap.assert(apserv.apservisfinitematrix(cmatrix, k, m+1), "LSFitLinearC: CMatrix contains infinite or NaN values!");
w = new double[n];
for(i=0; i<=n-1; i++)
{
w[i] = 1;
}
lsfitlinearwc(y, w, fmatrix, cmatrix, n, m, k, ref info, ref c, rep);
}
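/*************************************************************************
Illustrative sketch (NOT part of ALGLIB): how the parameter-error fields
documented above relate to each other. Given Rep.CovPar (K x K, where K
is the number of fitted parameters) and the functions matrix F (N x K),
ErrPar and ErrCurve could be recomputed as below. The helper name and
its parameters are hypothetical.
*************************************************************************/
private static void reporterrorsketch(double[,] covpar,
    double[,] f,
    int n,
    int k,
    double[] errpar,
    double[] errcurve)
{
    // ErrPar[i] = sqrt(CovPar[i,i])
    for(int i=0; i<=k-1; i++)
    {
        errpar[i] = Math.Sqrt(covpar[i,i]);
    }
    // ErrCurve[i] = sqrt((F*CovPar*F')[i,i]) = sqrt(F[i,*]*CovPar*F[i,*]')
    for(int i=0; i<=n-1; i++)
    {
        double v = 0;
        for(int p=0; p<=k-1; p++)
        {
            for(int q=0; q<=k-1; q++)
            {
                v = v+f[i,p]*covpar[p,q]*f[i,q];
            }
        }
        errcurve[i] = Math.Sqrt(v);
    }
}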
/*************************************************************************
Linear least squares fitting, without weights.
See LSFitLinearW for more information.
-- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitlinear(ref double[] y,
ref double[,] fmatrix,
int n,
int m,
ref int info,
ref double[] c,
ref lsfitreport rep)
{
double[] w = new double[0];
int i = 0;
if( n<1 )
{
info = -1;
return;
}
w = new double[n];
for(i=0; i<=n-1; i++)
{
w[i] = 1;
}
lsfitlinearinternal(ref y, ref w, ref fmatrix, n, m, ref info, ref c, ref rep);
}
public static void lsfitlinearc(double[] y, double[,] fmatrix, double[,] cmatrix, out int info, out double[] c, out lsfitreport rep)
{
int n;
int m;
int k;
if( (ap.len(y)!=ap.rows(fmatrix)))
throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size");
if( (ap.cols(fmatrix)!=ap.cols(cmatrix)-1))
throw new alglibexception("Error while calling 'lsfitlinearc': looks like one of arguments has wrong size");
info = 0;
c = new double[0];
rep = new lsfitreport();
n = ap.len(y);
m = ap.cols(fmatrix);
k = ap.rows(cmatrix);
lsfit.lsfitlinearc(y, fmatrix, cmatrix, n, m, k, ref info, ref c, rep.innerobj);
return;
}
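/*************************************************************************
Illustrative usage sketch (NOT part of ALGLIB): calling the constrained
linear fitting wrapper declared right above. The data and the constraint
are hypothetical; they fit y ~ c0 + c1*x subject to c0+c1=1, assuming
the usual constraint-row layout [a0, ..., a(M-1), rhs] implied by the
cols(CMatrix)=M+1 checks above. The helper name is hypothetical.
*************************************************************************/
private static void lsfitlinearcusagesketch()
{
    double[] y = new double[]{0.9, 1.1, 1.9, 2.1};
    // design matrix: column 0 is the constant basis function, column 1 is x
    double[,] fmatrix = new double[,]{{1,0},{1,1},{1,2},{1,3}};
    // single constraint row: 1*c0 + 1*c1 = 1
    double[,] cmatrix = new double[,]{{1,1,1}};
    int info;
    double[] c;
    lsfitreport rep;
    lsfitlinearc(y, fmatrix, cmatrix, out info, out c, out rep);
    // info>0 means the task was solved; c holds the constrained coefficients,
    // and rep carries the RMSError/AvgError/MaxError fields documented above
}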
/*************************************************************************
Nonlinear least squares fitting results.
Called after LSFitNonlinearIteration() returned False.
INPUT PARAMETERS:
State - algorithm state (used by LSFitNonlinearIteration).
OUTPUT PARAMETERS:
Info - completion code:
* -1 incorrect parameters were specified
* 1 relative function improvement is no more than
EpsF.
* 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken
C - array[0..K-1], solution
Rep - optimization report. Following fields are set:
* Rep.TerminationType completion code:
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
-- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitnonlinearresults(ref lsfitstate state,
ref int info,
ref double[] c,
ref lsfitreport rep)
{
int i_ = 0;
info = state.repterminationtype;
if( info>0 )
{
c = new double[state.k];
for(i_=0; i_<=state.k-1;i_++)
{
c[i_] = state.c[i_];
}
rep.rmserror = state.reprmserror;
rep.avgerror = state.repavgerror;
rep.avgrelerror = state.repavgrelerror;
rep.maxerror = state.repmaxerror;
}
}
/*************************************************************************
Internal fitting subroutine
*************************************************************************/
private static void lsfitlinearinternal(ref double[] y,
ref double[] w,
ref double[,] fmatrix,
int n,
int m,
ref int info,
ref double[] c,
ref lsfitreport rep)
{
double threshold = 0;
double[,] ft = new double[0,0];
double[,] q = new double[0,0];
double[,] l = new double[0,0];
double[,] r = new double[0,0];
double[] b = new double[0];
double[] wmod = new double[0];
double[] tau = new double[0];
int i = 0;
int j = 0;
double v = 0;
double[] sv = new double[0];
double[,] u = new double[0,0];
double[,] vt = new double[0,0];
double[] tmp = new double[0];
double[] utb = new double[0];
double[] sutb = new double[0];
int relcnt = 0;
int i_ = 0;
if( n<1 || m<1 )
{
info = -1;
return;
}
info = 1;
threshold = Math.Sqrt(AP.Math.MachineEpsilon);
//
// Degenerate case, needs special handling
//
if( n<m )
{
//
// Create design matrix.
//
ft = new double[n, m];
b = new double[n];
wmod = new double[n];
for(j=0; j<=n-1; j++)
{
v = w[j];
for(i_=0; i_<=m-1;i_++)
{
ft[j,i_] = v*fmatrix[j,i_];
}
b[j] = w[j]*y[j];
wmod[j] = 1;
}
//
// LQ decomposition and reduction to M=N
//
c = new double[m];
for(i=0; i<=m-1; i++)
{
c[i] = 0;
}
rep.taskrcond = 0;
ortfac.rmatrixlq(ref ft, n, m, ref tau);
ortfac.rmatrixlqunpackq(ref ft, n, m, ref tau, n, ref q);
ortfac.rmatrixlqunpackl(ref ft, n, m, ref l);
lsfitlinearinternal(ref b, ref wmod, ref l, n, n, ref info, ref tmp, ref rep);
if( info<=0 )
{
return;
}
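//
// Back-transform: C = Q'*Tmp. Tmp solves the reduced N-dimensional
// problem built from the LQ factor L, and multiplying by Q' maps it
// back to the original M-dimensional coefficient space.
//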
for(i=0; i<=n-1; i++)
{
v = tmp[i];
for(i_=0; i_<=m-1;i_++)
{
c[i_] = c[i_] + v*q[i,i_];
}
}
return;
}
//
// N>=M. Generate design matrix and reduce to N=M using
// QR decomposition.
//
ft = new double[n, m];
b = new double[n];
for(j=0; j<=n-1; j++)
{
v = w[j];
//.........some code omitted here.........
/*************************************************************************
This is internal function for Chebyshev fitting.
It assumes that input data are normalized:
* X/XC belong to [-1,+1],
* mean(Y)=0, stddev(Y)=1.
It does not check inputs for errors.
This function is used to fit general (shifted) Chebyshev models, power
basis models or barycentric models.
INPUT PARAMETERS:
X - points, array[0..N-1].
Y - function values, array[0..N-1].
W - weights, array[0..N-1]
N - number of points, N>0.
XC - points where polynomial values/derivatives are constrained,
array[0..K-1].
YC - values of constraints, array[0..K-1]
DC - array[0..K-1], types of constraints:
* DC[i]=0 means that P(XC[i])=YC[i]
* DC[i]=1 means that P'(XC[i])=YC[i]
K - number of constraints, 0<=K<M.
K=0 means no constraints (XC/YC/DC are not used in such cases)
M - number of basis functions (= polynomial_degree + 1), M>=1
OUTPUT PARAMETERS:
Info- same format as in LSFitLinearW() subroutine:
* Info>0 task is solved
* Info<=0 an error occurred:
-4 means inconvergence of internal SVD
-3 means inconsistent constraints
C - interpolant in Chebyshev form; [-1,+1] is used as base interval
Rep - report, same format as in LSFitLinearW() subroutine.
Following fields are set:
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
IMPORTANT:
this subroutine doesn't calculate the task's condition number for K<>0.
-- ALGLIB PROJECT --
Copyright 10.12.2009 by Bochkanov Sergey
*************************************************************************/
private static void internalchebyshevfit(double[] x,
double[] y,
double[] w,
int n,
double[] xc,
double[] yc,
int[] dc,
int k,
int m,
ref int info,
ref double[] c,
lsfitreport rep)
{
double[] y2 = new double[0];
double[] w2 = new double[0];
double[] tmp = new double[0];
double[] tmp2 = new double[0];
double[] tmpdiff = new double[0];
double[] bx = new double[0];
double[] by = new double[0];
double[] bw = new double[0];
double[,] fmatrix = new double[0,0];
double[,] cmatrix = new double[0,0];
int i = 0;
int j = 0;
double mx = 0;
double decay = 0;
int i_ = 0;
xc = (double[])xc.Clone();
yc = (double[])yc.Clone();
info = 0;
c = new double[0];
clearreport(rep);
//
// weight decay for correct handling of task which becomes
// degenerate after constraints are applied
//
decay = 10000*math.machineepsilon;
//
// allocate space, initialize/fill:
// * FMatrix- values of basis functions at X[]
// * CMatrix- values (derivatives) of basis functions at XC[]
// * fill constraints matrix
// * fill first N rows of design matrix with values
// * fill next M rows of design matrix with regularizing term
// * append M zeros to Y
// * append M elements, mean(abs(W)) each, to W
//
//.........some code omitted here.........
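/*************************************************************************
Illustrative sketch (NOT part of ALGLIB): evaluating one row of a
Chebyshev design matrix with the standard recurrence
T0(x)=1, T1(x)=x, Tj(x)=2*x*T(j-1)(x)-T(j-2)(x).
These are the kind of basis values the elided code above places into
FMatrix for the normalized X[i] in [-1,+1]. The function name is
hypothetical.
*************************************************************************/
private static void chebyshevbasisrow(double x, int m, double[] row)
{
    for(int j=0; j<=m-1; j++)
    {
        if( j==0 )
        {
            row[j] = 1;
        }
        else if( j==1 )
        {
            row[j] = x;
        }
        else
        {
            row[j] = 2*x*row[j-1]-row[j-2];
        }
    }
}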
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
*************************************************************************/
public static void _pexec_lsfitlinearc(double[] y,
double[,] fmatrix,
double[,] cmatrix,
int n,
int m,
int k,
ref int info,
ref double[] c,
lsfitreport rep)
{
lsfitlinearc(y,fmatrix,cmatrix,n,m,k,ref info,ref c,rep);
}
/*************************************************************************
Nonlinear least squares fitting results.
Called after return from LSFitFit().
INPUT PARAMETERS:
State - algorithm state
OUTPUT PARAMETERS:
Info - completion code:
* -7 gradient verification failed.
See LSFitSetGradientCheck() for more information.
* 1 relative function improvement is no more than
EpsF.
* 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken
* 7 stopping conditions are too stringent,
further improvement is impossible
C - array[0..K-1], solution
Rep - optimization report. Following fields are set:
* Rep.TerminationType completion code:
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
* WRMSError weighted rms error on the (X,Y).
-- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitresults(lsfitstate state,
ref int info,
ref double[] c,
lsfitreport rep)
{
int i_ = 0;
info = 0;
c = new double[0];
info = state.repterminationtype;
rep.varidx = state.repvaridx;
if( info>0 )
{
c = new double[state.k];
for(i_=0; i_<=state.k-1;i_++)
{
c[i_] = state.c[i_];
}
rep.rmserror = state.reprmserror;
rep.wrmserror = state.repwrmserror;
rep.avgerror = state.repavgerror;
rep.avgrelerror = state.repavgrelerror;
rep.maxerror = state.repmaxerror;
rep.iterationscount = state.repiterationscount;
}
}
//.........some code omitted here.........
Parameter CnstrLeft contains the left constraint (or NAN for unconstrained
fitting), and CnstrRight contains the right one. For 4PL, the left constraint
ALWAYS corresponds to parameter A, and the right one is ALWAYS a constraint
on D, because the 4PL model is normalized in such a way that B>=0.
For the 5PL model things are different. Unlike the 4PL one, the 5PL model is
NOT symmetric with respect to a change in sign of B. Thus, negative B's are
possible, and the left constraint may constrain parameter A (for positive B's)
or parameter D (for negative B's). The meaning of the right constraint
changes similarly.
You do not have to decide which parameter to constrain - the algorithm will
automatically determine the correct parameters as fitting progresses. However,
the question highlighted above is important when you interpret fitting results.
-- ALGLIB PROJECT --
Copyright 14.02.2014 by Bochkanov Sergey
*************************************************************************/
public static void logisticfit45x(double[] x,
double[] y,
int n,
double cnstrleft,
double cnstrright,
bool is4pl,
double lambdav,
double epsx,
int rscnt,
ref double a,
ref double b,
ref double c,
ref double d,
ref double g,
lsfitreport rep)
{
int i = 0;
int k = 0;
int innerit = 0;
int outerit = 0;
int nz = 0;
double v = 0;
double b00 = 0;
double b01 = 0;
double b10 = 0;
double b11 = 0;
double b30 = 0;
double b31 = 0;
double[] p0 = new double[0];
double[] p1 = new double[0];
double[] p2 = new double[0];
double[] bndl = new double[0];
double[] bndu = new double[0];
double[] s = new double[0];
double[,] z = new double[0,0];
hqrnd.hqrndstate rs = new hqrnd.hqrndstate();
minlm.minlmstate state = new minlm.minlmstate();
minlm.minlmreport replm = new minlm.minlmreport();
int maxits = 0;
double fbest = 0;
double flast = 0;
double flast2 = 0;
double scalex = 0;
double scaley = 0;
double[] bufx = new double[0];
double[] bufy = new double[0];
double rss = 0;
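//.........rest of the logisticfit45x body is omitted in this excerpt.........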
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
*************************************************************************/
public static void _pexec_lsfitlinearw(double[] y,
double[] w,
double[,] fmatrix,
int n,
int m,
ref int info,
ref double[] c,
lsfitreport rep)
{
lsfitlinearw(y,w,fmatrix,n,m,ref info,ref c,rep);
}
//.........some code omitted here.........
different models - one with B>0 and one with B<0.
* after fitting is done, we compare results with the best values found so
far, overwrite the "best solution" if needed, and move to the next random
location.
The overall algorithm is very stable and not prone to bad local extrema.
Furthermore, it scales automatically when the input data have a very large
or very small range.
INPUT PARAMETERS:
X - array[N], stores X-values.
MUST include only non-negative numbers (but may include
zero values). Can be unsorted.
Y - array[N], values to fit.
N - number of points. If N is less than length of X/Y, only
leading N elements are used.
CnstrLeft- optional equality constraint for model value at the left
boundary (at X=0). Specify NAN (Not-a-Number) if you do
not need constraint on the model value at X=0 (in C++ you
can pass alglib::fp_nan as parameter, in C# it will be
Double.NaN).
See below, section "EQUALITY CONSTRAINTS" for more
information about constraints.
CnstrRight- optional equality constraint for model value at X=infinity.
Specify NAN (Not-a-Number) if you do not need constraint
on the model value (in C++ you can pass alglib::fp_nan as
parameter, in C# it will be Double.NaN).
See below, section "EQUALITY CONSTRAINTS" for more
information about constraints.
OUTPUT PARAMETERS:
A,B,C,D,G- parameters of 5PL model
Rep - fitting report. This structure has many fields, but ONLY
ONES LISTED BELOW ARE SET:
* Rep.IterationsCount - number of iterations performed
* Rep.RMSError - root-mean-square error
* Rep.AvgError - average absolute error
* Rep.AvgRelError - average relative error (calculated for
non-zero Y-values)
* Rep.MaxError - maximum absolute error
* Rep.R2 - coefficient of determination, R-squared. This
coefficient is calculated as R2=1-RSS/TSS (in case
of nonlinear regression there are multiple ways to
define R2, each of them giving different results).
NOTE: after you obtained coefficients, you can evaluate model with
LogisticCalc5() function.
NOTE: if you need better control over fitting process than provided by this
function, you may use LogisticFit45X().
NOTE: the step is automatically scaled according to the scale of the
parameters being fitted before its length is compared with EpsX. Thus,
this function can be used to fit data with very small or very large
values without changing EpsX.
EQUALITY CONSTRAINTS ON PARAMETERS
5PL solver supports equality constraints on model values at the left
boundary (X=0) and right boundary (X=infinity). These constraints are
completely optional and you can specify both of them, only one - or no
constraints at all.
Parameter CnstrLeft contains the left constraint (or NAN for unconstrained
fitting), and CnstrRight contains the right one.
Unlike the 4PL one, the 5PL model is NOT symmetric with respect to a change
in sign of B. Thus, negative B's are possible, and the left constraint may
constrain parameter A (for positive B's) or parameter D (for negative B's).
The meaning of the right constraint changes similarly.
You do not have to decide which parameter to constrain - the algorithm will
automatically determine the correct parameters as fitting progresses.
However, the question highlighted above is important when you interpret
fitting results.
-- ALGLIB PROJECT --
Copyright 14.02.2014 by Bochkanov Sergey
*************************************************************************/
public static void logisticfit5ec(double[] x,
double[] y,
int n,
double cnstrleft,
double cnstrright,
ref double a,
ref double b,
ref double c,
ref double d,
ref double g,
lsfitreport rep)
{
x = (double[])x.Clone();
y = (double[])y.Clone();
a = 0;
b = 0;
c = 0;
d = 0;
g = 0;
logisticfit45x(x, y, n, cnstrleft, cnstrright, false, 0.0, 0.0, 0, ref a, ref b, ref c, ref d, ref g, rep);
}
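/*************************************************************************
Illustrative sketch (NOT part of ALGLIB): calling logisticfit5ec() as
declared above on hypothetical data, together with an evaluator for the
usual 5PL form f(x) = D + (A-D)/(1+(x/C)^B)^G that the fitted A,B,C,D,G
describe. For B>0, A is the model value at X=0 and D is the value at
X=infinity, as discussed in the comments above. Helper names and data
are hypothetical.
*************************************************************************/
private static double logistic5sketch(double x, double a, double b, double c, double d, double g)
{
    // value of the 5PL model at the point x
    return d+(a-d)/Math.Pow(1+Math.Pow(x/c, b), g);
}
private static void logisticfit5ecusagesketch()
{
    double[] x = new double[]{0.1, 0.5, 1.0, 5.0, 10.0, 50.0};
    double[] y = new double[]{0.07, 0.25, 0.52, 0.91, 0.97, 1.00};
    double a = 0;
    double b = 0;
    double c = 0;
    double d = 0;
    double g = 0;
    lsfitreport rep = new lsfitreport();
    // constrain the model value to 0 at X=0 and to 1 at X=infinity
    logisticfit5ec(x, y, 6, 0.0, 1.0, ref a, ref b, ref c, ref d, ref g, rep);
    // rep is filled with RMSError/AvgError/R2 etc. as documented above;
    // logistic5sketch(x0, a, b, c, d, g) then evaluates the fitted curve
}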
/*************************************************************************
Nonlinear least squares fitting results.
Called after return from LSFitFit().
INPUT PARAMETERS:
State - algorithm state
OUTPUT PARAMETERS:
Info - completion code:
* 1 relative function improvement is no more than
EpsF.
* 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken
* 7 stopping conditions are too stringent,
further improvement is impossible
C - array[0..K-1], solution
Rep - optimization report. Following fields are set:
* Rep.TerminationType completion code:
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
-- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitresults(lsfitstate state, out int info, out double[] c, out lsfitreport rep)
{
info = 0;
c = new double[0];
rep = new lsfitreport();
lsfit.lsfitresults(state.innerobj, ref info, ref c, rep.innerobj);
return;
}
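/*************************************************************************
Illustrative sketch (NOT part of ALGLIB): a typical nonlinear fitting
round trip ending in the lsfitresults() call wrapped right above. It
assumes the lsfitcreatef()/lsfitfit() entry points and the
ndimensional_pfunc callback signature from the same interface, which are
not shown in this excerpt; the data and the model f(c,x) = exp(-c0*x^2)
are hypothetical.
*************************************************************************/
private static void expmodelfunc(double[] c, double[] x, ref double func, object obj)
{
    // model value at point x for parameter vector c
    func = Math.Exp(-c[0]*x[0]*x[0]);
}
private static void lsfitresultsusagesketch()
{
    double[,] x = new double[,]{{-1},{-0.5},{0},{0.5},{1}};
    double[] y = new double[]{0.22, 0.68, 1.00, 0.68, 0.22};
    double[] c = new double[]{0.3};                  // initial guess for c0
    int info;
    lsfitstate state;
    lsfitreport rep;
    lsfitcreatef(x, y, c, 0.0001, out state);        // numerical differentiation
    lsfitfit(state, expmodelfunc, null, null);       // run the optimizer
    lsfitresults(state, out info, out c, out rep);   // info>0 on success
}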
/*************************************************************************
Nonlinear least squares fitting results.
Called after return from LSFitFit().
INPUT PARAMETERS:
State - algorithm state
OUTPUT PARAMETERS:
Info - completion code:
* -7 gradient verification failed.
See LSFitSetGradientCheck() for more information.
* 1 relative function improvement is no more than
EpsF.
* 2 relative step is no more than EpsX.
* 4 gradient norm is no more than EpsG
* 5 MaxIts steps were taken
* 7 stopping conditions are too stringent,
further improvement is impossible
C - array[0..K-1], solution
Rep - optimization report. On success the following fields are set:
* R2 non-adjusted coefficient of determination
(non-weighted)
* RMSError rms error on the (X,Y).
* AvgError average error on the (X,Y).
* AvgRelError average relative error on the non-zero Y
* MaxError maximum error
NON-WEIGHTED ERRORS ARE CALCULATED
* WRMSError weighted rms error on the (X,Y).
ERRORS IN PARAMETERS
This solver also calculates different kinds of errors in parameters and
fills corresponding fields of report:
* Rep.CovPar covariance matrix for parameters, array[K,K].
* Rep.ErrPar errors in parameters, array[K],
errpar = sqrt(diag(CovPar))
* Rep.ErrCurve vector of fit errors - standard deviations of empirical
best-fit curve from "ideal" best-fit curve built with
infinite number of samples, array[N].
errcurve = sqrt(diag(J*CovPar*J')),
where J is Jacobian matrix.
* Rep.Noise vector of per-point estimates of noise, array[N]
IMPORTANT: errors in parameters are calculated without taking into
account boundary/linear constraints! Presence of constraints
changes distribution of errors, but there is no easy way to
account for constraints when you calculate covariance matrix.
NOTE: noise in the data is estimated as follows:
* for fitting without user-supplied weights all points are
assumed to have same level of noise, which is estimated from
the data
* for fitting with user-supplied weights we assume that the noise
level in the I-th point is inversely proportional to the I-th weight.
The coefficient of proportionality is estimated from the data.
NOTE: we apply small amount of regularization when we invert squared
Jacobian and calculate covariance matrix. It guarantees that
algorithm won't divide by zero during inversion, but skews
error estimates a bit (fractional error is about 10^-9).
However, we believe that this difference is insignificant for
all practical purposes except for the situation when you want
to compare ALGLIB results with "reference" implementation up
to the last significant digit.
NOTE: covariance matrix is estimated using correction for degrees
of freedom (covariances are divided by N-M instead of dividing
by N).
-- ALGLIB --
Copyright 17.08.2009 by Bochkanov Sergey
*************************************************************************/
public static void lsfitresults(lsfitstate state,
ref int info,
ref double[] c,
lsfitreport rep)
{
int i = 0;
int j = 0;
int i_ = 0;
info = 0;
c = new double[0];
clearreport(rep);
info = state.repterminationtype;
rep.varidx = state.repvaridx;
if( info>0 )
{
c = new double[state.k];
for(i_=0; i_<=state.k-1;i_++)
{
c[i_] = state.c[i_];
}
rep.rmserror = state.reprmserror;
rep.wrmserror = state.repwrmserror;
rep.avgerror = state.repavgerror;
rep.avgrelerror = state.repavgrelerror;
//.........some code omitted here.........
public override void init()
{
s = new double[0];
bndl = new double[0];
bndu = new double[0];
taskx = new double[0,0];
tasky = new double[0];
taskw = new double[0];
x = new double[0];
c = new double[0];
g = new double[0];
h = new double[0,0];
wcur = new double[0];
tmp = new double[0];
tmpf = new double[0];
tmpjac = new double[0,0];
tmpjacw = new double[0,0];
invrep = new matinv.matinvreport();
rep = new lsfitreport();
optstate = new minlm.minlmstate();
optrep = new minlm.minlmreport();
rstate = new rcommstate();
}