
C# sparse Class Code Examples


This article collects typical usage examples of the C# sparse class. If you are wondering what the C# sparse class does, how to use it, or where to find examples of it, the curated class code examples below should help.



20 code examples of the sparse class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.

Example 1: Blah

 static extern int Blah(
     [MarshalAs(UnmanagedType.I4)]
     int i,
     string s,
     union u,
     sparse sp,
     [In, Out] string inout,
     StringBuilder sb,
     IntPtr ip,
     out bool ob,
     ref bytes bs);
Developer ID: darthwalsh, Project: dotNetBytes, Lines of code: 11, Source: PInvoke.cs
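
Note that an extern declaration like the one above only works when paired with a DllImport attribute naming the native library, which the snippet omits. A minimal sketch of a complete declaration follows; the library name "native.dll" is an assumption, and the custom types (union, sparse, bytes) from the test assembly are left out:

 using System;
 using System.Runtime.InteropServices;
 using System.Text;

 static class NativeMethods
 {
     // "native.dll" is a hypothetical library name, not taken from the example above.
     [DllImport("native.dll", CharSet = CharSet.Ansi)]
     public static extern int Blah(
         [MarshalAs(UnmanagedType.I4)] int i,  // explicit 4-byte integer marshaling
         string s,                             // marshaled as an ANSI C string
         [In, Out] string inout,               // data may flow in both directions
         StringBuilder sb,                     // writable buffer the callee can fill
         IntPtr ip,                            // raw pointer passed through unchanged
         out bool ob);                         // output-only boolean
 }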


Example 2: mlpinitpreprocessorsparse

        /*************************************************************************
        Internal subroutine.
        Initialization for preprocessor based on a sample.

        INPUT
            Network -   initialized neural network;
            XY      -   sample, given by sparse matrix;
            SSize   -   sample size.

        OUTPUT
            Network -   neural network with initialized preprocessor.

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpinitpreprocessorsparse(multilayerperceptron network,
            sparse.sparsematrix xy,
            int ssize)
        {
            int jmax = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntotal = 0;
            int istart = 0;
            int offs = 0;
            int ntype = 0;
            double[] means = new double[0];
            double[] sigmas = new double[0];
            double s = 0;
            int i = 0;
            int j = 0;

            mlpproperties(network, ref nin, ref nout, ref wcount);
            ntotal = network.structinfo[3];
            istart = network.structinfo[5];
            
            //
            // Means/Sigmas
            //
            if( mlpissoftmax(network) )
            {
                jmax = nin-1;
            }
            else
            {
                jmax = nin+nout-1;
            }
            means = new double[jmax+1];
            sigmas = new double[jmax+1];
            for(i=0; i<=jmax; i++)
            {
                means[i] = 0;
                sigmas[i] = 0;
            }
            for(i=0; i<=ssize-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    means[j] = means[j]+network.xyrow[j];
                }
            }
            for(i=0; i<=jmax; i++)
            {
                means[i] = means[i]/ssize;
            }
            for(i=0; i<=ssize-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    sigmas[j] = sigmas[j]+math.sqr(network.xyrow[j]-means[j]);
                }
            }
            for(i=0; i<=jmax; i++)
            {
                sigmas[i] = Math.Sqrt(sigmas[i]/ssize);
            }
            
            //
            // Inputs
            //
            for(i=0; i<=nin-1; i++)
            {
                network.columnmeans[i] = means[i];
                network.columnsigmas[i] = sigmas[i];
                if( (double)(network.columnsigmas[i])==(double)(0) )
                {
                    network.columnsigmas[i] = 1;
                }
            }
            
            //
            // Outputs
            //
            if( !mlpissoftmax(network) )
            {
                for(i=0; i<=nout-1; i++)
                {
//......... remainder of this code omitted .........
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 101, Source: dataanalysis.cs
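
The preprocessing computed above is plain per-column standardization: each column is shifted by its mean and scaled by its standard deviation, with sigma forced to 1 for constant columns (exactly what the Inputs block does). A standalone, illustrative-only sketch of the same statistics on a dense array (assumes using System;):

 // Illustrative only: the mean/sigma statistics the preprocessor derives per column.
 static void ComputeMeansSigmas(double[,] xy, out double[] means, out double[] sigmas)
 {
     int rows = xy.GetLength(0), cols = xy.GetLength(1);
     means = new double[cols];
     sigmas = new double[cols];
     for (int j = 0; j < cols; j++)
     {
         for (int i = 0; i < rows; i++) means[j] += xy[i, j];
         means[j] /= rows;
         for (int i = 0; i < rows; i++) sigmas[j] += Math.Pow(xy[i, j] - means[j], 2);
         sigmas[j] = Math.Sqrt(sigmas[j] / rows);
         if (sigmas[j] == 0) sigmas[j] = 1; // constant column: avoid division by zero
     }
 }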


Example 3: format

        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse M*N matrix in the CRS format (you MUST convert  it
                        to CRS format by calling the SparseConvertToCRS() function
                        BEFORE you pass it to this function).
            B       -   right part, array[M]

        RESULT:
            This function returns no result.
            You can get the solution by calling LinLSQRResults()
            
        NOTE: this function uses lightweight preconditioning -  multiplication  by
              inverse of diag(A). If you want, you can turn preconditioning off by
              calling LinLSQRSetPrecUnit(). However, preconditioning cost is   low
              and preconditioner is very important for solution  of  badly  scaled
              problems.

          -- ALGLIB --
             Copyright 30.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void linlsqrsolvesparse(linlsqrstate state,
            sparse.sparsematrix a,
            double[] b)
        {
            int n = 0;
            int i = 0;
            int j = 0;
            int t0 = 0;
            int t1 = 0;
            double v = 0;

            n = state.n;
            alglib.ap.assert(!state.running, "LinLSQRSolveSparse: you can not call this function when LinLSQRIteration is running");
            alglib.ap.assert(alglib.ap.len(b)>=state.m, "LinLSQRSolveSparse: Length(B)<M");
            alglib.ap.assert(apserv.isfinitevector(b, state.m), "LinLSQRSolveSparse: B contains infinite or NaN values");
            
            //
            // Allocate temporaries
            //
            apserv.rvectorsetlengthatleast(ref state.tmpd, n);
            apserv.rvectorsetlengthatleast(ref state.tmpx, n);
            
            //
            // Compute diagonal scaling matrix D
            //
            if( state.prectype==0 )
            {
                
                //
                // Default preconditioner - inverse of column norms
                //
                for(i=0; i<=n-1; i++)
                {
                    state.tmpd[i] = 0;
                }
                t0 = 0;
                t1 = 0;
                while( sparse.sparseenumerate(a, ref t0, ref t1, ref i, ref j, ref v) )
                {
                    state.tmpd[j] = state.tmpd[j]+math.sqr(v);
                }
                for(i=0; i<=n-1; i++)
                {
                    if( (double)(state.tmpd[i])>(double)(0) )
                    {
                        state.tmpd[i] = 1/Math.Sqrt(state.tmpd[i]);
                    }
                    else
                    {
                        state.tmpd[i] = 1;
                    }
                }
            }
            else
            {
                
                //
                // No diagonal scaling
                //
                for(i=0; i<=n-1; i++)
                {
                    state.tmpd[i] = 1;
                }
            }
            
            //
            // Solve.
            //
            // Instead of solving A*x=b we solve preconditioned system (A*D)*(inv(D)*x)=b.
            // Transformed A is not calculated explicitly, we just modify multiplication
            // by A or A'. After solution we modify State.RX so it will store untransformed
            // variables
            //
            linlsqrsetb(state, b);
            linlsqrrestart(state);
            while( linlsqriteration(state) )
            {
//......... remainder of this code omitted .........
Developer ID: orlovk, Project: PtProject, Lines of code: 101, Source: solvers.cs
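
As a usage sketch (not from the original post): through the public alglib wrappers, a call sequence might look like the following; the matrix values are invented, and the conversion to CRS is the mandatory step the comment above insists on.

 // Sketch: least-squares solution of a sparse 3x2 system with LSQR.
 alglib.sparsematrix a;
 alglib.sparsecreate(3, 2, out a);          // starts in hash-table format
 alglib.sparseset(a, 0, 0, 2.0);
 alglib.sparseset(a, 1, 1, 3.0);
 alglib.sparseset(a, 2, 0, 1.0);
 alglib.sparseconverttocrs(a);              // REQUIRED before linlsqrsolvesparse

 double[] b = { 4.0, 9.0, 2.0 };
 alglib.linlsqrstate s;
 alglib.linlsqrreport rep;
 double[] x;
 alglib.linlsqrcreate(3, 2, out s);         // M=3 rows, N=2 columns
 alglib.linlsqrsolvesparse(s, a, b);
 alglib.linlsqrresults(s, out x, out rep);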


Example 4: min

        /*************************************************************************
        This function runs QPBLEIC solver; it returns after optimization   process
        was completed. Following QP problem is solved:

            min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin))
            
        subject to boundary constraints.

        INPUT PARAMETERS:
            AC          -   for dense problems (AKind=0), A-term of CQM object
                            contains system matrix. Other terms are unspecified
                            and should not be referenced.
            SparseAC    -   for sparse problems (AKind=1), contains system matrix
            AKind       -   matrix format:
                            * 0 for dense matrix
                            * 1 for sparse matrix
            SparseUpper -   which triangle of SparseAC stores the matrix - upper or
                            lower one (for dense matrices this parameter is
                            ignored).
            AbsASum     -   SUM(|A[i,j]|)
            AbsASum2    -   SUM(A[i,j]^2)
            BC          -   linear term, array[NC]
            BndLC       -   lower bound, array[NC]
            BndUC       -   upper bound, array[NC]
            SC          -   scale vector, array[NC]:
                            * I-th element contains scale of I-th variable,
                            * SC[I]>0
            XOriginC    -   origin term, array[NC]. Can be zero.
            NC          -   number of variables in the  original  formulation  (no
                            slack variables).
            CLEICC      -   linear equality/inequality constraints. Present version
                            of this function does NOT provide  publicly  available
                            support for linear constraints. This feature  will  be
                            introduced in the future versions of the function.
            NEC, NIC    -   number of equality/inequality constraints.
                            MUST BE ZERO IN THE CURRENT VERSION!!!
            Settings    -   QPBLEICSettings object initialized by one of the initialization
                            functions.
            SState      -   object which stores temporaries:
                            * if uninitialized object was passed, FirstCall parameter MUST
                              be set to True; object will be automatically initialized by the
                              function, and FirstCall will be set to False.
                            * if FirstCall=False, it is assumed that this parameter was already
                              initialized by previous call to this function with same
                              problem dimensions (variable count N).
            FirstCall   -   whether it is first call of this function for this specific
                            instance of SState, with this number of variables N specified.
            XS          -   initial point, array[NC]
            
            
        OUTPUT PARAMETERS:
            XS          -   last point
            FirstCall   -   unconditionally set to False
            TerminationType -  termination type:
                            *
                            *
                            *

          -- ALGLIB --
             Copyright 14.05.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void qpbleicoptimize(cqmodels.convexquadraticmodel a,
            sparse.sparsematrix sparsea,
            int akind,
            bool sparseaupper,
            double absasum,
            double absasum2,
            double[] b,
            double[] bndl,
            double[] bndu,
            double[] s,
            double[] xorigin,
            int n,
            double[,] cleic,
            int nec,
            int nic,
            qpbleicsettings settings,
            qpbleicbuffers sstate,
            ref bool firstcall,
            ref double[] xs,
            ref int terminationtype)
        {
            int i = 0;
            double d2 = 0;
            double d1 = 0;
            double d0 = 0;
            double v = 0;
            double v0 = 0;
            double v1 = 0;
            double md = 0;
            double mx = 0;
            double mb = 0;
            int d1est = 0;
            int d2est = 0;
            int i_ = 0;

            terminationtype = 0;

            alglib.ap.assert(akind==0 || akind==1, "QPBLEICOptimize: unexpected AKind");
            sstate.repinneriterationscount = 0;
//......... remainder of this code omitted .........
Developer ID: orlovk, Project: PtProject, Lines of code: 101, Source: optimization.cs
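
qpbleicoptimize() is an internal entry point; user code normally reaches the BLEIC-based QP solver through the public minqp interface. The following is a hedged sketch, assuming the minqpcreate/minqpsetalgobleic wrappers documented in the ALGLIB Reference Manual; the problem data is invented:

 // Sketch: minimize 0.5*x'*A*x + b'*x subject to box constraints via the BLEIC QP solver.
 double[,] aq = { { 2.0, 0.0 }, { 0.0, 2.0 } };  // quadratic term A
 double[] b = { -6.0, -4.0 };
 double[] bndl = { 0.0, 0.0 };
 double[] bndu = { 2.5, 2.5 };

 alglib.minqpstate state;
 alglib.minqpreport rep;
 double[] x;
 alglib.minqpcreate(2, out state);
 alglib.minqpsetquadraticterm(state, aq);
 alglib.minqpsetlinearterm(state, b);
 alglib.minqpsetbc(state, bndl, bndu);
 alglib.minqpsetalgobleic(state, 0.0, 0.0, 0.0, 0);  // zeros select default stopping criteria
 alglib.minqpoptimize(state);
 alglib.minqpresults(state, out x, out rep);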


Example 5: format

        /*************************************************************************
        Procedure for solution of A*x=b with sparse A.

        INPUT PARAMETERS:
            State   -   algorithm state
            A       -   sparse matrix in the CRS format (you MUST convert it  to
                        CRS format by calling the SparseConvertToCRS() function).
            IsUpper -   whether upper or lower triangle of A is used:
                        * IsUpper=True  => only upper triangle is used and lower
                                           triangle is not referenced at all 
                        * IsUpper=False => only lower triangle is used and upper
                                           triangle is not referenced at all
            B       -   right part, array[N]

        RESULT:
            This function returns no result.
            You can get solution by calling LinCGResults()

          -- ALGLIB --
             Copyright 14.11.2011 by Bochkanov Sergey
        *************************************************************************/
        public static void lincgsolvesparse(lincgstate state,
            sparse.sparsematrix a,
            bool isupper,
            double[] b)
        {
            double vmv = 0;
            int i_ = 0;

            alglib.ap.assert(alglib.ap.len(b)>=state.n, "LinCGSetB: Length(B)<N");
            alglib.ap.assert(apserv.isfinitevector(b, state.n), "LinCGSetB: B contains infinite or NaN values!");
            lincgrestart(state);
            lincgsetb(state, b);
            while( lincgiteration(state) )
            {
                if( state.needmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                }
                if( state.needvmv )
                {
                    sparse.sparsesmv(a, isupper, state.x, ref state.mv);
                    vmv = 0.0;
                    for(i_=0; i_<=state.n-1;i_++)
                    {
                        vmv += state.x[i_]*state.mv[i_];
                    }
                    state.vmv = vmv;
                }
                if( state.needprec )
                {
                    for(i_=0; i_<=state.n-1;i_++)
                    {
                        state.pv[i_] = state.x[i_];
                    }
                }
            }
        }
Developer ID: lgatto, Project: proteowizard, Lines of code: 58, Source: solvers.cs
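
A usage sketch through the public wrappers (values invented; the solver only touches the triangle selected by IsUpper):

 // Sketch: CG solution of a symmetric positive-definite 2x2 sparse system.
 alglib.sparsematrix a;
 alglib.sparsecreate(2, 2, out a);
 alglib.sparseset(a, 0, 0, 4.0);   // only the upper triangle is filled here
 alglib.sparseset(a, 0, 1, 1.0);
 alglib.sparseset(a, 1, 1, 3.0);
 alglib.sparseconverttocrs(a);     // REQUIRED before lincgsolvesparse

 double[] b = { 1.0, 2.0 };
 alglib.lincgstate s;
 alglib.lincgreport rep;
 double[] x;
 alglib.lincgcreate(2, out s);
 alglib.lincgsolvesparse(s, a, true, b);  // IsUpper=true: lower triangle never referenced
 alglib.lincgresults(s, out x, out rep);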


Example 6: sparsematrix

 public sparsematrix(sparse.sparsematrix obj)
 {
     _innerobj = obj;
 }
Developer ID: tablee, Project: TabBox, Lines of code: 4, Source: linalg.cs


Example 7: sparsebuffers

 public sparsebuffers(sparse.sparsebuffers obj)
 {
     _innerobj = obj;
 }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 4, Source: linalg.cs


Example 8: SparseCholeskyX

        /*************************************************************************
        Sparse Cholesky decomposition for a skyline matrix using an in-place
        algorithm without allocating additional storage.

        The algorithm computes the Cholesky decomposition of a symmetric positive-
        definite sparse matrix. The result of the algorithm is a representation of
        A as A=U^T*U or A=L*L^T.

        This  function  is  a  more  efficient alternative to general, but  slower
        SparseCholeskyX(), because it does not  create  temporary  copies  of  the
        target. It performs factorization in-place, which gives  best  performance
        on low-profile matrices. Its drawback, however, is that it can not perform
        profile-reducing permutation of input matrix.

        INPUT PARAMETERS:
            A       -   sparse matrix in skyline storage (SKS) format.
            N       -   size of matrix A (can be smaller than actual size of A)
            IsUpper -   if IsUpper=True, then factorization is performed on  upper
                        triangle. The other triangle is ignored (it may contain some
                        data, but it is not changed).
            

        OUTPUT PARAMETERS:
            A       -   the result of factorization, stored in SKS. If IsUpper=True,
                        then the upper  triangle  contains  matrix  U,  such  that
                        A = U^T*U. Lower triangle is not changed.
                        Similarly, if IsUpper = False. In this case L is returned,
                        and we have A = L*(L^T).
                        Note that THIS function does not  perform  permutation  of
                        rows to reduce bandwidth.

        RESULT:
            If  the  matrix  is  positive-definite,  the  function  returns  True.
            Otherwise, the function returns False. Contents of A is not determined
            in such case.

        NOTE: for  performance  reasons  this  function  does NOT check that input
              matrix  includes  only  finite  values. It is your responsibility to
              make sure that there are no infinite or NAN values in the matrix.

          -- ALGLIB routine --
             16.01.2014
             Bochkanov Sergey
        *************************************************************************/
        public static bool sparsecholeskyskyline(sparse.sparsematrix a,
            int n,
            bool isupper)
        {
            bool result = new bool();
            int i = 0;
            int j = 0;
            int k = 0;
            int jnz = 0;
            int jnza = 0;
            int jnzl = 0;
            double v = 0;
            double vv = 0;
            double a12 = 0;
            int nready = 0;
            int nadd = 0;
            int banda = 0;
            int offsa = 0;
            int offsl = 0;

            alglib.ap.assert(n>=0, "SparseCholeskySkyline: N<0");
            alglib.ap.assert(sparse.sparsegetnrows(a)>=n, "SparseCholeskySkyline: rows(A)<N");
            alglib.ap.assert(sparse.sparsegetncols(a)>=n, "SparseCholeskySkyline: cols(A)<N");
            alglib.ap.assert(sparse.sparseissks(a), "SparseCholeskySkyline: A is not stored in SKS format");
            result = false;
            
            //
            // transpose if needed
            //
            if( isupper )
            {
                sparse.sparsetransposesks(a);
            }
            
            //
            // Perform Cholesky decomposition:
            // * we assume that the leading NReady*NReady submatrix is done
            // * having Cholesky decomposition of NReady*NReady submatrix we
            //   obtain decomposition of larger (NReady+NAdd)*(NReady+NAdd) one.
            //
            // Here is algorithm. At the start we have
            //
            //     (      |   )
            //     (  L   |   )
            // S = (      |   )
            //     (----------)
            //     (  A   | B )
            //
            // with L being already computed Cholesky factor, A and B being
            // unprocessed parts of the matrix. Of course, L/A/B are stored
            // in SKS format.
            //
            // Then, we calculate A1:=(inv(L)*A')' and replace A with A1.
            // Then, we calculate B1:=B-A1*A1'     and replace B with B1
            //
            // Finally, we calculate small NAdd*NAdd Cholesky of B1 with
//......... remainder of this code omitted .........
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 101, Source: linalg.cs
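
A possible call sequence, assuming the public sparseconverttosks() and sparsecholeskyskyline() wrappers are available as in recent ALGLIB releases (values invented):

 // Sketch: in-place skyline (SKS) Cholesky of a small SPD matrix.
 alglib.sparsematrix a;
 alglib.sparsecreate(2, 2, out a);
 alglib.sparseset(a, 0, 0, 4.0);
 alglib.sparseset(a, 0, 1, 2.0);   // upper-triangle entry
 alglib.sparseset(a, 1, 1, 5.0);
 alglib.sparseconverttosks(a);     // the factorization requires SKS storage

 bool ok = alglib.sparsecholeskyskyline(a, 2, true);  // true => factor the upper triangle
 // On success, the upper triangle of a now holds U with A = U^T*U.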


Example 9: _pexec_mlpgradbatchsparsesubset

 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
 *************************************************************************/
 public static void _pexec_mlpgradbatchsparsesubset(multilayerperceptron network,
     sparse.sparsematrix xy,
     int setsize,
     int[] idx,
     int subsetsize,
     ref double e,
     ref double[] grad)
 {
     mlpgradbatchsparsesubset(network,xy,setsize,idx,subsetsize,ref e,ref grad);
 }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 13, Source: dataanalysis.cs


Example 10: support

        /*************************************************************************
        Batch gradient calculation for a set of inputs/outputs  for  a  subset  of
        dataset given by set of indexes.


        FOR USERS OF COMMERCIAL EDITION:

          ! Commercial version of ALGLIB includes two  important  improvements  of
          ! this function:
          ! * multicore support (C++ and C# computational cores)
          ! * SSE support 
          !
          ! First improvement gives close-to-linear speedup on multicore  systems.
          ! Second improvement gives constant speedup (2-3x depending on your CPU)
          !
          ! In order to use multicore features you have to:
          ! * use commercial version of ALGLIB
          ! * call  this  function  with  "smp_"  prefix,  which  indicates  that
          !   multicore code will be used (for multicore support)
          !
          ! In order to use SSE features you have to:
          ! * use commercial version of ALGLIB on Intel processors
          ! * use C++ computational core
          !
          ! This note is given for users of commercial edition; if  you  use  GPL
          ! edition, you still will be able to call smp-version of this function,
          ! but all computations will be done serially.
          !
          ! We recommend you to carefully read ALGLIB Reference  Manual,  section
          ! called 'SMP support', before using parallel version of this function.


        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset in sparse format; one sample = one row:
                        * MATRIX MUST BE STORED IN CRS FORMAT
                        * first NIn columns contain inputs,
                        * for regression problem, next NOut columns store
                          desired outputs.
                        * for classification problem, next column (just one!)
                          stores class number.
            SetSize -   real size of XY, SetSize>=0;
            Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                        * Idx[I] stores row index in the original dataset which is
                          given by XY. Gradient is calculated with respect to rows
                          whose indexes are stored in Idx[].
                        * Idx[]  must store correct indexes; this function  throws
                          an  exception  in  case  incorrect index (less than 0 or
                          larger than rows(XY)) is given
                        * Idx[]  may  store  indexes  in  any  order and even with
                          repetitions.
            SubsetSize- number of elements in Idx[] array:
                        * positive value means that subset given by Idx[] is processed
                        * zero value results in zero gradient
                        * negative value means that full dataset is processed
            Grad      - possibly  preallocated array. If size of array is  smaller
                        than WCount, it will be reallocated. It is  recommended to
                        reuse  previously  allocated  array  to  reduce allocation
                        overhead.

        OUTPUT PARAMETERS:
            E       -   error function, SUM(sqr(y[i]-desiredy[i])/2,i)
            Grad    -   gradient  of  E  with  respect   to  weights  of  network,
                        array[WCount]

        NOTE: when SubsetSize<0 is passed, the full dataset is processed, as  if
              the MLPGradBatchSparse function had been called.
            
          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpgradbatchsparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] idx,
            int subsetsize,
            ref double e,
            ref double[] grad)
        {
            int i = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int npoints = 0;
            int subset0 = 0;
            int subset1 = 0;
            int subsettype = 0;
            smlpgrad sgrad = null;

            e = 0;

            alglib.ap.assert(setsize>=0, "MLPGradBatchSparseSubset: SetSize<0");
            alglib.ap.assert(subsetsize<=alglib.ap.len(idx), "MLPGradBatchSparseSubset: SubsetSize>Length(Idx)");
            alglib.ap.assert(sparse.sparseiscrs(xy), "MLPGradBatchSparseSubset: sparse matrix XY must be in CRS format.");
            npoints = setsize;
            if( subsetsize<0 )
            {
                subset0 = 0;
                subset1 = setsize;
                subsettype = 0;
//......... remainder of this code omitted .........
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 101, Source: dataanalysis.cs
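
A hedged usage sketch through the public wrappers (network shape, dataset values, and subset indexes are all invented for illustration):

 // Sketch: gradient over a two-row subset of a sparse regression dataset.
 alglib.multilayerperceptron net;
 alglib.mlpcreate1(2, 5, 1, out net);       // 2 inputs, 5 hidden neurons, 1 output

 alglib.sparsematrix xy;
 alglib.sparsecreate(4, 3, out xy);         // 4 samples, 2 inputs + 1 output each
 alglib.sparseset(xy, 0, 0, 1.0); alglib.sparseset(xy, 0, 2, 0.5);
 alglib.sparseset(xy, 1, 1, 2.0); alglib.sparseset(xy, 1, 2, 1.5);
 alglib.sparseconverttocrs(xy);             // the matrix MUST be in CRS format

 int[] idx = { 0, 1 };                      // gradient only over rows 0 and 1
 double e;
 double[] grad = new double[0];             // reallocated by the call if too small
 alglib.mlpgradbatchsparsesubset(net, xy, 4, idx, 2, out e, ref grad);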


Example 11: _pexec_mlpgradbatchsparse

 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
 *************************************************************************/
 public static void _pexec_mlpgradbatchsparse(multilayerperceptron network,
     sparse.sparsematrix xy,
     int ssize,
     ref double e,
     ref double[] grad)
 {
     mlpgradbatchsparse(network,xy,ssize,ref e,ref grad);
 }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 11, Source: dataanalysis.cs


Example 12: _pexec_mlpavgrelerrorsparse

 /*************************************************************************
 Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
 *************************************************************************/
 public static double _pexec_mlpavgrelerrorsparse(multilayerperceptron network,
     sparse.sparsematrix xy,
     int npoints)
 {
     return mlpavgrelerrorsparse(network,xy,npoints);
 }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 9, Source: dataanalysis.cs
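
From user code, the parallel path is reached by calling the smp_-prefixed wrapper; on the GPL edition it falls back to the serial stub above. A sketch, assuming net, xy, and npoints are already prepared as in the other examples:

 // Sketch: serial and (potentially) parallel calls compute the same value.
 double err1 = alglib.mlpavgrelerrorsparse(net, xy, npoints);
 double err2 = alglib.smp_mlpavgrelerrorsparse(net, xy, npoints);  // multicore in commercial builds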


Example 13: index

        /*************************************************************************
        Internal subroutine.
        Initialization for preprocessor based on a subsample.

        INPUT PARAMETERS:
            Network -   network initialized with one of the network creation funcs
            XY      -   original dataset, given by sparse matrix;
                        one sample = one row;
                        first NIn columns contain inputs,
                        next NOut columns - desired outputs.
            SetSize -   real size of XY, SetSize>=0;
            Idx     -   subset of SubsetSize elements, array[SubsetSize]:
                        * Idx[I] stores row index in the original dataset which is
                          given by XY. Gradient is calculated with respect to rows
                          whose indexes are stored in Idx[].
                        * Idx[]  must store correct indexes; this function  throws
                          an  exception  in  case  incorrect index (less than 0 or
                          larger than rows(XY)) is given
                        * Idx[]  may  store  indexes  in  any  order and even with
                          repetitions.
            SubsetSize- number of elements in Idx[] array.
            
        OUTPUT:
            Network -   neural network with initialized preprocessor.
            
        NOTE: when SubsetSize<0 is passed, the full dataset is processed, as if
              the MLPInitPreprocessorSparse function had been called.
              
          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpinitpreprocessorsparsesubset(multilayerperceptron network,
            sparse.sparsematrix xy,
            int setsize,
            int[] idx,
            int subsetsize)
        {
            int jmax = 0;
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int ntotal = 0;
            int istart = 0;
            int offs = 0;
            int ntype = 0;
            double[] means = new double[0];
            double[] sigmas = new double[0];
            double s = 0;
            int npoints = 0;
            int i = 0;
            int j = 0;

            alglib.ap.assert(setsize>=0, "MLPInitPreprocessorSparseSubset: SetSize<0");
            if( subsetsize<0 )
            {
                mlpinitpreprocessorsparse(network, xy, setsize);
                return;
            }
            alglib.ap.assert(subsetsize<=alglib.ap.len(idx), "MLPInitPreprocessorSparseSubset: SubsetSize>Length(Idx)");
            npoints = setsize;
            for(i=0; i<=subsetsize-1; i++)
            {
                alglib.ap.assert(idx[i]>=0, "MLPInitPreprocessorSparseSubset: incorrect index of XY row(Idx[I]<0)");
                alglib.ap.assert(idx[i]<=npoints-1, "MLPInitPreprocessorSparseSubset: incorrect index of XY row(Idx[I]>Rows(XY)-1)");
            }
            mlpproperties(network, ref nin, ref nout, ref wcount);
            ntotal = network.structinfo[3];
            istart = network.structinfo[5];
            
            //
            // Means/Sigmas
            //
            if( mlpissoftmax(network) )
            {
                jmax = nin-1;
            }
            else
            {
                jmax = nin+nout-1;
            }
            means = new double[jmax+1];
            sigmas = new double[jmax+1];
            for(i=0; i<=jmax; i++)
            {
                means[i] = 0;
                sigmas[i] = 0;
            }
            for(i=0; i<=subsetsize-1; i++)
            {
                sparse.sparsegetrow(xy, idx[i], ref network.xyrow);
                for(j=0; j<=jmax; j++)
                {
                    means[j] = means[j]+network.xyrow[j];
                }
            }
            for(i=0; i<=jmax; i++)
            {
                means[i] = means[i]/subsetsize;
            }
            for(i=0; i<=subsetsize-1; i++)
//......... remainder of this code omitted .........
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 101, Source: dataanalysis.cs


Example 14: mlpeallerrorssparse

        /*************************************************************************
        Calculation of all types of errors on dataset given by sparse matrix

          -- ALGLIB --
             Copyright 10.09.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpeallerrorssparse(mlpensemble ensemble,
            sparse.sparsematrix xy,
            int npoints,
            ref double relcls,
            ref double avgce,
            ref double rms,
            ref double avg,
            ref double avgrel)
        {
            int i = 0;
            double[] buf = new double[0];
            double[] workx = new double[0];
            double[] y = new double[0];
            double[] dy = new double[0];
            int nin = 0;
            int nout = 0;
            int i_ = 0;
            int i1_ = 0;

            relcls = 0;
            avgce = 0;
            rms = 0;
            avg = 0;
            avgrel = 0;

            nin = mlpbase.mlpgetinputscount(ensemble.network);
            nout = mlpbase.mlpgetoutputscount(ensemble.network);
            if( mlpbase.mlpissoftmax(ensemble.network) )
            {
                dy = new double[1];
                bdss.dserrallocate(nout, ref buf);
            }
            else
            {
                dy = new double[nout];
                bdss.dserrallocate(-nout, ref buf);
            }
            for(i=0; i<=npoints-1; i++)
            {
                sparse.sparsegetrow(xy, i, ref workx);
                mlpeprocess(ensemble, workx, ref y);
                if( mlpbase.mlpissoftmax(ensemble.network) )
                {
                    dy[0] = workx[nin];
                }
                else
                {
                    i1_ = (nin) - (0);
                    for(i_=0; i_<=nout-1;i_++)
                    {
                        dy[i_] = workx[i_+i1_];
                    }
                }
                bdss.dserraccumulate(ref buf, y, dy);
            }
            bdss.dserrfinish(ref buf);
            relcls = buf[0];
            avgce = buf[1];
            rms = buf[2];
            avg = buf[3];
            avgrel = buf[4];
        }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 68, Source: dataanalysis.cs
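
The tail of the routine above just unpacks the five measures accumulated by dserraccumulate()/dserrfinish(). Purely as an illustration of what is being aggregated, here is a standalone sketch of the RMS, average, and average-relative errors for a regression output (this is not ALGLIB's implementation; assumes using System;):

 // Illustrative only: the RMS / average / average-relative error measures.
 static void RegressionErrors(double[] y, double[] dy,
     out double rms, out double avg, out double avgrel)
 {
     double se = 0, ae = 0, re = 0; int nrel = 0;
     for (int i = 0; i < y.Length; i++)
     {
         se += (y[i] - dy[i]) * (y[i] - dy[i]);   // squared error
         ae += Math.Abs(y[i] - dy[i]);            // absolute error
         if (dy[i] != 0) { re += Math.Abs((y[i] - dy[i]) / dy[i]); nrel++; }
     }
     rms = Math.Sqrt(se / y.Length);
     avg = ae / y.Length;
     avgrel = nrel > 0 ? re / nrel : 0;           // averaged over nonzero targets only
 }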


Example 15: user

        /*************************************************************************
        This function sets "current dataset" of the trainer object to  one  passed
        by user (sparse matrix is used to store dataset).

        INPUT PARAMETERS:
            S           -   trainer object
            XY          -   training  set,  see  below  for  information  on   the
                            training set format. This function checks  correctness
                            of  the  dataset  (no  NANs/INFs,  class  numbers  are
                            correct) and throws exception when  incorrect  dataset
                            is passed. Any  sparse  storage  format  can be  used:
                            Hash-table, CRS...
            NPoints     -   points count, >=0

        DATASET FORMAT:

        This  function  uses  two  different  dataset formats - one for regression
        networks, another one for classification networks.

        For regression networks with NIn inputs and NOut outputs, the  following
        dataset format is used:
        * dataset is given by NPoints*(NIn+NOut) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, next NOut columns are outputs

        For classification networks with NIn inputs and NClasses classes,  the
        following dataset format is used:
        * dataset is given by NPoints*(NIn+1) matrix
        * each row corresponds to one example
        * first NIn columns are inputs, last column stores class number (from 0 to
          NClasses-1).
          
          -- ALGLIB --
             Copyright 23.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpsetsparsedataset(mlptrainer s,
            sparse.sparsematrix xy,
            int npoints)
        {
            double v = 0;
            int t0 = 0;
            int t1 = 0;
            int i = 0;
            int j = 0;

            
            //
            // Check correctness of the data
            //
            alglib.ap.assert(s.nin>0, "MLPSetSparseDataset: possible parameter S is not initialized or spoiled(S.NIn<=0).");
            alglib.ap.assert(npoints>=0, "MLPSetSparseDataset: NPoint<0");
            alglib.ap.assert(npoints<=sparse.sparsegetnrows(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(NPoint more then rows of matrix XY)");
            if( npoints>0 )
            {
                t0 = 0;
                t1 = 0;
                if( s.rcpar )
                {
                    alglib.ap.assert(s.nout>=1, "MLPSetSparseDataset: possible parameter S is not initialized or is spoiled(NOut<1 for regression).");
                    alglib.ap.assert(s.nin+s.nout<=sparse.sparsegetncols(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(too few columns in sparse matrix XY).");
                    while( sparse.sparseenumerate(xy, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( i<npoints && j<s.nin+s.nout )
                        {
                            alglib.ap.assert(math.isfinite(v), "MLPSetSparseDataset: sparse matrix XY contains Infinite or NaN.");
                        }
                    }
                }
                else
                {
                    alglib.ap.assert(s.nout>=2, "MLPSetSparseDataset: possible parameter S is not initialized or is spoiled(NClasses<2 for classifier).");
                    alglib.ap.assert(s.nin+1<=sparse.sparsegetncols(xy), "MLPSetSparseDataset: invalid size of sparse matrix XY(too few columns in sparse matrix XY).");
                    while( sparse.sparseenumerate(xy, ref t0, ref t1, ref i, ref j, ref v) )
                    {
                        if( i<npoints && j<=s.nin )
                        {
                            if( j!=s.nin )
                            {
                                alglib.ap.assert(math.isfinite(v), "MLPSetSparseDataset: sparse matrix XY contains Infinite or NaN.");
                            }
                            else
                            {
                                alglib.ap.assert((math.isfinite(v) && (int)Math.Round(v)>=0) && (int)Math.Round(v)<s.nout, "MLPSetSparseDataset: invalid sparse matrix XY(in classifier used nonexistent class number: either XY[.,NIn]<0 or XY[.,NIn]>=NClasses).");
                            }
                        }
                    }
                }
            }
            
            //
            // Set dataset
            //
            s.datatype = 1;
            s.npoints = npoints;
            sparse.sparsecopytocrs(xy, s.sparsexy);
        }
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 96, Source: dataanalysis.cs
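
A typical trainer workflow built around this function might look like the sketch below (a tiny invented regression problem; any sparse storage format works because the function copies the data to CRS internally via sparsecopytocrs):

 // Sketch: attach a sparse dataset to a trainer and train a small regression net.
 alglib.mlptrainer trn;
 alglib.mlpcreatetrainer(2, 1, out trn);    // NIn=2, NOut=1 (regression)

 alglib.sparsematrix xy;
 alglib.sparsecreate(3, 3, out xy);         // 3 samples, 2 inputs + 1 output each
 alglib.sparseset(xy, 0, 0, 1.0); alglib.sparseset(xy, 0, 2, 2.0);
 alglib.sparseset(xy, 1, 1, 1.0); alglib.sparseset(xy, 1, 2, 3.0);
 alglib.sparseset(xy, 2, 0, 0.5); alglib.sparseset(xy, 2, 2, 1.0);

 alglib.mlpsetsparsedataset(trn, xy, 3);    // validates the data and stores a CRS copy

 alglib.multilayerperceptron net;
 alglib.mlpreport rep;
 alglib.mlpcreate1(2, 5, 1, out net);
 alglib.mlptrainnetwork(trn, net, 5, out rep);  // 5 random restarts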


Example 16: mlpgradbatchx

        /*************************************************************************
        Internal function which actually calculates batch gradient for a subset or
        full dataset, which can be represented in different formats.

        THIS FUNCTION IS NOT INTENDED TO BE USED BY ALGLIB USERS!

          -- ALGLIB --
             Copyright 26.07.2012 by Bochkanov Sergey
        *************************************************************************/
        public static void mlpgradbatchx(multilayerperceptron network,
            double[,] densexy,
            sparse.sparsematrix sparsexy,
            int datasetsize,
            int datasettype,
            int[] idx,
            int subset0,
            int subset1,
            int subsettype,
            alglib.smp.shared_pool buf,
            alglib.smp.shared_pool gradbuf)
        {
            int nin = 0;
            int nout = 0;
            int wcount = 0;
            int rowsize = 0;
            int srcidx = 0;
            int cstart = 0;
            int csize = 0;
            int j = 0;
            double problemcost = 0;
            hpccores.mlpbuffers buf2 = null;
            int len0 = 0;
            int len1 = 0;
            hpccores.mlpbuffers pbuf = null;
            smlpgrad sgrad = null;
            int i_ = 0;

            alglib.ap.assert(datasetsize>=0, "MLPGradBatchX: SetSize<0");
            alglib.ap.assert(datasettype==0 || datasettype==1, "MLPGradBatchX: DatasetType is incorrect");
            alglib.ap.assert(subsettype==0 || subsettype==1, "MLPGradBatchX: SubsetType is incorrect");
            
            //
            // Determine network and dataset properties
            //
            mlpproperties(network, ref nin, ref nout, ref wcount);
            if( mlpissoftmax(network) )
            {
                rowsize = nin+1;
            }
            else
            {
                rowsize = nin+nout;
            }
            
            //
            // Split problem.
            //
            // Splitting problem allows us to reduce  effect  of  single-precision
            // arithmetics (SSE-optimized version of MLPChunkedGradient uses single
            // precision  internally, but converts them to  double precision after
            // results are exported from HPC buffer to network). Small batches are
            // calculated in single precision, results are  aggregated  in  double
            // precision, and it allows us to avoid accumulation  of  errors  when
            // we process very large batches (tens of thousands of items).
            //
            // NOTE: it is important to use real arithmetics for ProblemCost
            //       because ProblemCost may be larger than MAXINT.
            //
            problemcost = subset1-subset0;
            problemcost = problemcost*wcount;
            if( subset1-subset0>=2*microbatchsize && (double)(problemcost)>(double)(gradbasecasecost) )
            {
                apserv.splitlength(subset1-subset0, microbatchsize, ref len0, ref len1);
                mlpgradbatchx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0, subset0+len0, subsettype, buf, gradbuf);
                mlpgradbatchx(network, densexy, sparsexy, datasetsize, datasettype, idx, subset0+len0, subset1, subsettype, buf, gradbuf);
                return;
            }
            
            //
            // Chunked processing
            //
            alglib.smp.ae_shared_pool_retrieve(gradbuf, ref sgrad);
            alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
            hpccores.hpcpreparechunkedgradient(network.weights, wcount, mlpntotal(network), nin, nout, pbuf);
            cstart = subset0;
            while( cstart<subset1 )
            {
                
                //
                // Determine size of current chunk and copy it to PBuf.XY
                //
                csize = Math.Min(subset1, cstart+pbuf.chunksize)-cstart;
                for(j=0; j<=csize-1; j++)
                {
                    srcidx = -1;
                    if( subsettype==0 )
                    {
                        srcidx = cstart+j;
                    }
                    if( subsettype==1 )
//......... remainder of this code omitted .........
Developer ID: Kerbas-ad-astra, Project: MechJeb2, Lines of code: 101, Source: dataanalysis.cs
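
The splitting logic in the middle of the routine is ordinary recursive halving driven by a cost estimate. Stripped of the network details, it reduces to a pattern like this sketch (all names hypothetical):

 // Illustrative recursion skeleton: split [lo, hi) until the chunk cost is small enough.
 static void ProcessRange(int lo, int hi, int unitCost, double baseCaseCost, int microBatch)
 {
     double cost = (double)(hi - lo) * unitCost;   // real arithmetic: may exceed int range
     if (hi - lo >= 2 * microBatch && cost > baseCaseCost)
     {
         int mid = lo + (hi - lo) / 2;             // splitlength() picks a similar pivot
         ProcessRange(lo, mid, unitCost, baseCaseCost, microBatch);
         ProcessRange(mid, hi, unitCost, baseCaseCost, microBatch);
         return;
     }
     // ... base case: process items [lo, hi) in one chunk ...
 }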

