Python minimize.minimize Function Code Examples


This article collects typical usage examples of Python's minimize.minimize function. If you are wondering what the minimize function does, how to call it, or what its usage looks like in practice, the curated code examples below should help.



Below are 20 code examples of the minimize function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
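Most of the examples below appear to share the calling convention of the conjugate-gradient optimizer minimize.py (a Python port of Carl Rasmussen's minimize.m): pass an initial parameter vector, an objective function, and a gradient function, plus optional extra arguments and limits on line searches or function evaluations, then take the optimized parameters from the first element of the returned tuple. As orientation before the examples, here is a minimal sketch of that convention. The quadratic objective f and gradient g are purely illustrative, and the keyword names (args, maxnumlinesearch, verbose) and the three-element return value are assumptions inferred from the snippets below, not from any official documentation.

import numpy as np
from minimize import minimize  # assumes a Rasmussen-style minimize.py is on the path

def f(x):
    # illustrative objective: quadratic bowl 0.5 * ||x||^2
    return 0.5 * float(x.dot(x))

def g(x):
    # gradient of the objective, same shape as x
    return x

x0 = np.array([3.0, -2.0])
# unpack (optimized parameters, cost history, number of line searches),
# mirroring how the examples below index the returned tuple
x_opt, costs, n_ls = minimize(x0, f, g, args=(), maxnumlinesearch=20, verbose=False)
print(x_opt)  # expected to approach [0. 0.]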

Example 1: step

    def step(self, *args):
        from minimize import minimize
        updateparams(self.model, minimize(
            self.model.params.copy(), self.cost, self.grad,
            args=args, maxnumfuneval=self.maxfuneval,
            verbose=False)[0].copy())
        Trainer.step(self, *args)
Author: JohnPaton | Project: Master-Thesis | Lines: 7 | Source: train.py


Example 2: minimize

def minimize(text):
    try:
        import jsmin
        return jsmin.jsmin(text)
    except Exception:
        import minimize
        return minimize.minimize(text)
Author: psycrow117 | Project: OpenLayerer | Lines: 7 | Source: build.py


Example 3: fit_nce

    def fit_nce(self, X, k=1, mu_noise=None, L_noise=None,
                mu0=None, L0=None, c0=None, method='minimize',
                maxnumlinesearch=None, maxnumfuneval=None, verbose=False):
        _class = self.__class__
        D, Td = X.shape
        self._init_params(D, mu_noise, L_noise, mu0, L0, c0)

        noise = self._params_noise
        Y = mvn.rvs(noise.mu, noise.L, k * Td).T

        maxnumlinesearch = maxnumlinesearch or DEFAULT_MAXNUMLINESEARCH
        obj = lambda u: _class.J(X, Y, noise.mu, noise.L, *vec_to_params(u))
        grad = lambda u: params_to_vec(
            *_class.dJ(X, Y, noise.mu, noise.L, *vec_to_params(u)))

        t0 = params_to_vec(*self._params_nce)
        if method == 'minimize':
            t_star = minimize(t0, obj, grad,
                              maxnumlinesearch=maxnumlinesearch,
                              maxnumfuneval=maxnumfuneval, verbose=verbose)[0]
        else:
            t_star = sp_minimize(obj, t0, method='BFGS', jac=grad,
                                 options={'disp': verbose,
                                          'maxiter': maxnumlinesearch}).x
        self._params_nce = GaussParams(*vec_to_params(t_star))
        return (self._params_nce, Y)
Author: mcobzarenco | Project: nce-models | Lines: 26 | Source: ncegauss.py


Example 4: Module

def Module(name, filename, munge_globals=True):
    with open(filename, "rb" if p.PY2 else "r") as f:
        code = f.read()
    if args.minimize:
        # in modules only locals are worth optimizing
        code = minimize.minimize(code, True, args.obfuscate and munge_globals, args.obfuscate, args.obfuscate)
    return p.Module(name, code)
Author: AndrewSkat | Project: unrpyc | Lines: 7 | Source: compile.py


Example 5: _fit_with_minimize

    def _fit_with_minimize(self, learning_rate=0.1, weight_decay=0, momentum=0,
                           verbose=True, max_lr_iter=5, isnorm=True):
        big_weight = weight_extend(self)
        big_weight, _, _ = minimize.minimize(big_weight, helper_func_eval,
                                             (self, isnorm),
                                             maxnumlinesearch=3, verbose=False)
        weight_compress(big_weight, self)
        if verbose:
            self.feed_forward()
            return self.empirical_error()
Author: umutekmekci | Project: deepNN | Lines: 7 | Source: NeuralNetwork.py


Example 6: trainNN

def trainNN(inputSize, hid1Size, hid2Size, numClasses, lambda_, inputData, labels, n_iterations=100, displ=True):
    if displ:
        sel = np.random.permutation(inputData.shape[1])
        sel = sel[0:100]
        rbm.displayData(inputData[:, sel].T)
    T1 = debugInitializeWeights(hid1Size, inputSize)
    T2 = debugInitializeWeights(hid2Size, hid1Size)
    T3 = debugInitializeWeights(numClasses, hid2Size)
    b1 = np.zeros((hid1Size, 1))
    b2 = np.zeros((hid2Size, 1))
    b3 = np.zeros((numClasses, 1))
    T = np.concatenate((T1.reshape(len(T1.flatten(1)), 1), 
                        T2.reshape(len(T2.flatten(1)), 1), 
                        T3.reshape(len(T3.flatten(1)), 1),
                        b1, b2, b3))

    NNCost = lambda p: CostFunction(p, inputSize, hid1Size, hid2Size, numClasses, inputData, labels, lambda_)
    T, cost, iteration = minimize.minimize(NNCost, T, n_iterations)

    T1 = T[0:(hid1Size*inputSize)].reshape(hid1Size,inputSize)
    T2 = T[(hid1Size*inputSize):(hid1Size*inputSize)+(hid2Size*hid1Size)].reshape(hid2Size,hid1Size)
    T3 = T[(hid1Size*inputSize)+(hid2Size*hid1Size):(hid1Size*inputSize)+(hid2Size*hid1Size)+(
         hid2Size*numClasses)].reshape(numClasses,hid2Size)

    pred = predict(T1, T2, T3, inputData)
    return pred
Author: andfoy | Project: NNLib | Lines: 26 | Source: NNLib.py


Example 7: process_data

def process_data(inputs, values):  # function that actually runs the neural network
    _beta = 2  # penalty on data dispersion; limits the dispersion of the model
    _lambda = 1e-4  # limits the variation of the weights (weight decay)
    _epsilon = 0.1  # avoids zero eigenvalues in the matrix
    _sparsityParam = 0.6  # desired average activation of each neuron, between 0 and 1
    num_iter = 5000  # maximum number of iterations

    inputSize = inputs.shape[0]  # number of input variables, 6 in this case
    m = inputs.shape[1]  # number of training cases
    hiddenSize = 180  # number of hidden neurons ("hidden" because it is not obvious what they do)
    outputSize = 1  # output dimension; 1 here because this is a regression problem

    theta = initializeParameters(outputSize, hiddenSize, inputSize)  # initializes the weights and biases of the network
    # and returns a vector of dimension hidden*input + hidden*output + hidden + output
    inputs, meanInput, ZCAWhite = preProcess(inputs, _epsilon)  # parameter initialization
    # returns random numbers as a first approximation
    costF = lambda p: cost.sparseLinearNNCost(p, inputSize, hiddenSize, outputSize, _lambda, _sparsityParam, _beta, inputs, values)  # defines the cost function, which receives the parameter vector theta

    optTheta, costV, i = minimize.minimize(costF, theta, maxnumlinesearch=num_iter)
    pred = cost.predict(inputs, optTheta, inputSize, hiddenSize, outputSize)

    diff = np.linalg.norm(pred - values) / np.linalg.norm(pred + values)  # relative difference between predictions and targets

    print("RMSE: %g" % diff)

    np.savez('parameters.npz', optTheta=optTheta, meanInput=meanInput, ZCAWhite=ZCAWhite)
Author: valentinaqf94 | Project: CM20151_HW8_ValentinaQuiroga | Lines: 27 | Source: linear.py


Example 8: optimize_gp_with_minimize

def optimize_gp_with_minimize(gp, params):
  objective_function = progapy.gp.gp_neglogposterior_using_free_params
  grad_function      = progapy.gp.gp_neglogposterior_grad_wrt_free_params

  best_p, v, t = minimize(gp.get_free_params(),
                          objective_function,
                          grad_function,
                          [gp],
                          maxnumlinesearch=params["maxnumlinesearch"])
  print(best_p)
  gp.set_free_params(best_p)
Author: tedmeeds | Project: progapy | Lines: 12 | Source: optimize.py


Example 9: minimizeLayer3

    def minimizeLayer3(self, inputData, targets, max_iter):
        layer2out = self.recognize012(inputData)

        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten(( self.W[3], self.hB[3] ))

        (X, fX, iters) = cg.minimize(VV, backprop_only3, (Dim, layer2out, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[3]  = matrices[0]
        self.hB[3] = matrices[1]
Author: Wizcorp | Project: Eruditio | Lines: 12 | Source: NeuralNetwork.py


Example 10: train_cg

    def train_cg(self, features, labels, weightcost, maxnumlinesearch=numpy.inf, verbose=False):
        """Train the model using conjugate gradients.
  
           Like train() but faster. Uses minimize.py for the optimization. 
        """

        from minimize import minimize
        p, g, numlinesearches = minimize(self.params.copy(), 
                                         self.f, 
                                         self.g, 
                                         (features, labels, weightcost), maxnumlinesearch, verbose=verbose)
        self.updateparams(p)
        return numlinesearches
Author: LeonBai | Project: lisa_emotiw-1 | Lines: 13 | Source: logreg.py


Example 11: learn

def learn(shape_theta, shape_x, y, r, reg_lambda, n_iter):
    num_movies = y.shape[0]
    num_users = y.shape[1]

    # Normalize Ratings
    y_mean = (y.sum(axis=1)/r.sum(axis=1)).reshape((-1, 1))
    y = y - y_mean.dot(np.ones((1, num_users)))

    param_0 = np.random.randn(np.product(shape_theta) + np.product(shape_x))

    # optimize
    opt, cost, i = minimize(lambda dna: cost_function(dna, shape_theta, shape_x, y, r, reg_lambda),
                            param_0,
                            n_iter)

    theta, x = fold(opt, shape_theta, shape_x)
    return theta, x, y_mean
Author: Seratna | Project: Machine-Learning | Lines: 17 | Source: collaborative_filtering.py


Example 12: minimizeAllLayers

    def minimizeAllLayers(self, inputData, targets, max_iter):
        #### Flatten all of our parameters into a 1-D array
        (VV, Dim) = multiFlatten((  self.W[0], self.hB[0],
                                    self.W[1], self.hB[1],
                                    self.W[2], self.hB[2],
                                    self.W[3], self.hB[3]  ))

        (X, fX, iters) = cg.minimize(VV, backprop, (Dim, inputData, targets), max_iter)

        #### Un-Flatten all of our parameters from the 1-D array
        matrices = multiUnFlatten(X, Dim)
        self.W[0]  = matrices[0]
        self.hB[0] = matrices[1]
        self.W[1]  = matrices[2]
        self.hB[1] = matrices[3]
        self.W[2]  = matrices[4]
        self.hB[2] = matrices[5]
        self.W[3]  = matrices[6]
        self.hB[3] = matrices[7]
Author: Wizcorp | Project: Eruditio | Lines: 19 | Source: NeuralNetwork.py


Example 13: train

    def train(self, x, y, reg_lambda, n_iter):
        """
        Use an optimization algorithm to learn a good set of parameters from the training data x and answers y.
        """
        # initialize the gradient and its per-layer entries
        grad = np.zeros_like(self.dna)
        for layer in self.layers:
            # for each layer, set the entry of gradient, through which the gradient will be updated
            layer.grad = grad[layer.pointer: layer.pointer+layer.theta.size].reshape(layer.theta.shape)

        # optimize
        opt, cost, i = minimize(lambda dna: (self.learn(dna, x, y, reg_lambda), np.array(grad)), self.dna, n_iter)
        # TODO optimize.fmin_cg implementation
        # opt = optimize.fmin_cg(f=lambda dna: self.learn(dna, x, y, reg_lambda),  # cost function
        #                        x0=self.dna,  # initial set of parameters
        #                        fprime=lambda t: (np.array(grad),)[0],  # gradient
        #                        maxiter=n_iter)  # number of iteration

        # update dna
        self.dna[:] = opt
Author: Seratna | Project: Machine-Learning | Lines: 20 | Source: bp_network.py


Example 14: manifold_traversal

def manifold_traversal(F,N,M,weights,max_iter=5,rbf_var=1e4,verbose=True,checkgrad=True,checkrbf=True):
  # returns two arrays, xpr and r
  #   xpr is optimized x+r
  #   r is optimized r
  # multiply by F to get latent space vector
  if verbose:
    print('manifold_traversal()')
    print('F',F.shape,F.dtype,F.min(),F.max())
    print('N',N)
    print('M',M)
    print('weights',weights)

  xpr_result=[]
  r_result=[]
  r=np.zeros(len(F))
  x=np.zeros(len(F))
  FFT=F.dot(F.T) # K x K
  x[-1]=1
  for weight in weights:

    if checkgrad:
      def f(*args):
        return witness_fn2(*args)[0]
      def g(*args):
        return witness_fn2(*args)[1]
      print('Checking gradient ...')
      err=scipy.optimize.check_grad(f,g,r,*(x,FFT,N,M,rbf_var,weight,False,True))
      print('gradient error',err)
      assert err<1e-5

    r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn2,(x,FFT,N,M,rbf_var,weight,verbose,checkrbf),maxnumlinesearch=50,maxnumfuneval=None,red=1.0,verbose=True)
    if verbose:
      print('r_opt',r_opt.shape,r_opt.dtype,r_opt.min(),r_opt.max(),np.linalg.norm(r_opt))
      print('r_opt values',r_opt[:5],'...',r_opt[N:N+5],'...',r_opt[-1])
    xpr_result.append(x+r_opt)
    r_result.append(r_opt)
    r=r_opt
  return np.asarray(xpr_result),np.asarray(r_result)
Author: awg66 | Project: deepmanifold | Lines: 38 | Source: matchmmd.py


Example 15: set_params

    set_params(model_ft.models_stack[-1], tmp)
    return result

fun_grad = theano.function(
    [model_ft.varin, model_ft.models_stack[-1].vartruth],
    T.grad(model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay),
           model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
    tmp = get_params(model_ft.models_stack[-1])
    set_params(model_ft.models_stack[-1], test_params)
    result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
    set_params(model_ft.models_stack[-1], tmp)
    return result
p, g, numlinesearches = minimize(
    get_params(model_ft.models_stack[-1]), return_cost, return_grad,
    (train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy')
print "***error rate: train: %f, test: %f" % (
    train_set_error_rate(), test_set_error_rate()
)

#############
# FINE-TUNE #
#############

"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
trainer = GraddescentMinibatch(
Author: hantek | Project: zlinnet | Lines: 32 | Source: expr_cifar10_ZLIN_normhid_nolinb_dropout.py


Example 16: len

            configFilename = config_file + ".cfg"

    if output_file:
        outputFilename = output_file

    print("Merging libraries.")
    if use_compressor == "closure":
        sourceFiles = mergejs.getNames(sourceDirectory, configFilename)
    else:
        merged = mergejs.run(sourceDirectory, None, configFilename)

    print("Compressing using %s" % use_compressor)
    if use_compressor == "jsmin":
        minimized = jsmin.jsmin(merged)
    elif use_compressor == "minimize":
        minimized = minimize.minimize(merged)
    elif use_compressor == "closure_ws":
        if len(merged) > 1000000: # The maximum file size for this web service is 1000 KB.
            print("\nPre-compressing using jsmin")
            merged = jsmin.jsmin(merged)
        print("\nIs being compressed using Closure Compiler Service.")
        try:
            minimized = closure_ws.minimize(merged)
        except Exception as E:
            print("\nAbnormal termination.")
            sys.exit("ERROR: Closure Compilation using Web service failed!\n%s" % E)
        if len(minimized) <= 2:
            print("\nAbnormal termination due to compilation errors.")
            sys.exit("ERROR: Closure Compilation using Web service failed!")
        else:
            print("Closure Compilation using Web service has completed successfully.")
Author: ACassells | Project: openlayers | Lines: 31 | Source: build.py


Example 17: manifold_traversal2

def manifold_traversal2(FFT,N,M,L,weights,max_iter=5,rbf_var=1e4,verbose=False,checkgrad=True,checkrbf=True,maxnumlinesearch=25,initialize_KQ=None):
  # returns two arrays, xpr and r
  #   xpr is optimized x+r
  #   r is optimized r
  # multiply by F to get latent space vector
  if verbose:
    print('manifold_traversal2()')
    print('FFT',FFT.shape,FFT.dtype,FFT.min(),FFT.max())
    print('N',N)
    print('M',M)
    print('L',L)
    print('weights',weights)

  #FFT=F.dot(F.T) # K x K
  xpr_result=[]
  r_result=[]
  r=np.zeros(len(FFT))
  x=np.zeros(len(FFT))
  x[-1]=1
  K=N+M+L+1
  P=np.eye(N,K)
  Q=np.concatenate([np.zeros((M,N)),np.eye(M,M+L+1)],axis=1)
  BP=FFT[:,:N] # FFT.dot(P.T) # K x N
  BQ=FFT[:,N:N+M] # FFT.dot(Q.T) # K x M
  CP=np.array([FFT[i,i] for i in range(N)]) # np.array([P[i].dot(FFT).dot(P[i].T) for i in range(N)])
  CQ=np.array([FFT[N+i,N+i] for i in range(M)]) # np.array([Q[i].dot(FFT).dot(Q[i].T) for i in range(M)])

  if initialize_KQ is not None:
    assert 0 < initialize_KQ < 1
    KQ=witness_fn3_KQ(r,x,FFT,BQ,CQ,N,M,L,rbf_var)
    rbf_var*=math.log(KQ.mean())/math.log(initialize_KQ)
    if verbose:
      print('Setting sigma^2 = {}'.format(rbf_var))

  for weight in weights:

    if checkgrad and weight==weights[0]:
      def f(*args):
        return witness_fn3(*args)[0]
      def g(*args):
        return witness_fn3(*args)[1]
      print('Checking gradient ...')
      est_grad=scipy.optimize.approx_fprime(r,f,math.sqrt(np.finfo(float).eps),*(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,False))
      #print('est. gradient',est_grad)
      fn_grad=g(r,x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,True)
      #print('gradient',fn_grad)
      #print('isclose',np.isclose(est_grad,fn_grad,rtol=1e-4,atol=1e-7))
      assert np.allclose(est_grad,fn_grad,rtol=1e-4,atol=1e-5)
      #err=scipy.optimize.check_grad(f,g,r,*(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,False,False))
      #print('gradient error',err)
      #assert err<1e-5
      print('passed.')

    t0=time.time()
    r_opt,loss_opt,iter_opt=minimize.minimize(r,witness_fn3,(x,FFT,BP,BQ,CP,CQ,N,M,L,rbf_var,weight,verbose,checkrbf),maxnumlinesearch=maxnumlinesearch,maxnumfuneval=None,red=1.0,verbose=False)
    t1=time.time()
    if verbose:
      #print('r_opt',r_opt.shape,r_opt.dtype)
      print('r_opt mean P value',r_opt[:N].mean(),r_opt[:N].var())
      print('r_opt mean Q value',r_opt[N:N+M].mean(),r_opt[N:N+M].var())
      if L>0:
        print('r_opt mean T value',r_opt[N+M:N+M+L].mean(),r_opt[N+M:N+M+L].var())
      print('r_opt X value',r_opt[-1])
      print('Optimized in {} minutes.'.format((t1-t0)/60.0))
    xpr_result.append(x+r_opt)
    r_result.append(r_opt)
    r=r_opt
  return np.asarray(xpr_result),np.asarray(r_result)
Author: paulu | Project: deepmanifold | Lines: 68 | Source: matchmmd.py


Example 18: filtering


#......... part of the code omitted here .........
    #compute 3 master curves of data set
    masters = np.zeros((3, 20))
    medge = np.zeros((4, 20))
    n_b = 3
    
    #bin the gate values of all curves at every gate
    for gate in range(20):
        bmd, bed, c = stats.binned_statistic(np.sort(data_c[:,9+gate], axis=0),
        np.sort(data_c[:,9+gate], axis=0), statistic=np.median, bins=n_b)
        masters[:,gate] = bmd
        medge[:,gate] = bed
    
    #compute the integral chargeability of the master curves -> used for next steps   
    masters_ints = np.zeros((3,1))
    for ints in range(3):
        masters_ints[ints] = np.mean(masters[ints,:])
    
    #with filtered data, compute mean decay curve of data set
    m_mean = np.zeros(20)
    ms = data_c[:,9:29]
    for ll in range(len(m_mean)):
        m_mean[ll] = np.median(ms[:,ll])
            
    #compute the deviation of each single decay from the mean decay, then calculate the fit
    for line in range(len(data)):
        mi = data[line,9:29]    
        if fit_param[line, -1] == 0:
            f = pow2(ipw, fit_param[line, 0],  fit_param[line, 1], fit_param[line, 2])
        else:
            f = pow2m(ipw, fit_param[line, 0],  fit_param[line, 1], fit_param[line, 2])
        
            
        #compute rms between mean decay and fit on data    
        rmsfm[line], nn = mz.minimize(m_mean, ipw, f)
    
        #compute rms between mean decay and measured data  
        rmsmm[line], ipw_misfit_mm[line] = mz.minimize(m_mean, ipw, mi)
        
        #compute distances of the measured integral chargeability to the master curves
        #in order to find the nearest master curve
        dists = np.zeros((3,1))
        for dist in range(3):
            dists[dist] = abs(np.mean(mi)-masters_ints[dist])
            
        #get index of shortest distance    
        idx = np.argmin(dists) 
        
        #compute rms between measured decay and nearest master curve
        rmsmmaster[line], x = mz.minimize(masters[idx], ipw, mi)
        
        #compute rms between fit on measured decay and nearest master curve
        rmsfmaster[line], x = mz.minimize(masters[idx], ipw, f)
            
    #storing rms/deviation values
    mean_xc = (xc1 + xc2)/2
    mean_rms = (rms_1 + rms_2)/2
    error = np.concatenate( \
    (rms_1, rms_2, mean_rms, xc1, xc2, mean_xc, rmsfm, rms_misfit,
    linrg, rmsmmaster, dev_res, rmsfmaster, dev_pha_af, rmsmm, ipw_misfit, ipw_misfit_mm),
    axis=1)


    frags = path.split('/')
    lid = frags[-1][:-4]
    
    #write error parameters to file
Author: commun108 | Project: dca_master | Lines: 67 | Source: filtering_func.py


Example 19: conjgrad

def conjgrad(im, maxnumlinesearch=10, imshape=styleimage.shape):
    import minimize
    im_flat, fs, numlinesearches = minimize.minimize(
        im.flatten(),
        lambda x: cost(x.reshape(imshape)),
        lambda x: grad(x.reshape(imshape)).flatten(),
        args=[], maxnumlinesearch=maxnumlinesearch, verbose=False)
    return im_flat.reshape(imshape)
Author: Godweed | Project: artify | Lines: 4 | Source: artify_interactive.py


Example 20: Exec

def Exec(code):
    if args.minimize:
        # In exec, we should always munge globals
        code = minimize.minimize(code, True, True, args.obfuscate, args.obfuscate)
    return p.Exec(code)
Author: AndrewSkat | Project: unrpyc | Lines: 5 | Source: compile.py



Note: The minimize.minimize function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.

