
Python theano.shared Function Code Examples


This article collects typical usage examples of the theano.shared function in Python. If you are unsure how to call theano.shared, what it is used for, or what real code that uses it looks like, the curated examples below may help.



The sections below present 20 code examples of the shared function, sorted by popularity by default.
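Before turning to the project examples, here is a minimal self-contained sketch (not drawn from any of the projects below) of what theano.shared provides: a value that lives outside the symbolic graph, keeps its state between function calls, and can be updated in place through the updates argument of theano.function.

import numpy as np
import theano
import theano.tensor as T

# Wrap a NumPy value in a shared variable; its state persists across calls.
counter = theano.shared(np.float32(0.0), name='counter')
step = T.fscalar('step')

# `updates` tells Theano to overwrite the shared value after each call.
accumulate = theano.function([step], counter, updates=[(counter, counter + step)])

accumulate(1.0)              # returns the old value (0.0), then stores 1.0
print(counter.get_value())   # -> 1.0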

Example 1: __init__

    def __init__(self, input, n_in, n_out):
        """ Initialize the parameters of the logistic regression

        :type input: theano.tensor.TensorType
        :param input: symbolic variable that describes the input of the
                      architecture (one minibatch)

        :type n_in: int
        :param n_in: number of input units, the dimension of the space in
                     which the datapoints lie

        :type n_out: int
        :param n_out: number of output units, the dimension of the space in
                      which the labels lie

        """

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        self.W = theano.shared(value=numpy.zeros((n_in, n_out),
                                                 dtype=theano.config.floatX),
                               name='W', borrow=True)
        # initialize the biases b as a vector of n_out 0s
        self.b = theano.shared(value=numpy.zeros((n_out,),
                                                 dtype=theano.config.floatX),
                               name='b', borrow=True)

        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        # compute prediction as class whose probability is maximal in
        # symbolic form
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)

        # parameters of the model
        self.params = [self.W, self.b]
Author: vivanac | Project: DeepLearningTutorials | Lines: 35 | Source: logistic_sgd.py
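A hypothetical usage sketch for the layer above (the class name LogisticRegression and the MNIST-sized dimensions are assumptions, not part of the quoted snippet):

x = T.matrix('x')                                # symbolic minibatch of inputs
clf = LogisticRegression(input=x, n_in=28 * 28, n_out=10)

# W and b are shared variables, so they can be inspected or serialized directly.
print(clf.W.get_value().shape)                   # -> (784, 10)
predict = theano.function([x], clf.y_pred)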


Example 2: adam

def adam(loss, all_params, learn_rate=0.001, b1=0.9, b2=0.999, e=1e-8, gamma=1-1e-8):
    """ADAM update rules

    Kingma, Diederik, and Jimmy Ba. "Adam: A Method for Stochastic Optimization." arXiv preprint arXiv:1412.6980 (2014). http://arxiv.org/pdf/1412.6980v4.pdf
    """
    updates = []
    all_grads = theano.grad(loss, all_params)
    alpha = learn_rate
    t = theano.shared(np.float32(1.))
    b1_t = b1 * gamma ** (t - 1.)   # decay the first moment running average coefficient

    for theta_prev, g in zip(all_params, all_grads):
        m_prev = theano.shared(np.zeros(theta_prev.get_value().shape, dtype=theano.config.floatX))
        v_prev = theano.shared(np.zeros(theta_prev.get_value().shape, dtype=theano.config.floatX))

        m = b1_t * m_prev + (1. - b1_t) * g  # update biased first moment estimate
        v = b2 * v_prev + (1. - b2) * g ** 2  # update biased second raw moment estimate
        m_hat = m / (1. - b1 ** t)  # compute bias-corrected first moment estimate
        v_hat = v / (1. - b2 ** t)  # compute bias-corrected second raw moment estimate
        theta = theta_prev - (alpha * m_hat) / (T.sqrt(v_hat) + e)  # update parameters

        updates.append((m_prev, m))
        updates.append((v_prev, v))
        updates.append((theta_prev, theta) )
    updates.append((t, t + 1.))
    return updates
Author: gw0 | Project: conll15st-v02-single-tagger | Lines: 26 | Source: run.py
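The returned list of (variable, expression) pairs is meant to be handed to theano.function. A hedged sketch of the calling pattern, where x, y, loss and params are placeholders for a concrete model:

# params: list of theano.shared parameters; loss: scalar expression built from x and y
train_step = theano.function(inputs=[x, y], outputs=loss,
                             updates=adam(loss, params, learn_rate=1e-3))
# Each call train_step(batch_x, batch_y) applies one Adam update to every parameter.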


Example 3: __init__

    def __init__(self, n_in, n_out, W_init=None, b_init=None,
                 activation=T.tanh):
        self.activation = activation
        if W_init is None:
            rng = numpy.random.RandomState(1234)
            W_values = numpy.asarray(rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W_init = theano.shared(value=W_values, name='W', borrow=True)

        if b_init is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b_init = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W_init
        self.b = b_init
        # parameters of the model
        self.params = [self.W, self.b]
Author: hx364 | Project: Synonym_Extraction | Lines: 25 | Source: mlp.py


Example 4: shared

 def shared(data):
     """ Place the data into shared variables. This allows Theano to copy
     the data to the GPU, if one is available.
     """
     shared_x = theano.shared(numpy.asarray(data[:,0].tolist(), dtype=theano.config.floatX), borrow=True)
     shared_y = theano.shared(numpy.asarray(data[:,1].tolist(), dtype=theano.config.floatX), borrow=True)
     return shared_x, T.cast(shared_y, "int32")
Author: Nueard | Project: Genetic-algorithms-neural-networks | Lines: 7 | Source: DataLoader.py
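A hedged usage sketch, assuming training_data is a two-column object array of (feature vector, integer label) rows, as the slicing above suggests:

train_x, train_y = shared(training_data)   # training_data is a placeholder name
# train_x is a float shared variable (GPU-resident if a GPU is configured);
# train_y is the same storage viewed as int32 through T.cast, since labels must be integers.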


Example 5: __init__

    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        self.input = input[0]

        # initialize weights into this layer
        if W is None:
            W_values = np.asarray(
                rng.uniform(
                    size=(n_in, n_out),
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W = theano.shared(value=W_values, name='W', borrow=True)

        # initialize bias term weights into this layer
        if b is None:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)

        self.W = W
        self.b = b

        lin_output = T.dot(self.input, self.W) + self.b
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )

        self.params = [self.W, self.b]
Author: frw | Project: 2048-DRL | Lines: 34 | Source: neural_network.py


Example 6: __init__

 def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
              activation_fn=sigmoid):
     """`filter_shape` is a tuple of length 4, whose entries are the number
     of filters, the number of input feature maps, the filter height, and the
     filter width.
     `image_shape` is a tuple of length 4, whose entries are the
     mini-batch size, the number of input feature maps, the image
     height, and the image width.
     `poolsize` is a tuple of length 2, whose entries are the y and
     x pooling sizes.
     """
     self.filter_shape = filter_shape
     self.image_shape = image_shape
     self.poolsize = poolsize
     self.activation_fn=activation_fn
     # initialize weights and biases
     n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
     self.w = theano.shared(
         np.asarray(
             np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
             dtype=theano.config.floatX),
         borrow=True)
     self.b = theano.shared(
         np.asarray(
             np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
             dtype=theano.config.floatX),
         borrow=True)
     self.params = [self.w, self.b]
Author: abhmul | Project: Gender-Identifier | Lines: 28 | Source: network3.py


Example 7: init_conv_filters

    def init_conv_filters(self, numpy_rng, D, poolsize):
        ''' Convolutional Filters '''
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(self.filter_shape[1:])

        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" pooling size
        fan_out = (self.filter_shape[0] * np.prod(self.filter_shape[2:]) /
                   np.prod(poolsize))

        # initialize weights with random weights
        W_bound = np.sqrt(6. / (fan_in + fan_out))

        self.W = theano.shared(
            init_conv_weights(-W_bound, W_bound, self.filter_shape, numpy_rng),
            borrow=True, name='W_conv')

        #b_values = np.zeros((self.filter_shape[0],), dtype=theano.config.floatX)
        #self.b = theano.shared(value=b_values, borrow=True, name='b_conv')

        c_values = np.zeros((self.filter_shape[1],), dtype=theano.config.floatX)
        self.c = theano.shared(value=c_values, borrow=True, name='b_conv')

        self.params = [self.W, self.c]
Author: Thelordofdream | Project: GRAN | Lines: 25 | Source: conv_layer.py


Example 8: stack_and_shared

def stack_and_shared(input):
    """
    This will take a list of input variables, turn them into theano shared variables, and return them stacked
    in a single tensor.

    Parameters
    ----------
    input : list or object
        List of input variables to stack into a single shared tensor.

    Returns
    -------
    tensor
        Symbolic tensor of the input variables stacked, or None if input was None.
    """
    if input is None:
        return None
    elif isinstance(input, list):
        shared_ins = []
        for _in in input:
            try:
                shared_ins.append(theano.shared(_in))
            except TypeError as _:
                shared_ins.append(_in)
        return T.stack(shared_ins)
    else:
        try:
            _output = [theano.shared(input)]
        except TypeError as _:
            _output = [input]
        return T.stack(_output)
Author: JediKoder | Project: OpenDeep | Lines: 31 | Source: misc.py
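A hedged usage sketch: plain numeric values are wrapped with theano.shared, anything Theano cannot wrap (for example an existing symbolic variable) falls back to being used as-is, and the results are stacked along a new axis:

stacked = stack_and_shared([np.ones(3), np.zeros(3)])
print(stacked.eval().shape)   # -> (2, 3)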


Example 9: check_parameter

def check_parameter(name, value):
    parameters = set()
    constants = set()
    observeds = set()

    if isinstance(value, SharedVariable):
        parameters.add(value)
    elif isinstance(value, T.TensorConstant):
        constants.add(value)
    elif isinstance(value, T.TensorVariable):
        inputs = graph.inputs([value])

        for var in inputs:
            if isinstance(var, SharedVariable):
                parameters.add(var)
            elif isinstance(var, T.TensorConstant):
                constants.add(var)
            elif isinstance(var, T.TensorVariable):
                if not var.name:
                    raise ValueError("Observed variables must be named.")
                observeds.add(var)
    else:
        # XXX allow for lists and convert them to ndarray

        if isinstance(value, np.ndarray):
            value = theano.shared(value, name=name)
        else:
            value = theano.shared(float(value), name=name)

        parameters.add(value)

    return value, parameters, constants, observeds
Author: glouppe | Project: carl | Lines: 32 | Source: base.py
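A hedged sketch of the behaviour for a plain Python number (the names are placeholders): the value is promoted to a named shared variable and reported as a parameter.

mu, params, constants, observeds = check_parameter('mu', 0.5)
print(mu.get_value())   # -> 0.5, now stored in a shared variable named 'mu'
print(len(params))      # -> 1; constants and observeds remain empty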


Example 10: setup

 def setup(self, prev_layer):
     self.input_layer = prev_layer
     self.input = prev_layer.output
     self.W = theano.shared(
         np.random.random((self.input_layer.output_shape, self.output_shape))
         .astype(theano.config.floatX) * .01)
     self.b = theano.shared(np.zeros(self.output_shape, dtype=theano.config.floatX))
     self.params = (self.W, self.b)
     self.output = self.activation(T.dot(self.input, self.W) + self.b.dimshuffle('x', 0))
Author: madisonmay | Project: TheanoAutoencoder | Lines: 7 | Source: layers.py


Example 11: __init__

  def __init__(self, kernel, max_iter = 10, max_diff = None):
    """

    :param kernel: a function with a signature (expected, observed) -> a similarity measure
    that accepts symbolic theano expressions and returns them accordingly.
    See `crayimage.hotornot.em.kernels` for examples.
    :param max_iter: maximal number of iteration
    :param max_diff: stop iterations if maximal difference in weights from the previous iteration is smaller than `max_diff`.
    If None the check is not performed.
    """
    self.original_shape = None

    self.kernel = kernel
    self.max_iter = max_iter
    self.max_diff = max_diff

    self.X = theano.shared(
      np.zeros(shape=(0, 0), dtype='float32')
    )

    self.weights = theano.shared(
      np.ones(shape=(0, ), dtype='float32')
    )

    canonical = T.sum(self.weights[:, None] * self.X, axis=0) / T.sum(self.weights)

    weights_updates = self.kernel(canonical, self.X)
    weights_diff = T.max(abs(weights_updates - self.weights))

    upd = {
      self.weights : weights_updates
    }

    self.iteration = theano.function([], weights_diff if max_diff is not None else [], updates=upd)
    self.get_canonical = theano.function([], canonical)
Author: maxim-borisyak | Project: crayimage | Lines: 35 | Source: fast_em.py
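A hedged sketch of the fitting loop the constructor prepares for, written as it might appear inside a hypothetical fit() method of the same class (images is a placeholder array of shape (n_samples, n_pixels)):

# Load the data and reset the per-sample weights held in the shared variables.
self.X.set_value(images.astype('float32'))
self.weights.set_value(np.ones(images.shape[0], dtype='float32'))

for _ in range(self.max_iter):
    diff = self.iteration()   # one EM step; returns the weight change when max_diff is set
    if self.max_diff is not None and diff < self.max_diff:
        break
canonical = self.get_canonical()   # weighted average over the rows of X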


Example 12: adadelta

def adadelta(lr, tparams, grads, inp, cost, extra_ups=[], extra_outs=[],
             exclude_params=set([])):
    '''Adadelta'''
    zipped_grads = [theano.shared(p.get_value() * np.float32(0.), name='%s_grad'%k)
                    for k, p in tparams.iteritems()]
    running_up2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rup2'%k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * np.float32(0.), name='%s_rgrad2'%k)
                      for k, p in tparams.iteritems()]

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
        for rg2, g in zip(running_grads2, grads)]

    f_grad_shared = theano.function(
        inp, [cost]+extra_outs, updates=zgup+rg2up+extra_ups, profile=profile)

    updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2, running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
        for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(tools.itemlist(tparams), updir)
        if p.name not in exclude_params]

    if not isinstance(lr, list): lr = [lr]
    f_update = theano.function(lr, [], updates=ru2up+param_up,
        on_unused_input='ignore', profile=profile)

    return f_grad_shared, f_update
Author: Jeremy-E-Johnson | Project: cortex | Lines: 29 | Source: op.py
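A hedged sketch of the two-phase training loop implied by the two returned functions (lr, tparams, grads, inp, cost, batches and lr_value are placeholders):

f_grad_shared, f_update = adadelta(lr, tparams, grads, inp, cost)

for batch_inputs in batches:
    cost_value = f_grad_shared(*batch_inputs)   # forward/backward pass; gradients cached in shared storage
    f_update(lr_value)                          # apply the Adadelta step to the parameters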


Example 13: _init_params

    def _init_params(self):
        self.W_hhs = []
        self.W_shortp = []
        for dx in xrange(self.n_layers):
            W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
                                        self.n_hids[dx],
                                        self.sparsity[dx],
                                        self.scale[dx],
                                        rng=self.rng)
            self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
                                       (dx,self.name)))

            if dx > 0:
                W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
                                         self.n_hids[dx],
                                         self.sparsity[dx],
                                         self.scale[dx],
                                         rng=self.rng)
                self.W_shortp.append(theano.shared(value=W_shp,
                                               name='W_s%d_%s'%(dx,self.name)))
        self.params = [x for x in self.W_hhs] +\
                [x for x in self.W_shortp]

        self.params_grad_scale = [self.grad_scale for x in self.params]
        self.restricted_params = [x for x in self.params]

        if self.weight_noise:
            self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
            self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]

            self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
Author: mkudinov | Project: GroundHog | Lines: 32 | Source: rec_layers.py


Example 14: __init__

    def __init__(self, network, **kwargs):
        # due to the way that theano handles updates, we cannot update a
        # parameter twice during the same function call. so, instead of handling
        # everything in the updates for self.f_learn(...), we split the
        # parameter updates into two function calls. the first "prepares" the
        # parameters for the gradient computation by moving the entire model one
        # step according to the current velocity. then the second computes the
        # gradient at that new model position and performs the usual velocity
        # and parameter updates.

        self.params = network.params(**kwargs)
        self.momentum = kwargs.get('momentum', 0.5)

        # set up space for temporary variables used during learning.
        self._steps = []
        self._velocities = []
        for param in self.params:
            v = param.get_value()
            n = param.name
            self._steps.append(theano.shared(np.zeros_like(v), name=n + '_step'))
            self._velocities.append(theano.shared(np.zeros_like(v), name=n + '_vel'))

        # step 1. move to the position in parameter space where we want to
        # compute our gradient.
        prepare = []
        for param, step, velocity in zip(self.params, self._steps, self._velocities):
            prepare.append((step, self.momentum * velocity))
            prepare.append((param, param + step))

        logging.info('compiling NAG adjustment function')
        self.f_prepare = theano.function([], [], updates=prepare)

        super(NAG, self).__init__(network, **kwargs)
Author: majidaldo | Project: theano-nets | Lines: 33 | Source: trainer.py
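A hedged sketch of the two-phase step described in the comment above; apart from f_prepare, the constructor signature and the f_learn call are assumptions about the surrounding trainer API:

trainer = NAG(network, momentum=0.9)
trainer.f_prepare()   # phase 1: jump ahead along the current velocity
# phase 2 (gradient at the look-ahead point, then velocity and parameter updates)
# is performed by the learning function mentioned in the comment, e.g. trainer.f_learn(batch)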


Example 15: optimizer

    def optimizer(loss, param):
        updates = OrderedDict()
        if not isinstance(param, list):
            param = list(param)

        for param_ in param:
            i = theano.shared(np.array(0, dtype=theano.config.floatX))
            i_int = i.astype('int64')
            value = param_.get_value(borrow=True)
            accu = theano.shared(
                np.zeros(value.shape + (n_win,), dtype=value.dtype))
            grad = tt.grad(loss, param_)

            # Append squared gradient vector to accu_new
            accu_new = tt.set_subtensor(accu[:, i_int], grad ** 2)
            i_new = tt.switch((i + 1) < n_win, i + 1, 0)

            updates[accu] = accu_new
            updates[i] = i_new

            accu_sum = accu_new.sum(axis=1)
            updates[param_] = param_ - (learning_rate * grad /
                                        tt.sqrt(accu_sum + epsilon))

        return updates
Author: sjtu2008 | Project: pymc3 | Lines: 25 | Source: advi.py


Example 16: __init__

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),
                               borrow=True)

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        # convolve input feature maps with filters
        conv_out = conv.conv2d(input=input, filters=self.W,
                filter_shape=filter_shape, image_shape=image_shape)

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out,
                                            ds=poolsize, ignore_border=True)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1,n_filters,1,1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

        # store parameters of this layer
        self.params = [self.W, self.b]
Author: Erotemic | Project: local | Lines: 60 | Source: convolutional_mlp.py
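A hedged usage sketch (the class name follows the docstring; the MNIST-sized shapes are assumptions):

rng = numpy.random.RandomState(1234)
x = T.tensor4('x')
layer0 = LeNetConvPoolLayer(rng, input=x, image_shape=(500, 1, 28, 28),
                            filter_shape=(20, 1, 5, 5), poolsize=(2, 2))
# conv output: 28 - 5 + 1 = 24, then 2x2 max-pooling, so layer0.output has shape (500, 20, 12, 12)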


Example 17: __init__

    def __init__(self, input_variable, rng, n_in=None, n_out=None, weights=None,
                 biases=None, activation=T.tanh):
        self.input_variable = input_variable
        if not weights:
            assert n_in is not None
            assert n_out is not None
            W_values = np.asarray(rng.uniform(
                low=-np.sqrt(6. / (n_in + n_out)),
                high=np.sqrt(6. / (n_in + n_out)),
                size=(n_in, n_out)), dtype=theano.config.floatX)
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W = theano.shared(value=W_values, name='W', borrow=True)
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name='b', borrow=True)
        else:
            W = weights
            b = biases

        self.W = W
        self.b = b

        linear_output = T.dot(self.input_variable, self.W) + self.b
        self.output = (linear_output if activation is None
                       else activation(linear_output))
        self.params = [self.W, self.b]
Author: youralien | Project: minet | Lines: 27 | Source: minet.py


Example 18: __init__

    def __init__(self, rng, input, n_in, n_out, W=None, b=None,
                 activation=T.tanh):
        if W is None:
            W_values = numpy.asarray(
                rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W_branches = theano.shared(value=W_values, name='W_branches', borrow=True)
        else:
            W_branches = W

        # handle b independently of W so that b_1 is always defined
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b_1 = theano.shared(value=b_values, name='b_1', borrow=True)
        else:
            b_1 = b

        self.W_branches = W_branches
        self.b_1 = b_1

        sub_branch_type = ""
        z_i = T.concatenate(self.W_branches[sub_branch_type] + self.W_branches[sub_branch_dist])
        
        # self.output = 

        self.params = [self.W_branches, self.b_1]
Author: yechaowang | Project: DeepVS | Lines: 29 | Source: DeepVs.py


Example 19: __init__

    def __init__(self, input, n_in, n_out):
        self.W = theano.shared(
            value = numpy.zeros(
                (n_in, n_out),
                dtype = theano.config.floatX
            ),
            name = 'W',
            borrow = True
        )

        self.b = theano.shared(
            value = numpy.zeros(
                (n_out,),
                dtype = theano.config.floatX
            ),
            name = 'b',
            borrow = True
        )

        self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)

        self.y_pred = T.argmax(self.p_y_given_x, axis = 1)

        self.params = [self.W, self.b]

        self.input = input
Author: WillySchu | Project: theano_tutorials | Lines: 26 | Source: logReg2.py


Example 20: generate_beta_arr

 def generate_beta_arr(self, step1_beta):
     """
     Generate the noise covariances, beta_t, for the forward trajectory.
     """
     # lower bound on beta
     min_beta_val = 1e-6
     min_beta_values = np.ones((self.trajectory_length,))*min_beta_val
     min_beta_values[0] += step1_beta
     min_beta = theano.shared(value=min_beta_values.astype(theano.config.floatX),
         name='min beta')
     # (potentially learned) function for how beta changes with timestep
     # TODO add beta_perturb_coefficients to the parameters to be learned
     beta_perturb_coefficients_values = np.zeros((self.n_temporal_basis,))
     beta_perturb_coefficients = theano.shared(
         value=beta_perturb_coefficients_values.astype(theano.config.floatX),
         name='beta perturb coefficients')
     beta_perturb = T.dot(self.temporal_basis.T, beta_perturb_coefficients)
     # baseline behavior of beta with time -- destroy a constant fraction
     # of the original data variance each time step
     # NOTE 2 below means a fraction ~1/T of the variance will be left at the end of the
     # trajectory
     beta_baseline = 1./np.linspace(self.trajectory_length, 2., self.trajectory_length)
     beta_baseline_offset = util.logit_np(beta_baseline).astype(theano.config.floatX)
     # and the actual beta_t, restricted to be between min_beta and 1-[small value]
     beta_arr = T.nnet.sigmoid(beta_perturb + beta_baseline_offset)
     beta_arr = min_beta + beta_arr * (1 - min_beta - 1e-5)
     beta_arr = beta_arr.reshape((self.trajectory_length, 1))
     return beta_arr
Author: Sohl-Dickstein | Project: Diffusion-Probabilistic-Models | Lines: 28 | Source: model.py



Note: The theano.shared examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects; copyright remains with the original authors. Consult each project's license before reusing or redistributing the code, and do not republish without permission.

