Python tensor.constant Function Code Examples


This article collects typical usage examples of the theano.tensor.constant function in Python. If you are wondering what the constant function does, how to call it, or what it looks like in practice, the curated examples below should help.

The sections that follow present 20 code examples of the constant function, sorted by popularity by default. You can upvote any example you find useful; your feedback helps the system surface better Python code examples.
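
As a quick orientation before the project excerpts, here is a minimal sketch of calling theano.tensor.constant directly (assuming a working Theano installation; the variable names are illustrative and not taken from the projects below):

    import numpy as np
    import theano
    import theano.tensor as T

    # Wrap a Python scalar as a graph constant; the dtype is inferred (float64 here).
    c = T.constant(2.5)

    # A NumPy array also works, optionally with an explicit name and dtype.
    m = T.constant(np.eye(3), name='identity', dtype=theano.config.floatX)

    # Constants are ordinary graph nodes and combine freely with symbolic variables.
    x = T.scalar('x')
    f = theano.function([x], x * c + m.sum())
    print(f(2.0))  # 2.0 * 2.5 + 3.0 = 8.0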

Example 1: test_constant

 def test_constant(self):
     # Reset the global Variable auto-name counter so names restart at auto_0
     Variable.__count__ = count(0)
     r1 = tensor.constant(1.5)
     r2 = tensor.constant(1.5)
     assert r1.auto_name == "auto_0"
     assert r2.auto_name == "auto_1"
Author: gotomypc | Project: Theano | Lines: 7 | Source: test_graph.py


Example 2: _init_params_

    def _init_params_(self, kbm, kbm_mask, emb, word_size=100, hidden_size=400, prefix='KBMN_'):
        # L2-normalize the embedding matrix
        emb_ = np.sqrt(np.sum(emb ** 2, axis=1))
        emb = emb / np.dot(emb_.reshape(-1, 1), np.ones((1, emb.shape[1])))
        emb[0, :] = 0.

        self.emb = theano.shared(
            value=np.asarray(emb, dtype=theano.config.floatX),
            name=prefix + 'emb',
            borrow=True
        )

        self.kbm = T.constant(
            x=kbm,
            name=prefix + 'kbm',
            ndim=2,
            dtype='int32'
        )

        self.kbm_mask = T.constant(
            x=kbm_mask,
            name=prefix + 'kbm_mask',
            ndim=2,
            dtype=theano.config.floatX
        )

        def _random_weights(x_dim, y_dim):
            return np.random.uniform(
                low=-np.sqrt(6. / (x_dim + y_dim)),
                high=np.sqrt(6. / (x_dim + y_dim)),
                size=(x_dim, y_dim)
            ).astype(theano.config.floatX)

        self.gru_W = theano.shared(
            value=np.concatenate(
                [_random_weights(word_size, hidden_size),
                 _random_weights(word_size, hidden_size),
                 _random_weights(word_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'gru_W',
            borrow=True
        )

        self.gru_U = theano.shared(
            value=np.concatenate(
                [_random_weights(hidden_size, hidden_size),
                 _random_weights(hidden_size, hidden_size),
                 _random_weights(hidden_size, hidden_size)],
                axis=1
            ).astype(theano.config.floatX),
            name=prefix+'gru_U',
            borrow=True
        )

        self.gru_B = theano.shared(
            value=np.zeros((3 * hidden_size,)).astype(theano.config.floatX),
            name=prefix+'b',
            borrow=True
        )
Author: Air-Fighter | Project: NN4Gaokao | Lines: 60 | Source: models.py


Example 3: add_param

 def add_param(self, param, name="", constraints=True,
               custom_update=None, custom_update_normalized=False, custom_update_exp_average=0,
               custom_update_condition=None, custom_update_accumulate_batches=None):
   """
   :type param: theano.SharedVariable
   :type name: str
   :rtype: theano.SharedVariable
   """
   param = super(Layer, self).add_param(param, name)
   if custom_update:
     # Handled in Device and Updater.
     param.custom_update = custom_update
     param.custom_update_normalized = custom_update_normalized
     param.custom_update_exp_average = custom_update_exp_average
     param.custom_update_condition = custom_update_condition
     param.custom_update_accumulate_batches = custom_update_accumulate_batches
   if constraints:
     if 'L1' in self.attrs and self.attrs['L1'] > 0:
       self.constraints += T.constant(self.attrs['L1'], name="L1", dtype='floatX') * abs(param).sum()
     if 'L2' in self.attrs and self.attrs['L2'] > 0:
       self.constraints += T.constant(self.attrs['L2'], name="L2", dtype='floatX') * (param**2).sum()
     if self.attrs.get('L2_eye', 0) > 0:
       L2_eye = T.constant(self.attrs['L2_eye'], name="L2_eye", dtype='floatX')
       if param.ndim == 2:
         eye = tiled_eye(param.shape[0], param.shape[1], dtype=param.dtype)
         self.constraints += L2_eye * ((param - eye)**2).sum()
       else:  # standard L2
         self.constraints += L2_eye * (param**2).sum()
     if 'varreg' in self.attrs and self.attrs['varreg'] > 0:
       self.constraints += self.attrs['varreg'] * (1.0 * T.sqrt(T.var(param)) - 1.0 / numpy.sum(param.get_value().shape))**2
   return param
Author: rwth-i6 | Project: returnn | Lines: 31 | Source: NetworkBaseLayer.py


Example 4: hard_sigmoid

def hard_sigmoid(x):
    out_dtype = scalar.upgrade_to_float(scalar.Scalar(dtype=x.dtype))[0].dtype
    slope = T.constant(0.2, dtype=out_dtype)
    shift = T.constant(0.5, dtype=out_dtype)
    x = (x * slope) + shift
    x = T.clip(x, 0, 1)
    return x
Author: CyrusChiu | Project: Theano-lightweight | Lines: 7 | Source: layers.py


Example 5: softmax

 def softmax(self, D, I):
   D = D * T.constant(self.attrs['sharpening'], dtype='float32')
   if self.attrs['norm'] == 'exp':
     E = T.exp(-D) * I
     E = E / T.maximum(T.sum(E, axis=0, keepdims=True), T.constant(1e-20, dtype='float32'))
   elif self.attrs['norm'] == 'sigmoid':
     E = (numpy.float32(1) - T.tanh(D)**2) * I
   elif self.attrs['norm'] == 'lstm':
     n_out = self.attrs['template']
     def lstm(z, i_t, s_p, h_p):
       z += T.dot(h_p, self.N_re)
       i = T.outer(i_t, T.alloc(numpy.cast['int8'](1), n_out))
       ingate = T.nnet.sigmoid(z[:,n_out: 2 * n_out])
       forgetgate = T.nnet.sigmoid(z[:,2 * n_out:3 * n_out])
       outgate = T.nnet.sigmoid(z[:,3 * n_out:])
       input = T.tanh(z[:,:n_out])
       s_t = input * ingate + s_p * forgetgate
       h_t = T.tanh(s_t) * outgate
       return theano.gradient.grad_clip(s_t * i, -50, 50), h_t * i
     E, _ = theano.scan(lstm, sequences=[D,I], outputs_info=[T.zeros((n_out,), 'float32'), T.zeros((n_out,), 'int32')])
     E = T.nnet.sigmoid(T.dot(E,self.N_out))
   else:
     raise NotImplementedError()
   if self.attrs['nbest'] > 1:
     opt = T.minimum(self.attrs['nbest'], E.shape[0])
     score = (T.sort(E, axis=0)[-opt]).dimshuffle('x',0).repeat(E.shape[0],axis=0)
     E = T.switch(T.lt(E,score), T.zeros_like(E), E)
   return E
Author: atuxhe | Project: returnn | Lines: 28 | Source: RecurrentTransform.py


Example 6: compute_output

    def compute_output(self, network):
        hyperparameter_name = network.find_hyperparameter(["hyperparameter"])
        # TODO add default hyperparameter
        res = network.find_hyperparameter([hyperparameter_name])
        if utils.is_number(res):
            var = T.constant(res)
            shape = ()
        elif utils.is_ndarray(res):
            var = T.constant(res)
            shape = res.shape
        elif utils.is_shared_variable(res):
            var = res
            shape = res.get_value().shape
        elif utils.is_nonshared_variable(res):
            var = res
            if res.ndim == 0:
                shape = ()
            else:
                shape = network.find_hyperparameter(["shape"])
        else:
            raise ValueError("Unknown hyperparameter type of %s" % res)

        network.create_vw(
            "default",
            variable=var,
            shape=shape,
            tags={"output"},
        )
Author: nsauder | Project: treeano | Lines: 28 | Source: hyperparameter.py


Example 7: rmsprop

    def rmsprop(self, lr, tparams, grads, inp_list, cost, params):
        clip = params["grad_clip"]
        decay_rate = tensor.constant(params["decay_rate"], dtype=theano.config.floatX)
        smooth_eps = tensor.constant(params["smooth_eps"], dtype=theano.config.floatX)
        zipped_grads = [theano.shared(np.zeros_like(p.get_value()), name="%s_grad" % k) for k, p in tparams.iteritems()]
        running_grads2 = [
            theano.shared(np.zeros_like(p.get_value()), name="%s_rgrad2" % k) for k, p in tparams.iteritems()
        ]
        zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
        if clip > 0.0:
            rg2up = [
                (
                    rg2,
                    tensor.clip(decay_rate * rg2 + (1 - decay_rate) * (tensor.clip(g, -clip, clip) ** 2), 0.0, np.inf),
                )
                for rg2, g in zip(running_grads2, grads)
            ]
        else:
            rg2up = [
                (rg2, tensor.clip(decay_rate * rg2 + (1 - decay_rate) * (g ** 2), 0.0, np.inf))
                for rg2, g in zip(running_grads2, grads)
            ]

        f_grad_shared = theano.function(inp_list, cost, updates=zgup + rg2up, name="rmsprop_f_grad_shared")

        updir = [theano.shared(p.get_value() * numpy_floatX(0.0), name="%s_updir" % k) for k, p in tparams.iteritems()]
        updir_new = [
            (ud, -lr * zg / (tensor.sqrt(rg2) + smooth_eps)) for ud, zg, rg2 in zip(updir, zipped_grads, running_grads2)
        ]
        param_up = [(p, p + udn[1]) for p, udn in zip(tparams.values(), updir_new)]
        f_update = theano.function(
            [lr], [], updates=updir_new + param_up, on_unused_input="ignore", name="rmsprop_f_update"
        )

        return f_grad_shared, f_update, zipped_grads, running_grads2, updir
Author: bajibabu | Project: dnn-speech | Lines: 35 | Source: solver.py


Example 8: lcn

def lcn(x,ishape,size=9):
    # Function borrowed from bengioe_util
    """
    expects x to be a tensor3 or tensor4, the first dimension being the number
    of images and the last two the shape of the image (which should be
    given anyway, for optimization purposes)
    """
    inshape = (x.shape[0],1,ishape[0],ishape[1])
    p = x.reshape(inshape)
    #p = (p-TT.mean(p))/T.std(p)
    g = gaussian(size,1.591/size)
    g/=g.sum()
    g = numpy.float32(g.reshape((1,1,size,size)))
    mean = TT.nnet.conv.conv2d(p,TT.constant(g),
                              None,
                              (1,1,size,size),
                              'full').reshape(
                                  (x.shape[0],1)+(ishape[0]+size-1,)*2)
    mean = mean[:,:,
                size/2:ishape[0]+size/2,
                size/2:ishape[1]+size/2]
    v = (p - mean)#.dimshuffle('x','x',0,1)
    var = TT.nnet.conv.conv2d(TT.sqr(v),TT.constant(g),
                             None,
                             (1,1,size,size),
                             'full').reshape(
                                  (x.shape[0],1)+(ishape[0]+size-1,)*2)
    var = var[:,:,
              size/2:ishape[0]+size/2,
              size/2:ishape[1]+size/2]
    std = TT.sqrt(var)
    std_mean = TT.mean(TT.mean(std,axis=3),axis=2).dimshuffle(0,1,'x','x')
    out = v / TT.maximum(std,std_mean)
    return (out + 2.5 )/5# - out.min()
Author: cc13ny | Project: galatea | Lines: 34 | Source: utils.py


Example 9: _allocate

    def _allocate(self):
        input_dim = ((self.input_dim,)
                     if not isinstance(self.input_dim, collections.Sequence)
                     else self.input_dim)
        broadcastable = (tuple(False for _ in input_dim)
                         if self.broadcastable is None else self.broadcastable)
        if len(input_dim) != len(broadcastable):
            raise ValueError("input_dim and broadcastable must be same length")
        var_dim = tuple(1 if broadcast else dim for dim, broadcast in
                        equizip(input_dim, broadcastable))
        broadcastable = broadcastable

        # "beta", from the Ioffe & Szegedy manuscript.
        if self.learn_shift:
            self.shift = shared_floatx_nans(var_dim, name='batch_norm_shift',
                                            broadcastable=broadcastable)
            add_role(self.shift, BATCH_NORM_SHIFT_PARAMETER)
            self.parameters.append(self.shift)
        else:
            self.shift = tensor.constant(0, dtype=theano.config.floatX)

        if self.learn_scale and not self.mean_only:
            # "gamma", from the Ioffe & Szegedy manuscript.
            self.scale = shared_floatx_nans(var_dim, name='batch_norm_scale',
                                            broadcastable=broadcastable)

            add_role(self.scale, BATCH_NORM_SCALE_PARAMETER)
            self.parameters.append(self.scale)
        else:
            self.scale = tensor.constant(1., dtype=theano.config.floatX)

        self._allocate_population_statistics(var_dim, broadcastable)
Author: Excalibur269 | Project: blocks | Lines: 32 | Source: bn.py


Example 10: get_cost_updates

    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one trainng
        step of the dA """

        # this is how if-then-else is written in Theano
        tilde_x = T.switch(T.gt(corruption_level, 0), self.get_corrupted_input(self.x, corruption_level), self.x)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        act = T.dot(tilde_x, self.W) + self.b
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        # L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        # note : L is now a vector, where each element is the
        #        cross-entropy cost of the reconstruction of the
        #        corresponding example of the minibatch. We need to
        #        compute the average of all these to get the cost of
        #        the minibatch
        
        L = T.sqrt(T.sum(T.sqr(T.sub(self.x, z)), axis=1))
        reg = T.sum(y, axis=0) / T.shape(y)[0] # sum over training set
        rho = T.constant(0.05)
        beta = T.constant(self.beta)
        reg1 = T.sum(rho * T.log(rho / reg) + (1-rho) * T.log((1-rho) / (1-reg)))
        cost = T.mean(L) + beta * reg1

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = {}
        for param, gparam in zip(self.params, gparams):
            updates[param] = param - learning_rate * gparam
        
        return (cost, collections.OrderedDict(updates.items()))
Author: nglazyrin | Project: chordest-py | Lines: 35 | Source: dA_modified.py


Example 11: __init__

    def __init__(self, name, data, distribution, model):
        """
        Parameters
        ----------

        type : theano type (optional)
        owner : theano owner (optional)

        name : str
        distribution : Distribution
        model : Model
        """
        self.name = name
        data = getattr(data, 'values', data) #handle pandas
        args = as_iterargs(data)

        if len(args) > 1:
            params = getargspec(distribution.logp).args
            args = [t.constant(d, name=name + "_" + param)
                    for d,param in zip(args,params) ]
        else:
            args = [t.constant(args[0], name=name)]

        self.logp_elemwiset = distribution.logp(*args)
        self.model = model
Author: DrDark | Project: pymc | Lines: 25 | Source: model.py


Example 12: __init__

 def __init__(self, img_height, img_width, obj_type='circle', obj_scale=0.2):
     """
     A class for drawing a few simple objects with subpixel resolution.
     """
     self.img_height = img_height
     self.img_width = img_width
     self.obj_type = obj_type
     self.obj_scale = obj_scale
     # make coordinate system for points in the object to render
     obj_x_coords, obj_y_coords = self._construct_obj_coords( \
                 obj_type=self.obj_type, obj_scale=self.obj_scale)
     self.obj_x = T.constant(obj_x_coords)
     self.obj_y = T.constant(obj_y_coords)
     self.obj_x_range = [np.min(obj_x_coords), np.max(obj_x_coords)]
     self.obj_y_range = [np.min(obj_y_coords), np.max(obj_y_coords)]
     # make coordinate system for x and y location in the image.
     #   -- image coordinates for the smallest dimension range over
     #      [-init_scale....init_scale], and coordinates for the largest
     #      dimension are at the same scale, but over a larger range.
     img_x_coords, img_y_coords = self._construct_img_coords( \
                 x_dim=self.img_width, y_dim=self.img_height)
     self.img_x = T.constant(img_x_coords)
     self.img_y = T.constant(img_y_coords)
     self.img_x_range = [np.min(img_x_coords), np.max(img_x_coords)]
     self.img_y_range = [np.min(img_y_coords), np.max(img_y_coords)]
     return
Author: Philip-Bachman | Project: Sequential-Generation | Lines: 26 | Source: MotionRenderers.py


Example 13: test_transform_thin_plate_spline_shift

 def test_transform_thin_plate_spline_shift(self):
     from lasagne.layers import InputLayer, TPSTransformerLayer
     from theano.tensor import constant
     batchsize = 5
     num_control_points = 16
     dest_offset = np.ones(shape=(batchsize, 2*num_control_points))
     l_in = InputLayer((batchsize, 3, 28, 28))
     l_loc = InputLayer((batchsize, 2*num_control_points))
     layer = TPSTransformerLayer(
             l_in, l_loc, control_points=num_control_points
     )
     image = np.zeros(shape=(28, 28))
     image[[0, -1], :] = 1
     image[:, [0, -1]] = 1
     inputs = np.tile(image, (batchsize, 3, 1, 1))
     shifted_input = np.ones(shape=(28, 28))
     shifted_input[:13, :13] = 0
     shifted_input[13, :13] = 0.50000271
     shifted_input[:13, 13] = 0.50000271
     shifted_input[13, 13] = 0.75000271
     shifted_input = np.tile(shifted_input, (batchsize, 3, 1, 1))
     outputs = layer.get_output_for([constant(inputs),
                                     constant(dest_offset)]).eval()
     np.testing.assert_allclose(shifted_input,
                                outputs, atol=1e-5)
Author: hjweide | Project: Lasagne | Lines: 25 | Source: test_special.py


Example 14: lcn_std_diff

def lcn_std_diff(x,size=9):
    # Function borrowed from bengioe_util
    p = x.reshape((1,1,48,48))
    #p = (p-TT.mean(p))/T.std(p)
    g = gaussian(size,1.591/size)
    g/=g.sum()
    g = numpy.float32(g.reshape((1,1,size,size)))
    mean = TT.nnet.conv.conv2d(p,TT.constant(g),
                              (1,1,48,48),
                              (1,1,size,size),
                              'full').reshape((48+size-1,)*2)
    mean = mean[size/2:48+size/2,
                size/2:48+size/2]
    meansq = TT.nnet.conv.conv2d(TT.sqr(p),TT.constant(g),
                                (1,1,48,48),
                                (1,1,size,size),
                                'full').reshape((48+size-1,)*2)
    meansq = meansq[size/2:48+size/2,
                    size/2:48+size/2]
    var = meansq - TT.sqr(mean)
    var = TT.clip(var, 0, 1e30)
    std = TT.sqrt(var)
    std = TT.clip(std, TT.mean(std), 1e30)
    out = (p - mean) / std
    return out - out.min()
Author: cc13ny | Project: galatea | Lines: 25 | Source: utils.py


Example 15: __init__

    def __init__(self, input_dim, N, init_scale=2.0):
        """
        A zoomable attention window for 1-dimensional inputs.

        Parameters
        ----------
        input_dim : int
            length of the input vectors
        N :
            length of the attention window
        init_scale :
            initial scaling for inputs vs. attention window
        """
        self.input_dim = input_dim
        self.N = N
        self.init_scale = init_scale
        # make offsets for internal dispersement of grid points.
        #   -- internal grid coordinates range over [-1...+1]
        offsets = np.arange(N) - (N / 2.0) + 0.5
        offsets = offsets / np.max(offsets)
        offsets = offsets.astype(theano.config.floatX)
        self.grid_offsets = T.constant(offsets)
        # make coordinate vectors for location in the input.
        #   -- coordinates for the smallest dimension are scaled to range over
        #      [-init_scale....init_scale].
        x_coords = (np.arange(input_dim) - (input_dim / 2.0) + 0.5)
        x_coords = (init_scale / np.max(x_coords)) * x_coords
        x_coords = x_coords.astype(theano.config.floatX)
        self.x_coords = T.constant(x_coords)
        return
Author: capybaralet | Project: Sequential-Generation | Lines: 30 | Source: BlocksAttention.py


Example 16: test_transform_thin_plate_spline_variable_input

    def test_transform_thin_plate_spline_variable_input(self):
        import lasagne
        from lasagne.utils import floatX
        from theano.tensor import constant

        x = np.random.random((10, 3, 28, 28)).astype('float32')
        x_sym = theano.tensor.tensor4()

        l_in = lasagne.layers.InputLayer((None, 3, None, 28))
        l_loc = lasagne.layers.DenseLayer(
                lasagne.layers.ReshapeLayer(l_in, ([0], 3*28*28)),
                num_units=32)
        l_trans = lasagne.layers.TPSTransformerLayer(
                l_in, l_loc, precompute_grid='auto')

        # check that shape propagation works
        assert l_trans.output_shape[0] is None
        assert l_trans.output_shape[1] == 3
        assert l_trans.output_shape[2] is None
        assert l_trans.output_shape[3] == 28

        # check that data propagation works
        dest_offset = np.zeros(shape=(10, 32))
        inputs = floatX(np.arange(np.prod(x.shape)).reshape(x.shape))
        outputs = l_trans.get_output_for([constant(inputs),
                                          constant(dest_offset)]).eval()
        np.testing.assert_allclose(inputs, outputs, atol=5e-4)
Author: hjweide | Project: Lasagne | Lines: 27 | Source: test_special.py


Example 17: test_mixture_api

def test_mixture_api():
    # Check basic API
    p1 = Normal(mu=0.0, sigma=T.constant(1.0))
    p2 = Normal(mu=1.0, sigma=2.0)
    m = Mixture(components=[p1, p2], weights=[0.25])

    assert len(m.components) == 2
    assert len(m.weights) == 2

    assert len(m.parameters_) == 4
    assert len(m.constants_) == 1
    assert len(m.observeds_) == 0

    assert p1.mu in m.parameters_
    assert p1.sigma in m.constants_
    assert p2.mu in m.parameters_
    assert p2.sigma in m.parameters_
    assert m.X == p1.X
    assert m.X == p2.X
    assert m.ndim == p1.ndim
    assert m.ndim == p2.ndim

    m = Mixture(components=[p1, p2])
    w = m.compute_weights()
    assert_array_equal(w, [0.5, 0.5])

    y = T.dscalar(name="y")
    w1 = T.constant(0.25)
    w2 = y * 2
    m = Mixture(components=[p1, p2], weights=[w1, w2])
    assert y in m.observeds_

    # Check errors
    assert_raises(ValueError, Mixture,
                  components=[p1, p1, p1], weights=[1.0])
Author: ibab | Project: carl | Lines: 35 | Source: test_mixture.py


Example 18: test_alloc_memset_0

def test_alloc_memset_0():
    i = tensor.iscalar()
    z = numpy.zeros((1,), dtype='float32')
    o = numpy.ones((1,), dtype='float32')
    ones = numpy.ones((2,), dtype='float32')

    # Test with 0
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 0).all()

    # Test with 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(6)) == 1).all()

    # Test with 1, 1
    a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
    f = theano.function([i], a, mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 1
    assert isinstance(topo[0].op, basic_ops.GpuAlloc)
    assert not topo[0].op.memset_0
    assert (numpy.asarray(f(2)) == 1).all()
Author: Abioy | Project: Theano | Lines: 31 | Source: test_opt.py


Example 19: generate_forward_diffusion_sample

    def generate_forward_diffusion_sample(self, X_noiseless):
        """
        Corrupt a training image with t steps worth of Gaussian noise, and
        return the corrupted image, as well as the mean and covariance of the
        posterior q(x^{t-1}|x^t, x^0).
        """

        X_noiseless = X_noiseless.reshape(
            (-1, self.n_colors, self.spatial_width, self.spatial_width))

        n_images = X_noiseless.shape[0].astype('int16')
        rng = Random().theano_rng
        # choose a timestep in [1, self.trajectory_length-1].
        # note the reverse process is fixed for the very
        # first timestep, so we skip it.
        # TODO for some reason random_integer is missing from the Blocks
        # theano random number generator.
        t = T.floor(rng.uniform(size=(1,1), low=1, high=self.trajectory_length,
            dtype=theano.config.floatX))
        t_weights = self.get_t_weights(t)
        N = rng.normal(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
            dtype=theano.config.floatX)

        # noise added this time step
        beta_forward = self.get_beta_forward(t)
        # decay in noise variance due to original signal this step
        alpha_forward = 1. - beta_forward
        # compute total decay in the fraction of the variance due to X_noiseless
        alpha_arr = 1. - self.beta_arr
        alpha_cum_forward_arr = T.extra_ops.cumprod(alpha_arr).reshape((self.trajectory_length,1))
        alpha_cum_forward = T.dot(t_weights.T, alpha_cum_forward_arr)
        # total fraction of the variance due to noise being mixed in
        beta_cumulative = 1. - alpha_cum_forward
        # total fraction of the variance due to noise being mixed in one step ago
        beta_cumulative_prior_step = 1. - alpha_cum_forward/alpha_forward

        # generate the corrupted training data
        X_uniformnoise = X_noiseless + (rng.uniform(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
            dtype=theano.config.floatX)-T.constant(0.5,dtype=theano.config.floatX))*T.constant(self.uniform_noise,dtype=theano.config.floatX)
        X_noisy = X_uniformnoise*T.sqrt(alpha_cum_forward) + N*T.sqrt(1. - alpha_cum_forward)

        # compute the mean and covariance of the posterior distribution
        mu1_scl = T.sqrt(alpha_cum_forward / alpha_forward)
        mu2_scl = 1. / T.sqrt(alpha_forward)
        cov1 = 1. - alpha_cum_forward/alpha_forward
        cov2 = beta_forward / alpha_forward
        lam = 1./cov1 + 1./cov2
        mu = (
                X_uniformnoise * mu1_scl / cov1 +
                X_noisy * mu2_scl / cov2
            ) / lam
        sigma = T.sqrt(1./lam)
        sigma = sigma.reshape((1,1,1,1))

        mu.name = 'mu q posterior'
        sigma.name = 'sigma q posterior'
        X_noisy.name = 'X_noisy'
        t.name = 't'

        return X_noisy, t, mu, sigma
Author: Sohl-Dickstein | Project: Diffusion-Probabilistic-Models | Lines: 60 | Source: model.py


Example 20: test_draw_value

def test_draw_value():
    npt.assert_equal(_draw_value(np.array([5, 6])), [5, 6])
    npt.assert_equal(_draw_value(np.array(5.)), 5)

    npt.assert_equal(_draw_value(tt.constant([5., 6.])), [5, 6])
    assert _draw_value(tt.constant(5)) == 5
    npt.assert_equal(_draw_value(2 * tt.constant([5., 6.])), [10, 12])

    val = theano.shared(np.array([5., 6.]))
    npt.assert_equal(_draw_value(val), [5, 6])
    npt.assert_equal(_draw_value(2 * val), [10, 12])

    a = tt.scalar('a')
    a.tag.test_value = 6
    npt.assert_equal(_draw_value(2 * a, givens=[(a, 1)]), 2)

    assert _draw_value(5) == 5
    assert _draw_value(5.) == 5
    assert isinstance(_draw_value(5.), type(5.))
    assert isinstance(_draw_value(5), type(5))

    with pm.Model():
        mu = 2 * tt.constant(np.array([5., 6.])) + theano.shared(np.array(5))
        a = pm.Normal('a', mu=mu, sd=5, shape=2)

    val1 = _draw_value(a)
    val2 = _draw_value(a)
    assert np.all(val1 != val2)

    with pytest.raises(ValueError) as err:
        _draw_value([])
    err.match('Unexpected type')
Author: alexander-belikov | Project: pymc3 | Lines: 32 | Source: test_random.py



Note: The theano.tensor.constant examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.

