
Python theano._asarray Function Code Examples


This article collects typical usage examples of the theano._asarray function in Python. If you are wondering what _asarray does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following shows 20 code examples of the _asarray function, sorted by popularity by default.
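Before the examples, a minimal sketch of the pattern they all share may help. This is an illustrative sketch, assuming a legacy Theano installation: _asarray behaves like numpy.asarray, but additionally guarantees that the result has exactly the requested dtype.

import numpy
import theano

# _asarray is essentially numpy.asarray with a guaranteed dtype; the
# examples below rely on it to build float32 inputs for CudaNdarray and
# floatX-typed initial values for shared variables.
x = theano._asarray([1, 2, 3], dtype='float32')
assert x.dtype == numpy.dtype('float32')

# Common pattern: wrap an initial parameter value before creating a
# shared variable with the configured float type.
w = theano.shared(theano._asarray(numpy.zeros((3, 4)),
                                  dtype=theano.config.floatX),
                  name='w')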

Example 1: test_setitem_matrix_bad_ndim

def test_setitem_matrix_bad_ndim():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    try:
        # attempt to assign the ndarray _b with setitem
        _a[:, :, 1] = _b
        assert False
    except ValueError:
        pass

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, :, :] = b
        assert False
    except ValueError:
        pass
Developer: 5730279821-TA | Project: Theano | Lines: 25 | Source: test_cuda_ndarray.py


Example 2: set_input_space

    def set_input_space(self, space):
        self.input_space = space

        if isinstance(space, VectorSpace):
            self.requires_reformat = False
            self.input_dim = space.dim
        else:
            self.requires_reformat = True
            self.input_dim = space.get_total_dimension()
            self.desired_space = VectorSpace(self.input_dim)

        self.output_space = VectorSpace(self.dim)

        # we cannot set this in __init__() as we're not sure about the input
        # dimensions yet
        if self.istdev is not None:
            W = self.rng.randn(self.input_dim, self.dim) * self.istdev
            b = self.rng.randn(self.dim,) * self.istdev
        else:
            W = np.zeros((self.input_dim, self.dim))
            b = np.zeros((self.dim,))

        self.W = theano.shared(theano._asarray(W,
                               dtype=theano.config.floatX),
                               name=(self.layer_name + '_W'))

        self.b = theano.shared(theano._asarray(b,
                               dtype=theano.config.floatX),
                               name=(self.layer_name + '_b'))
Developer: mlisicki | Project: pylearn2 | Lines: 28 | Source: mike.py


Example 3: __init__

    def __init__(self, input, n_in, n_out, activation, rng=RandomState(1234), 
        layer_name="HiddenLayer", W=None, b=None, borrow=True):

        if W is not None: self.W = shared(value=W, borrow=borrow, name=layer_name+'_W')
        elif activation in (relu,softplus): 
            W_val = _asarray(rng.normal(loc=0, scale=0.01, 
                size=(n_in, n_out)), dtype=floatX)
            self.W = shared(W_val, name=layer_name+"_W", borrow=borrow)    
        else: 
            # uniformly sampled W
            low = -sqrt(6. / (n_in + n_out))
            high = sqrt(6. / (n_in + n_out))
            values = rng.uniform(low=low, high=high, size=(n_in, n_out))
            W_val = _asarray(values, dtype=floatX)
            if activation == sigmoid: W_val *= 4
            self.W = shared(value=W_val, borrow=borrow, name=layer_name+'_W')
            

        if b is not None: self.b = shared(b, name=layer_name+"_b", borrow=borrow)
        elif activation in (relu,softplus): 
            b_val = ones((n_out,), dtype=floatX)
            self.b = shared(value=b_val, borrow=True)
        else: 
            # Initialize b with zeros
            self.b = shared(value=zeros((n_out,), dtype=floatX), borrow=True)

        # Parameters of the model
        self.params = [self.W, self.b]
        # Output of the hidden layer
        self.output = activation(T.dot(input, self.W) + self.b)
Developer: rodion-zheludkov | Project: kaggle | Lines: 30 | Source: mlp.py


Example 4: gemm_directly

def gemm_directly(bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsx, subsy,
                  direction):
    ishape = (bs, ch, rImg1, rImg2)
    kshape = (nf, ch, rFlt1, rFlt2)
    subsample = (subsx, subsy)

    npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
    npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')

    i = cuda_tensor4()
    k = cuda_tensor4()

    if direction == 'fprop':
        cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM(border_mode='valid',
                                                subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = f(npy_img, npy_kern[:,:,::-1,::-1])
    elif direction == 'bprop img':
        cpuval = py_conv(npy_img, npy_kern, 'full', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM_gradInputs(
            border_mode='valid', subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = f(npy_kern.transpose(1, 0, 2, 3), npy_img)
    elif direction == 'bprop kern':
        cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM_gradWeights(
            border_mode='valid', subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = numpy.array(f(
                npy_img.transpose(1, 0, 2, 3),
                npy_kern.transpose(1, 0, 2, 3)[:,:,::-1,::-1])).transpose(
            1, 0, 2, 3)

    assert_allclose(cpuval, gpuval, rtol=1e-4)
Developer: alouisos | Project: Theano | Lines: 35 | Source: test_conv_cuda_ndarray.py


Example 5: test_elemwise2

def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    shape = (3, 5)
    for pattern in [(0, 1), (1, 0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                                   dtype='float32'), name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
        f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],
                  mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.env.toposort()):
            has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
        assert not has_elemwise
        #let debugmode catch errors
        f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)

    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
    f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *
        tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.env.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    #let debugmode catch errors
    f(theano._asarray(rng.rand(*shape), dtype='float32'))
Developer: gexarcha | Project: Theano | Lines: 29 | Source: test_basic_ops.py


Example 6: get_updates

    def get_updates(self, grads):
        grads = OrderedDict(grads)
        updates = OrderedDict()

        for param in grads.keys():
            # mean_squared_grad := E[g^2]_{t-1}
            mean_square_grad = theano.shared(
                theano._asarray(param.get_value() * 0.,
                                dtype=theano.config.floatX),
                name='mean_square_grad_' + param.name, borrow=False)
            self.parameters.append(mean_square_grad)
            # mean_square_dx := E[(\Delta x)^2]_{t-1}
            mean_square_dx = theano.shared(
                theano._asarray(param.get_value() * 0.,
                                dtype=theano.config.floatX),
                name='mean_square_dx_' + param.name, borrow=False)
            self.parameters.append(mean_square_dx)

            # Accumulate gradient
            new_mean_squared_grad = self.decay * mean_square_grad + (1 - self.decay) * T.sqr(grads[param])

            # Compute update
            rms_dx_tm1 = T.sqrt(mean_square_dx + self.epsilon)
            rms_grad_t = T.sqrt(new_mean_squared_grad + self.epsilon)
            delta_x_t = - rms_dx_tm1 / rms_grad_t * grads[param]

            # Accumulate updates
            new_mean_square_dx = self.decay * mean_square_dx + (1 - self.decay) * T.sqr(delta_x_t)

            # Apply update
            updates[mean_square_grad] = new_mean_squared_grad
            updates[mean_square_dx] = new_mean_square_dx
            updates[param] = param + delta_x_t

        return updates
Developer: MarcCote | Project: TheanoNADE | Lines: 29 | Source: momentums.py
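
The OrderedDict returned by get_updates plugs directly into theano.function. The following is a minimal wiring sketch, not part of the original source: the loss, the parameter w, and the plain-SGD stand-in for get_updates are illustrative assumptions.

from collections import OrderedDict

import numpy
import theano
import theano.tensor as T

x = T.vector('x')                      # dtype is theano.config.floatX
w = theano.shared(theano._asarray(numpy.zeros(3),
                                  dtype=theano.config.floatX), name='w')
loss = T.sum(T.dot(x, w) ** 2)

# grads maps each parameter to its gradient, the format get_updates expects.
grads = OrderedDict([(w, T.grad(loss, w))])

# Stand-in for get_updates(grads): one plain SGD step per parameter.
lr = theano._asarray(0.1, dtype=theano.config.floatX)
updates = OrderedDict((p, p - lr * g) for p, g in grads.items())

train = theano.function([x], loss, updates=updates)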


Example 7: test_setitem_rightvalue_ndarray_fails

def test_setitem_rightvalue_ndarray_fails():
    """
    Now we don't automatically add dimensions to broadcast
    """
    a = numpy.arange(3 * 4 * 5)
    a.resize((3, 4, 5))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8, 9, 10], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)
    b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')
    _b5 = cuda_ndarray.CudaNdarray(b5)

    # attempt to assign the ndarray b with setitem
    _a[:, :, 1] = _b
    a[:, :, 1] = b
    assert numpy.allclose(numpy.asarray(_a), a)

    # test direct transfer from numpy to contiguous region
    # attempt to assign the ndarray b with setitem
    # same number of dim
    mat = numpy.random.rand(4, 5).astype('float32')
    _a[2, :, :] = mat
    a[2, :, :] = mat
    assert numpy.allclose(numpy.asarray(_a), a)

    # without same number of dim
    try:
        _a[0, :, :] = mat
        #a[0, :, :] = mat
        #assert numpy.allclose(numpy.asarray(_a), a)
    except ValueError:
        pass
Developer: 317070 | Project: Theano | Lines: 34 | Source: test_cuda_ndarray.py


Example 8: test_elemwise1

def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """

    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()

    # let debugmode catch any mistakes
    print("STARTING FUNCTION 1", file=sys.stdout)
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    for i, node in enumerate(f.maker.env.toposort()):
        print(i, node)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    print("STARTING FUNCTION 2", file=sys.stdout)
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    for i, node in enumerate(f.maker.env.toposort()):
        print(i, node)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)

    print("STARTING FUNCTION 3", file=sys.stdout)
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
Developer: fivejjs | Project: Theano | Lines: 28 | Source: test_basic_ops.py


Example 9: test_setitem_matrix_tensor3

def test_setitem_matrix_tensor3():
    a = numpy.arange(27)
    a.resize((3,3,3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7,8,9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set middle row through cube to 7,8,9
    _a[:,1,1] = _b

    a[:,1,1] = b
    assert numpy.allclose(a,numpy.asarray(_a))

    # test direct transfer from numpy: this assignment is expected to fail
    try:
        _a[:, 1, 1] = b * 100
        raise Exception("CudaNdarray.__setitem__ should have raised an error")
    except NotImplementedError:
        pass

    row = theano._asarray([777,888,999], dtype='float32')
    _a[1,1,:] = row
    a[1,1,:] = row
    assert numpy.allclose(a,numpy.asarray(_a))
Developer: HaniAlmousli | Project: Theano | Lines: 28 | Source: test_cuda_ndarray.py


Example 10: test_sum

def test_sum():
    shape = (2,3)
    a0 = theano._asarray(numpy.arange(shape[0]*shape[1]).reshape(shape), dtype='float32')

    b0 = cuda_ndarray.CudaNdarray(a0)

    assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1,1])))

    a0sum = a0.sum(axis=0)
    b0sum = b0.reduce_sum([1,0])

    print('asum\n', a0sum)
    print('bsum\n', numpy.asarray(b0sum))

    assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1,0])))
    assert numpy.allclose(a0.sum(axis=1), numpy.asarray(b0.reduce_sum([0,1])))
    assert numpy.allclose(a0, numpy.asarray(b0.reduce_sum([0,0])))

    shape = (3,4,5,6,7,8)
    a0 = theano._asarray(numpy.arange(3*4*5*6*7*8).reshape(shape), dtype='float32')
    b0 = cuda_ndarray.CudaNdarray(a0)
    assert numpy.allclose(a0.sum(axis=5).sum(axis=3).sum(axis=0), numpy.asarray(b0.reduce_sum([1,0,0,1,0,1])))

    shape = (16,2048)
    a0 = theano._asarray(numpy.arange(16*2048).reshape(shape), dtype='float32')
    b0 = cuda_ndarray.CudaNdarray(a0)
    assert numpy.allclose(a0.sum(axis=0), numpy.asarray(b0.reduce_sum([1,0])))

    shape = (16,10)
    a0 = theano._asarray(numpy.arange(160).reshape(shape), dtype='float32')
    b0 = cuda_ndarray.CudaNdarray(a0)
    assert numpy.allclose(a0.sum(), numpy.asarray(b0.reduce_sum([1,1])))
Developer: HaniAlmousli | Project: Theano | Lines: 32 | Source: test_cuda_ndarray.py


Example 11: subtest

    def subtest(shape_1, shape_2, rng):
        #print >> sys.stdout, "INFO: shapes", shape_1, shape_2
        a = theano._asarray(rng.randn(*shape_1), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)

        aa = a.reshape(shape_2)
        bb = b.reshape(shape_2)

        n_bb = numpy.asarray(bb)

        # print n_bb

        assert numpy.all(aa == n_bb)
        assert aa.shape == n_bb.shape

        # Test the not contiguous case
        shape_1_2x = (shape_1[0] * 2,) + shape_1[1:]
        a = theano._asarray(rng.randn(*shape_1_2x), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        a = a[::2]
        b = b[::2]

        aa = a.reshape(shape_2)
        bb = b.reshape(shape_2)

        n_bb = numpy.asarray(bb)

        # print n_bb

        assert numpy.all(aa == n_bb)
        assert aa.shape == n_bb.shape
Developer: 5730279821-TA | Project: Theano | Lines: 31 | Source: test_cuda_ndarray.py


Example 12: new_filters_expbounds

    def new_filters_expbounds(
        cls, rng, input, n_in, n_out, n_terms, dtype=None, eps=1e-1, exponent_range=(1.0, 3.0), filter_range=1.0
    ):
        """Return a KouhLayer instance with random parameters

        The parameters are drawn on a range [typically] suitable for fine-tuning by gradient
        descent.


        :param input: a tensor of shape (n_examples, n_in)

        :type n_in: positive int
        :param n_in: number of input dimensions

        :type n_out: positive int
        :param n_out: number of dimensions in rval.output

        :param n_terms: each (of n_out) complex-cell firing rate will be determined from this
        many 'simple cell' responses.

        :param eps: this amount is added to the softplus of filter responses as a baseline
        firing rate (that prevents a subsequent error from ``pow(0, p)``)

        :returns: KouhLayer instance with freshly-allocated random weights.

        """
        if input.type.ndim != 2:
            raise TypeError("matrix expected for input")

        if dtype is None:
            dtype = input.dtype
        _logger.debug("dtype %s" % dtype)

        def shared_uniform(low, high, size, name):
            return _shared_uniform(rng, low, high, size, dtype, name)

        f_list = [
            shared_uniform(
                low=-2.0 / numpy.sqrt(n_in), high=2.0 / numpy.sqrt(n_in), size=(n_in, n_out), name="f_%i" % i
            )
            for i in range(n_terms)
        ]

        b_list = [shared_uniform(low=0, high=0.01, size=(n_out,), name="b_%i" % i) for i in range(n_terms)]
        # x_list = [theano._asarray(eps, dtype=dtype)+softplus(tensor.dot(input, f_list[i])) for i in range(n_terms)]
        filter_range = theano._asarray(filter_range, dtype=dtype)
        half_filter_range = theano._asarray(filter_range / 2, dtype=dtype)
        x_list = [
            theano._asarray(filter_range + eps, dtype=dtype)
            + half_filter_range * softsign(tensor.dot(input, f_list[i]) + b_list[i])
            for i in range(n_terms)
        ]

        rval = cls.new_expbounds(rng, x_list, n_out, dtype=dtype, params=f_list + b_list, exponent_range=exponent_range)
        rval.f_list = f_list
        rval.input = input  # add the input to the returned object
        rval.filter_l1 = sum(abs(fi).sum() for fi in f_list)
        rval.filter_l2_sqr = sum((fi ** 2).sum() for fi in f_list)
        return rval
Developer: huamichaelchen | Project: Theano | Lines: 59 | Source: test_bench_loopfusion.py


Example 13: test_invalid_arg

    def test_invalid_arg(self):
        img = theano._asarray(numpy.empty((1, 1, 1, 1)), dtype='float32')
        kern = theano._asarray(numpy.empty((1, 1, 1, 1)), dtype='float32')
        for i in self.conv_ops:
            assert_raises(ValueError, i, img, kern,
                          border_mode=(-1, 0))
            assert_raises(ValueError, i, img, kern,
                          border_mode=(0, -1))
            assert_raises(ValueError, i, img, kern,
                          border_mode='not border')
Developer: tjadamlee | Project: Theano | Lines: 10 | Source: test_conv_cuda_ndarray.py


Example 14: sharedX

def sharedX(value, name=None, borrow=True, keep_on_cpu=False):
    """ Transform value into a shared variable of type floatX """
    if keep_on_cpu:
        return T._shared(theano._asarray(value, dtype=theano.config.floatX),
                         name=name,
                         borrow=borrow)

    return theano.shared(theano._asarray(value, dtype=theano.config.floatX),
                         name=name,
                         borrow=borrow)
Developer: ASalvail | Project: smartlearner | Lines: 10 | Source: utils.py
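
A typical call, as a brief sketch (the shape and name here are illustrative, not from the source):

import numpy

W = sharedX(numpy.random.randn(784, 100), name='W')
# The value is cast to theano.config.floatX regardless of its input dtype.
assert W.get_value().dtype == theano.config.floatX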


Example 15: learning_rates_setup

    def learning_rates_setup(self, base_lr, **kwargs):
        """
        Initializes parameter-specific learning rate dictionary and shared
        variables for the annealed base learning rate and iteration number.

        Parameters
        ----------
        base_lr : float
            The base learning rate before annealing or parameter-specific
            scaling.

        Notes
        -----
        Parameter-specific learning rates can be set by passing keyword
        arguments <name>_lr, where name is the .name attribute of a given
        parameter.
        """
        # Take care of learning rate scales for individual parameters
        self.learning_rates = {}
        # Base learning rate per example.
        self.base_lr = theano._asarray(base_lr, dtype=floatX)

        # Keep track of names already seen
        lr_names_seen = set()
        for parameter in self.params:
            lr_name = '%s_lr' % parameter.name
            if lr_name in lr_names_seen:
                print('Warning: In SGDOptimizer, '
                      'at least two parameters have the same name. '
                      'Both will be affected by the keyword argument '
                      '%s.' % lr_name, file=sys.stderr)
            lr_names_seen.add(parameter.name)

            thislr = kwargs.get(lr_name, 1.)
            self.learning_rates[parameter] = sharedX(thislr, lr_name)

        # Verify that no ..._lr keyword argument is ignored
        for lr_name in lr_names_seen:
            if lr_name in kwargs:
                kwargs.pop(lr_name)
        for kw in kwargs:
            if kw[-3:] == '_lr':
                print('Warning: in SGDOptimizer, '
                      'keyword argument %s will be ignored, '
                      'because no parameter was found with name %s.'
                      % (kw, kw[:-3]), file=sys.stderr)

        # A shared variable for storing the iteration number.
        self.iteration = sharedX(theano._asarray(0, dtype='int32'),
                                 name='iter')

        # A shared variable for storing the annealed base learning rate, used
        # to lower the learning rate gradually after a certain amount of time.
        self.annealed = sharedX(base_lr, 'annealed')
Developer: jaberg | Project: pylearn | Lines: 54 | Source: optimizer.py


Example 16: conv_grad

def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op):
    ishape = (bs, ch, rImg1, rImg2)
    kshape = (nf, ch, rFlt1, rFlt2)

    npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
    npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')

    i = cuda.CudaNdarrayType(
        broadcastable=[sh == 1 for sh in npy_img.shape])()
    k = cuda.CudaNdarrayType(
        broadcastable=[sh == 1 for sh in npy_kern.shape])()

    # TODO: also test custom pad values
    corr_op = op(mode, subsample)(i, k)
    # try to compile reference implementation without shape,
    # so we don't have to compile hundreds of versions
    conv_op = tensor.nnet.conv2d(i, k[:, :, ::-1, ::-1],
                                 border_mode=mode, subsample=subsample)
    try:
        conv_op_di = theano.grad(conv_op.sum(), i)
        conv_op_dk = theano.grad(conv_op.sum(), k)
    except Exception:
        # compile with shape information only when needed
        conv_op = tensor.nnet.conv2d(i, k[:, :, ::-1, ::-1],
                                     ishape, kshape, mode, subsample)
    conv_op_di = theano.grad(conv_op.sum(), i)
    conv_op_dk = theano.grad(conv_op.sum(), k)
    corr_op_di = theano.grad(corr_op.sum(), i)
    corr_op_dk = theano.grad(corr_op.sum(), k)
    outputs = [corr_op, conv_op,
               corr_op_di, conv_op_di,
               corr_op_dk, conv_op_dk]
    try:
        conv_op_dik = theano.grad(conv_op_di.sum(), k)
        conv_op_dki = theano.grad(conv_op_dk.sum(), i)
        corr_op_dik = theano.grad(corr_op_di.sum(), k)
        corr_op_dki = theano.grad(corr_op_dk.sum(), i)
        outputs.extend([corr_op_dik, conv_op_dik,
                        corr_op_dki, conv_op_dki])
    except Exception:
        # skip if the reference implementation can't do it
        pass

    f = theano.function([i, k], outputs, mode=theano_mode.excluding('conv_dnn', 'conv_gemm'))

    allvals = f(npy_img, npy_kern)

    for a, b, oa, ob, p in zip(allvals[::2], allvals[1::2],
                               outputs[::2], outputs[1::2],
                               ('top', 'dtop/dbottom', 'dtop/dweight',
                                'dtop/dbottom/dweight', 'dtop/dweight/dbottom')):
        assert oa.type.broadcastable[:2] == ob.type.broadcastable[:2]

        assert_allclose(a, b, rtol=1e-4)
Developer: aalmah | Project: Theano | Lines: 54 | Source: test_conv_cuda_ndarray.py


Example 17: test_gemm_vector_vector

def test_gemm_vector_vector():
    a = theano._asarray(numpy.random.rand(5,1), dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)
    b = theano._asarray(numpy.random.rand(1,5), dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    _c = cuda_ndarray.dot(_a,_b)
    assert _c.shape == (5,5)
    assert numpy.allclose(_c, numpy.dot(a, b))

    _c = cuda_ndarray.dot(_b,_a)
    assert _c.shape == (1,1)
    assert numpy.allclose(_c, numpy.dot(b, a))
Developer: HaniAlmousli | Project: Theano | Lines: 13 | Source: test_cuda_ndarray.py


Example 18: test_dimshuffle

    def test_dimshuffle(self):
        utt.seed_rng()
        rng = numpy.random.RandomState(utt.fetch_seed())

        # 2d -> 0d
        a = theano._asarray(rng.randn(1,1), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(numpy.transpose(a), cuda_ndarray.dimshuffle(b,()))

        # Test when we drop an axis that doesn't have shape 1
        a = theano._asarray(rng.randn(2,1), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        self.assertRaises(ValueError, cuda_ndarray.dimshuffle, b,())

        # Test that we can't use the same dimension twice
        a = theano._asarray(rng.randn(2,1), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        self.assertRaises(ValueError, cuda_ndarray.dimshuffle, b,(1,1))

        # 1d
        a = theano._asarray(rng.randn(3,), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(numpy.transpose(a), cuda_ndarray.dimshuffle(b,(0,)))
        assert numpy.allclose(a[None,:,None], cuda_ndarray.dimshuffle(b,(-1,0,-1)))

        # 2d
        a = theano._asarray(rng.randn(3,11), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(numpy.transpose(a), cuda_ndarray.dimshuffle(b,(1,0)))
        assert numpy.allclose(numpy.transpose(a)[None,:,None,:,None], cuda_ndarray.dimshuffle(b,(-1,1,-1,0,-1)))

        # 2d -> 1d
        a = theano._asarray(rng.randn(1,11), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(a[:,], cuda_ndarray.dimshuffle(b,(1,)))
        a = theano._asarray(rng.randn(11,1), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(a.reshape((11,)), cuda_ndarray.dimshuffle(b,(0,)))

        # 3d
        a = theano._asarray(rng.randn(3,4,5), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(a, cuda_ndarray.dimshuffle(b,(0,1,2)))
        assert numpy.allclose(numpy.swapaxes(a,0,1), cuda_ndarray.dimshuffle(b,(1,0,2)))
        assert numpy.allclose(numpy.swapaxes(a,0,2), cuda_ndarray.dimshuffle(b,(2,1,0)))
        assert numpy.allclose(numpy.swapaxes(a,1,2), cuda_ndarray.dimshuffle(b,(0,2,1)))
        assert numpy.allclose(numpy.swapaxes(a,1,2)[None,:,None,:,:,None], cuda_ndarray.dimshuffle(b,(-1,0,-1,2,1,-1)))

        # 4d
        a = theano._asarray(rng.randn(3,11,4,5), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert numpy.allclose(numpy.swapaxes(a,0,1), cuda_ndarray.dimshuffle(b,(1,0,2,3)))
        assert numpy.allclose(numpy.swapaxes(a,0,2), cuda_ndarray.dimshuffle(b,(2,1,0,3)))
        assert numpy.allclose(numpy.swapaxes(a,0,3), cuda_ndarray.dimshuffle(b,(3,1,2,0)))
        assert numpy.allclose(numpy.swapaxes(a,0,3)[None,:,None,:,:,:], cuda_ndarray.dimshuffle(b,(-1,3,-1,1,2,0)))
Developer: HaniAlmousli | Project: Theano | Lines: 56 | Source: test_cuda_ndarray.py


Example 19: conv_grad

def conv_grad(mode, bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsample, op):
    ishape = (bs, ch, rImg1, rImg2)
    kshape = (nf, ch, rFlt1, rFlt2)

    npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
    npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')

    i = cuda.CudaNdarrayType(
        broadcastable=[sh == 1 for sh in npy_img.shape])()
    k = cuda.CudaNdarrayType(
        broadcastable=[sh == 1 for sh in npy_kern.shape])()

    # TODO: also test custom pad values
    corr_op = op(mode, subsample)(i, k)
    conv_op = tensor.nnet.conv2d(i, k[:, :, ::-1, ::-1],
                                 border_mode=mode, subsample=subsample)
    conv_op_di = theano.grad(conv_op.sum(), i)
    conv_op_dk = theano.grad(conv_op.sum(), k)
    corr_op_di = theano.grad(corr_op.sum(), i)
    corr_op_dk = theano.grad(corr_op.sum(), k)
    outputs = [corr_op, conv_op,
               corr_op_di, conv_op_di,
               corr_op_dk, conv_op_dk]

    conv_op_dik = theano.grad(conv_op_di.sum(), k)
    conv_op_dki = theano.grad(conv_op_dk.sum(), i)
    corr_op_dik = theano.grad(corr_op_di.sum(), k)
    corr_op_dki = theano.grad(corr_op_dk.sum(), i)
    outputs.extend([corr_op_dik, conv_op_dik,
                    corr_op_dki, conv_op_dki])

    if not theano.config.blas.ldflags:
        # Some of the operations are not transferred to the GPU,
        # and without BLAS, the abstract Op will not be optimized
        # to CorrMM either, so we have to accept the use of the
        # slow Python convolution in that case.
        mode = theano_mode.excluding('AbstractConvCheck')
    else:
        mode = theano_mode

    f = theano.function([i, k], outputs, mode=mode)

    allvals = f(npy_img, npy_kern)

    for a, b, oa, ob, p in zip(allvals[::2], allvals[1::2],
                               outputs[::2], outputs[1::2],
                               ('top', 'dtop/dbottom', 'dtop/dweight',
                                'dtop/dbottom/dweight', 'dtop/dweight/dbottom')):
        assert oa.type.broadcastable[:2] == ob.type.broadcastable[:2]

        assert_allclose(a, b, rtol=1e-4)
Developer: ErircChen2015 | Project: Theano | Lines: 51 | Source: test_conv_cuda_ndarray.py


Example 20: test_elemwise_fusion

def test_elemwise_fusion():
    """ Test that GpuElemwise fusion works correctly """
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                                dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.env.toposort()
    for i, node in enumerate(topo):
        print(i, node)
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
Developer: mesnilgr | Project: Theano | Lines: 14 | Source: test_opt.py



Note: the theano._asarray examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.

