
Python extra_ops.cumsum Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.extra_ops.cumsum. If you have been wondering what cumsum does, how to call it, or what real uses of it look like, the curated examples below should help.



Sixteen code examples of the cumsum function are shown below, ordered by popularity by default.
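Before the examples, here is a minimal, self-contained usage sketch (the variable names and values are illustrative and not taken from any of the projects below). cumsum mirrors np.cumsum: with axis=None the input is flattened first; otherwise accumulation runs along the given axis.

    import numpy as np
    import theano
    import theano.tensor as T
    from theano.tensor.extra_ops import cumsum

    x = T.matrix('x')
    f_flat = theano.function([x], cumsum(x))          # axis=None: flatten, then accumulate
    f_rows = theano.function([x], cumsum(x, axis=1))  # accumulate along each row

    a = np.arange(6).reshape(2, 3).astype(theano.config.floatX)
    print(f_flat(a))  # [ 0.  1.  3.  6. 10. 15.]
    print(f_rows(a))  # [[ 0.  1.  3.] [ 3.  7. 12.]]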

Example 1: test_cumsumOp

    def test_cumsumOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        f = theano.function([x], cumsum(x))
        assert np.allclose(np.cumsum(a), f(a))  # Test axis=None

        for axis in range(len(a.shape)):
            f = theano.function([x], cumsum(x, axis=axis))
            assert np.allclose(np.cumsum(a, axis=axis), f(a))
Developer: AtousaTorabi, Project: Theano, Lines: 10, Source: test_extra_ops.py


Example 2: gradient

 def gradient(self, observed, at_risk):
     # Hand-derived gradient of the survival cost in Example 4 (same RiskLayer).
     # `Te` is theano.tensor.extra_ops; reversing before and after cumsum
     # turns prefix sums into suffix sums over the at-risk sets.
     prediction = self.output
     risk = T.exp(prediction)
     product = self.input * (risk * T.ones((1, self.input.shape[0])))
     numerator = Te.cumsum(product[::-1])[::-1][at_risk]
     denominator = Te.cumsum(risk[::-1])[::-1][at_risk] * T.ones((1, self.input.shape[0]))
     numerator = numerator.flatten()
     denominator = denominator.flatten()
     gradient = T.dot(observed, self.input - (numerator / denominator))
     return gradient
Developer: fatemeh91, Project: SurvivalNet, Lines: 10, Source: RiskLayer.py


Example 3: test_cumsumOp

    def test_cumsumOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis out of bounds
        self.assertRaises(ValueError, cumsum, x, axis=3)
        self.assertRaises(ValueError, cumsum, x, axis=-4)

        f = theano.function([x], cumsum(x))
        assert np.allclose(np.cumsum(a), f(a))  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            f = theano.function([x], cumsum(x, axis=axis))
            assert np.allclose(np.cumsum(a, axis=axis), f(a))
Developer: Abioy, Project: Theano, Lines: 14, Source: test_extra_ops.py


Example 4: cost

 def cost(self, observed, at_risk):
     prediction = self.output
     exp = T.exp(prediction)[::-1]
     partial_sum = Te.cumsum(exp)[::-1] + 1  # reversed partial cumulative sum, i.e. suffix sums of exp
     log_at_risk = T.log(partial_sum[at_risk])
     diff = prediction - log_at_risk
     cost = T.sum(T.dot(observed, diff))
     return cost
Developer: fatemeh91, Project: SurvivalNet, Lines: 8, Source: RiskLayer.py
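Examples 2 and 4 both lean on the same idiom: reversing an array, applying cumsum, and reversing again converts prefix sums into suffix sums, so each position holds the sum over everything from itself to the end. A quick NumPy illustration of just that trick (values are arbitrary):

    import numpy as np

    risk = np.array([1., 2., 3., 4.])
    suffix = np.cumsum(risk[::-1])[::-1]
    print(suffix)  # [10.  9.  7.  4.] -- entry i equals risk[i] + risk[i+1] + ... + risk[-1]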


Example 5: test_infer_shape

    def test_infer_shape(self):
        x = T.tensor3("x")
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis=None
        self._compile_and_check([x], [self.op(x)], [a], self.op_class)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x], [cumsum(x, axis=axis)], [a], self.op_class)
Developer: amanrajdce, Project: Theano, Lines: 9, Source: test_extra_ops.py


Example 6: test_infer_shape

    def test_infer_shape(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(theano.config.floatX)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x],
                                    [cumsum(x, axis=axis)],
                                    [a],
                                    self.op_class)
Developer: kashif, Project: Theano, Lines: 9, Source: test_extra_ops.py


Example 7: test_Strides3D

    def test_Strides3D(self):
        x = T.ftensor3('x')

        for axis in [0, 1, 2, None]:
            a = np.random.random((42, 30, 25)).astype("float32")
            cumsum_function = theano.function([x], cumsum(x, axis=axis), mode=self.mode)

            slicings = [slice(None, None, None),    # Normal strides
                        slice(None, None, 2),       # Stepped strides
                        slice(None, None, -1),      # Negative strides
                        ]

            # Cartesian product of all slicings to test.
            for slicing in itertools.product(slicings, repeat=x.ndim):
                f = theano.function([x], cumsum(x[slicing], axis=axis), mode=self.mode)
                assert [n for n in f.maker.fgraph.toposort()
                        if isinstance(n.op, GpuCumsum)]
                assert np.allclose(np.cumsum(a[slicing], axis=axis), f(a))
                assert np.allclose(np.cumsum(a[slicing], axis=axis), cumsum_function(a[slicing]))
Developer: 317070, Project: Theano, Lines: 19, Source: test_extra_ops.py


Example 8: test_GpuCumsum3D

    def test_GpuCumsum3D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.ftensor3('x')
        for shape_axis, axis in zip([0, 1, 2, 0, 2, 1, 0], [0, 1, 2, None, -1, -2, -3]):
            f = theano.function([x], cumsum(x, axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]

            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.rand(*a_shape).astype("float32")
            slices = [slice(None), slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np.cumsum(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)

            # Use multiple GPU threadblocks (along accumulation axis)
            a_shape = [2, 2, 2]
            a_shape[shape_axis] = block_max_size + 2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use multiple GPU gridblocks (not along accumulation axis)
            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 1) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            a_shape = [5, 5, 5]
            a_shape[(shape_axis + 2) % 3] = self.max_grid_size1 + 1
            a = np.random.random(a_shape).astype("float32")
            if axis is None:
                # Avoid floating point error
                a = np.sign(a - 0.5).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use recursive cumsum (along accumulation axis)
            a_shape = [3, 3, 3]
            a_shape[shape_axis] = block_max_size * (
                block_max_size + 1) + 2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a - 0.5).astype(
                "float32")  # Avoid floating point error
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))
Developer: 12190143, Project: Theano, Lines: 51, Source: test_extra_ops.py


Example 9: test_Strides1D

    def test_Strides1D(self):
        x = T.fvector('x')

        # Stepped strides
        f = theano.function([x], cumsum(x[::2]), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumsum)]
        a = np.random.randint(10, size=(42,)).astype("float32")
        assert np.allclose(np.cumsum(a[::2]), f(a))

        # Alternative stepped strides
        f = theano.function([x], cumsum(x), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumsum)]
        a = np.random.randint(10, size=(42,)).astype("float32")
        assert np.allclose(np.cumsum(a[::2]), f(a[::2]))

        # Negative strides
        f = theano.function([x], cumsum(x[::-1]), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumsum)]
        a = np.random.randint(10, size=(42,)).astype("float32")
        assert np.allclose(np.cumsum(a[::-1]), f(a))
Developer: AI-Cdrone, Project: Theano, Lines: 23, Source: test_extra_ops.py


Example 10: test_infer_shape

    def test_infer_shape(self):
        # GpuCumSum is only defined for float32 for now, so we skip it
        # in the unsupported cases
        gpucumsum_supported_dtypes = ('float32',)
        if theano.config.floatX not in gpucumsum_supported_dtypes:
            raise SkipTest('GpuCumSum not implemented for dtype %s'
                           % theano.config.floatX)
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(theano.config.floatX)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x],
                                    [cumsum(x, axis=axis)],
                                    [a],
                                    self.op_class)
Developer: ChinaQuants, Project: Theano, Lines: 15, Source: test_extra_ops.py


Example 11: test_GpuCumsum1D

    def test_GpuCumsum1D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.fvector('x')
        f = theano.function([x], cumsum(x), mode=self.mode)
        assert [n for n in f.maker.fgraph.toposort()
                if isinstance(n.op, GpuCumsum)]

        # Extensive testing for the first 1025 sizes
        a = np.random.random(1025).astype("float32")
        for i in xrange(a.shape[0]):
            utt.assert_allclose(np.cumsum(a[:i]), f(a[:i]))

        # Use multiple GPU threadblocks
        a = np.random.random((block_max_size + 2, )).astype("float32")
        utt.assert_allclose(np.cumsum(a), f(a))

        # Use recursive cumsum
        a = np.ones((block_max_size * (block_max_size + 1) + 2,),
                    dtype="float32")
        utt.assert_allclose(np.cumsum(a), f(a))
Developer: ChinaQuants, Project: Theano, Lines: 21, Source: test_extra_ops.py


Example 12: test_GpuCumsum2D

    def test_GpuCumsum2D(self):
        block_max_size = self.max_threads_dim0 * 2

        x = T.fmatrix('x')
        for shape_axis, axis in zip([0, 1, 0], [0, 1, None]):
            f = theano.function([x], cumsum(x, axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]

            # Extensive testing for the first 1025 sizes
            a_shape = [5, 5]
            a_shape[shape_axis] = 1025
            a = np.random.random(a_shape).astype("float32")
            slices = [slice(None), slice(None)]
            for i in xrange(a.shape[shape_axis]):
                slices[shape_axis] = slice(i)
                fa = f(a[slices])
                npa = np.cumsum(a[slices], axis=axis)
                utt.assert_allclose(npa, fa)

            # Use multiple GPU threadblocks
            a_shape = [5, 5]
            a_shape[shape_axis] = block_max_size+2
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))

            # Use multiple GPU gridblocks
            a_shape = [5, 5]
            a_shape[1-shape_axis] = self.max_grid_size1+1
            a = np.random.random(a_shape).astype("float32")
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a), rtol=5e-5)

            # Use recursive cumsum
            a_shape = [3, 3]
            a_shape[shape_axis] = block_max_size*(block_max_size+1)+2
            a = np.random.random(a_shape).astype("float32")
            a = np.sign(a-0.5).astype("float32")  # Avoid floating point error
            utt.assert_allclose(np.cumsum(a, axis=axis), f(a))
Developer: c0g, Project: Theano, Lines: 38, Source: test_extra_ops.py


Example 13: test_Strides2D

    def test_Strides2D(self):
        x = T.fmatrix('x')

        for shape_axis, axis in zip([0, 1, 0], [0, 1, None]):
            a = np.random.random((42, 30)).astype("float32")

            # Stepped strides along axis=0
            f = theano.function([x], cumsum(x[::2], axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[::2], axis=axis), f(a))

            # Stepped strides along axis=1
            f = theano.function([x], cumsum(x[:, ::2], axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[:, ::2], axis=axis), f(a))

            # Alternative stepped strides along axis=0
            f = theano.function([x], cumsum(x), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[::2]), f(a[::2]))

            # Alternative stepped strides along axis=1
            f = theano.function([x], cumsum(x), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[:, ::2]), f(a[:, ::2]))

            # Negative strides along axis=0
            f = theano.function([x], cumsum(x[::-1], axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[::-1], axis=axis), f(a))

            # Negative strides along axis=1
            f = theano.function([x], cumsum(x[:, ::-1], axis=axis), mode=self.mode)
            assert [n for n in f.maker.fgraph.toposort()
                    if isinstance(n.op, GpuCumsum)]
            assert np.allclose(np.cumsum(a[:, ::-1], axis=axis), f(a))
Developer: AI-Cdrone, Project: Theano, Lines: 41, Source: test_extra_ops.py


Example 14: test_GpuCumsum4D

 def test_GpuCumsum4D(self):
     # Should not use the GPU version.
     x = T.ftensor4('x')
     f = theano.function([x], cumsum(x, axis=1), mode=self.mode)
     assert [n for n in f.maker.fgraph.toposort()
             if isinstance(n.op, CumsumOp)]
Developer: ChinaQuants, Project: Theano, Lines: 6, Source: test_extra_ops.py


Example 15:

)

layer2 = ll.LSTMLayer(
    input = layer_embed.output,
    n_in  = layer_embed.n_out,
    n_out = 150,
    backwards=True
)

# keep only the output vectors at the end-of-token positions
end_indices = T.nonzero(T.eq(T.argmax(x, axis=1), input_dim - 1))
# keep only the output vectors at the start-of-token positions
start_indices = T.nonzero(T.eq(T.argmax(x, axis=1), input_dim - 2))

hl = T.concatenate((layer1.output, layer2.output[::-1]), axis=1)
hc = extra.cumsum(hl, axis=0)
hsub = hc[end_indices] - hc[start_indices]
diff_indices = T.as_tensor_variable(end_indices) - T.as_tensor_variable(start_indices)
diff_shuf = diff_indices.flatten().dimshuffle(0, 'x')

h = hsub / diff_shuf

h_size = (layer1.n_out + layer2.n_out)

#relationship between near words
layer_c = nl.NNLayer(
    input = h,
    n_in = h_size,
    n_out = 150,
    activation = T.tanh)
Developer: rocharhs, Project: nlp_nn, Lines: 30, Source: train_postag_viterbi.py
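The cumsum in Example 15 implements the standard prefix-sum range trick: after hc = cumsum(hl, axis=0), subtracting one prefix row from another yields the sum of the hidden states between those positions, which is then divided by the index gap to get a mean. A small NumPy sketch of the idea (shapes and indices are illustrative):

    import numpy as np

    h = np.arange(12, dtype="float32").reshape(6, 2)  # 6 time steps, 2 features
    hc = np.cumsum(h, axis=0)                         # running totals per feature
    start, end = 1, 4
    segment_sum = hc[end] - hc[start]                 # sums rows start+1 .. end inclusive
    segment_mean = segment_sum / (end - start)        # average hidden state over the span
    print(segment_mean)                               # [6. 7.]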


Example 16: kl

def kl(Y, Y_hat, cost_mask=None,
       batch_vec=True,
       cost_matrix=False,
       sum_tru_time=False,
       normalize_by_outsize=True):

    """
    Warning: this function expects a sigmoid nonlinearity in the
    output layer. Returns a batch (vector) holding, for each example,
    the mean across units of the KL divergence KL(P || Q), where P is
    defined by Y and Q by Y_hat:

        p log p - p log q + (1-p) log (1-p) - (1-p) log (1-q)

    For binary p, the entropy terms vanish, leaving

        - p log q - (1-p) log (1-q)
        = - p log sigmoid(z) - (1-p) log sigmoid(-z)
        = p softplus(-z) + (1-p) softplus(z)
    Parameters
    ----------
    Y : Variable
        targets for the sigmoid outputs. Currently Y must be purely binary.
        If it's not, you'll still get the right gradient, but the
        value in the monitoring channel will be wrong.
    Y_hat : Variable
        predictions made by the sigmoid layer. Y_hat must be generated by
        fprop, i.e., it must be a symbolic sigmoid.
    Returns
    -------
    ave : Variable
        average KL divergence between Y and Y_hat.
    """

    assert hasattr(Y_hat, 'owner')

    owner = Y_hat.owner
    assert owner is not None
    op = owner.op

    if not hasattr(op, 'scalar_op'):
        raise ValueError("Expected Y_hat to be generated by an Elemwise "
                         "op, got "+str(op)+" of type "+str(type(op)))

    assert isinstance(op.scalar_op, TT.nnet.sigm.ScalarSigmoid)

    z, = owner.inputs
    z = z.reshape(Y.shape)
    term_1 = Y * TT.nnet.softplus(-z)
    term_2 = (1 - Y) * TT.nnet.softplus(z)

    total = term_1 + term_2

    if cost_mask is not None:
        if cost_mask.ndim != total.ndim:
            cost_mask = cost_mask.dimshuffle(0, 1, 'x')

        total = cost_mask * total
    if not sum_tru_time:
        if normalize_by_outsize:
            if cost_matrix:
                ave = total.sum(-1) / TT.cast((total.shape[2] - 2), "float32")
            else:
                if batch_vec:
                    ave = total.sum(0).sum(1) / TT.cast((total.shape[2] - 2), "float32")
                else:
                    ave = total.sum() / (total.shape[1] * (total.shape[2] - 2))
        else:
            if cost_matrix:
                ave = total.sum(-1)
            else:
                if batch_vec:
                    ave = total.sum(0).sum(1)
                else:
                    ave = total.sum() / TT.cast(total.shape[1], "float32")
    else:
        assert not cost_matrix
        if normalize_by_outsize:
            if batch_vec:
                ave = cumsum(total.sum(-1) / TT.cast((total.shape[2] - 2), "float32"),
                        axis=0)[::-1]
            else:
                ave = cumsum(total.sum((1, 2)) / (total.shape[1] * (total.shape[2] - 2)),
                        axis=0)[::-1]
        else:
            if batch_vec:
                ave = cumsum(total.sum(-1), axis=0)[::-1]
            else:
                ave = cumsum(total.sum((1, 2)) / TT.cast(total.shape[1], "float32"), axis=0)[::-1]

    return ave
Developer: BKJackson, Project: Attentive_reader, Lines: 87, Source: costs.py
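The simplification in the docstring rests on the identity -log sigmoid(z) = softplus(-z), together with 1 - sigmoid(z) = sigmoid(-z). A short NumPy check with arbitrary values:

    import numpy as np

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def softplus(z):
        return np.log1p(np.exp(z))

    z = np.array([-2.0, 0.5, 3.0])
    for p in (0.0, 1.0):  # purely binary targets
        cross_entropy = -p * np.log(sigmoid(z)) - (1 - p) * np.log(sigmoid(-z))
        via_softplus = p * softplus(-z) + (1 - p) * softplus(z)
        assert np.allclose(cross_entropy, via_softplus)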



Note: the theano.tensor.extra_ops.cumsum examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.

