Python Conv3D.conv3D function code examples


This article collects typical usage examples of the Python function theano.tensor.nnet.Conv3D.conv3D. If you are wondering what conv3D does, how to call it, or what it looks like in real code, the hand-picked examples below may help.



Twelve code examples of the conv3D function are shown below, sorted by popularity by default.
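Before the examples, here is a minimal sketch of how conv3D is typically called. It assumes a Theano version that still ships theano.tensor.nnet.Conv3D; the shapes and variable names are illustrative only and are not taken from any of the projects cited below. conv3D expects the input V laid out as (batch, row, column, time, in channel), the filters W as (out channel, row, column, time, in channel), a bias b with one entry per output channel, and a stride tuple d; it performs a 'valid' convolution and does not flip the kernel.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.Conv3D import conv3D

floatX = theano.config.floatX

# Symbolic 5D inputs: V is (batch, row, column, time, in channel),
# W is (out channel, row, column, time, in channel).
V = T.TensorType(floatX, (False,) * 5)('V')
W = T.TensorType(floatX, (False,) * 5)('W')
b = T.vector('b')  # one bias per output channel

H = conv3D(V, W, b, d=(1, 1, 1))  # 'valid' convolution with unit strides
f = theano.function([V, W, b], H)

# Illustrative data: 2 volumes of size 8x8x8 with 3 channels, 4 filters of size 3x3x3.
v = np.random.randn(2, 8, 8, 8, 3).astype(floatX)
w = np.random.randn(4, 3, 3, 3, 3).astype(floatX)
out = f(v, w, np.zeros(4, dtype=floatX))
print(out.shape)  # (2, 6, 6, 6, 4): (batch, row, column, time, out channel)

The examples that follow show how higher-level libraries wrap this call, usually dimshuffling from the more common (batch, channel, row, column, time) layout into the layout conv3D expects and back again.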

Example 1: local_conv3d_cpu

def local_conv3d_cpu(node):
    if not isinstance(node.op, AbstractConv3d):
        return None

    img, kern = node.inputs
    if ((not isinstance(img.type, TensorType) or
         not isinstance(kern.type, TensorType))):
        return None
    if node.op.border_mode not in ['valid', (0, 0, 0)]:
        return None
    if node.op.filter_dilation != (1, 1, 1):
        return None

    bias = theano.tensor.zeros_like(kern[:, 0, 0, 0, 0])

    # need to flip the kernel if necessary (conv3D does not flip)
    if node.op.filter_flip:
        kern = kern[:, :, ::-1, ::-1, ::-1]

    # conv3D expects shape (batch, row, column, time, channel)
    img = img.dimshuffle(0, 2, 3, 4, 1)
    kern = kern.dimshuffle(0, 2, 3, 4, 1)

    rval = conv3D(img, kern, bias, node.op.subsample)
    copy_stack_trace(node.outputs[0], rval)
    rval = rval.dimshuffle(0, 4, 1, 2, 3)

    return [rval]
Author: Thrandis, Project: Theano, Lines: 28, Source: opt.py


Example 2: apply

 def apply(self, dataset, can_fit=False):
     X = dataset.get_topological_view()
     d = len(X.shape) - 2
     assert d in [2, 3]
     assert X.dtype == 'float32' or X.dtype == 'float64'
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], 1, X.shape[3]])
     kernel_size = 1
     kernel_shape = [X.shape[-1]]
     for factor in self.sampling_factor:
         kernel_size *= factor
         kernel_shape.append(factor)
     if d == 2:
         kernel_shape.append(1)
     kernel_shape.append(X.shape[-1])
     kernel_value = 1. / float(kernel_size)
     kernel = np.zeros(kernel_shape, dtype=X.dtype)
     for i in xrange(X.shape[-1]):
         kernel[i, :, :, :, i] = kernel_value
     from theano.tensor.nnet.Conv3D import conv3D
     X_var = T.TensorType(broadcastable=[s == 1 for s in X.shape],
                          dtype=X.dtype)()
     downsampled = conv3D(X_var, kernel, np.zeros(X.shape[-1], X.dtype),
                          kernel_shape[1:-1])
     f = function([X_var], downsampled)
     X = f(X)
     if d == 2:
         X = X.reshape([X.shape[0], X.shape[1], X.shape[2], X.shape[4]])
     dataset.set_topological_view(X)
Author: Alienfeel, Project: pylearn2, Lines: 29, Source: preprocessing.py


Example 3: apply

    def apply(self, graph):
        in_vw = graph.read_key(key="input")
        num_filters = graph.read_key(key="num_filters")
        filter_size = graph.read_key(key="filter_size")
        stride = graph.read_key_with_default(key="stride", default=(1, 1, 1))
        pad = graph.read_key_with_default(key="pad", default="valid")
        include_bias = graph.read_key_with_default(key="include_bias",
                                                   default=False)
        assert len(filter_size) == 3
        assert pad == "valid"

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = th_utils.read_key_with_state_default(
            graph=graph,
            key="weight",
            tags={"weight": True,
                  "linear_weight": True,
                  "in_axes": (1,),
                  "out_axes": (0,),
                  "shape": filter_shape,
                  "dtype": fX},
            state_tags={"parameter": True,
                        "state": True}
        ).var
        # create bias
        if include_bias:
            b = th_utils.read_key_with_state_default(
                graph=graph,
                key="bias",
                tags={"bias": True,
                      "shape": (num_filters,),
                      "dtype": fX},
                state_tags={"parameter": True,
                            "state": True}
            ).var
        else:
            b = T.zeros(num_filters)

        from theano.tensor.nnet.Conv3D import conv3D
        # conv3D takes V in order: (batch, row, column, time, in channel)
        # and W in order: (out channel, row, column, time, in channel)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
                         W=W.dimshuffle(0, 2, 3, 4, 1),
                         b=b,
                         d=stride)

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=(0, 0, 0))

        out_vw = VariableWrapper(out_var, out_shape)
        graph.write_key(key="output", value=out_vw)
Author: diogo149, Project: hooky, Lines: 59, Source: conv.py


Example 4: compute_output

    def compute_output(self, network, in_vw):
        # gather hyperparameters
        num_filters = network.find_hyperparameter(["num_filters"])
        filter_size = network.find_hyperparameter(["filter_size"])
        stride = network.find_hyperparameter(["conv_stride", "stride"],
                                             (1, 1, 1))
        pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
        inits = list(toolz.concat(network.find_hyperparameters(
            ["inits"],
            [])))
        include_bias = network.find_hyperparameter(["include_bias"], False)
        assert len(filter_size) == 3
        assert pad == "valid"

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = network.create_vw(
            name="weight",
            is_shared=True,
            shape=filter_shape,
            tags={"parameter", "weight"},
            inits=inits,
        ).variable
        # create bias
        if include_bias:
            b = network.create_vw(
                name="bias",
                is_shared=True,
                shape=(num_filters,),
                tags={"parameter", "bias"},
                inits=inits,
            ).variable
        else:
            b = T.zeros(num_filters)

        from theano.tensor.nnet.Conv3D import conv3D
        # conv3D takes V in order: (batch, row, column, time, in channel)
        # and W in order: (out channel, row, column, time, in channel)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        out_var = conv3D(V=in_vw.variable.dimshuffle(0, 2, 3, 4, 1),
                         W=W.dimshuffle(0, 2, 3, 4, 1),
                         b=b,
                         d=stride)

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=(0, 0, 0))

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
Author: rewonc, Project: treeano, Lines: 59, Source: conv.py


Example 5: output

    def output(self, input, is_train):
        input = super(ConvLayer3d, self).output(input, is_train)

        u = conv3D(input.dimshuffle(0, 2, 3, 4, 1), self.filters,
                   self.b, d=self.s).dimshuffle(0, 4, 1, 2, 3)

        # TODO: test whether this is really the right place to insert the bias
        return self._activate(u, is_train)
Author: kanairen, Project: CubicCNN, Lines: 8, Source: __conv.py


Example 6: test_undefined_grad_grad

    def test_undefined_grad_grad(self):
        # tests that undefined grads are caught in the grad method

        V = theano.tensor.TensorType(dtype=config.floatX, broadcastable=(False, False, False, False, False))()
        W = theano.tensor.TensorType(dtype=config.floatX, broadcastable=(False, False, False, False, False))()
        b = theano.tensor.vector()
        d = theano.tensor.ivector()

        Z = conv3D(V, W, b, d)

        self.assertRaises(TypeError, theano.gradient.grad, Z.sum(), d)
Author: igul222, Project: Theano, Lines: 11, Source: test_gradient.py


Example 7: grad

    def grad(self, inputs, output_gradients):
        C, d, WShape, B = inputs
        dLdA, = output_gradients

        z = T.zeros_like(C[0, 0, 0, 0, :])
        dLdC = convTransp3D(dLdA, z, d, B, C.shape[1:4])
        # d actually does affect the outputs, so it's not disconnected
        dLdd = grad_undefined(self, 1, d)
        # The shape of the weights doesn't affect the output elements
        dLdWShape = DisconnectedType()()
        dLdB = conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)

        return [dLdC, dLdd, dLdWShape, dLdB]
Author: amanrajdce, Project: Theano, Lines: 13, Source: ConvGrad3D.py


Example 8: test_undefined_grad_grad

def test_undefined_grad_grad():
    # tests that undefined grads are caught in the grad method

    V = theano.tensor.TensorType(dtype=config.floatX,
            broadcastable=(False, False, False, False, False))()
    W = theano.tensor.TensorType(dtype=config.floatX,
            broadcastable=(False, False, False, False, False))()
    b = theano.tensor.vector()
    d = theano.tensor.ivector()

    Z = conv3D(V, W, b, d)

    try:
        g = theano.gradient.grad(Z.sum(), d)
        assert False
    except TypeError:
        pass
Author: jaberg, Project: Theano, Lines: 17, Source: test_gradient.py


Example 9: setUp

    def setUp(self):
        super(TestConv3D, self).setUp()
        utt.seed_rng()
        self.rng = N.random.RandomState(utt.fetch_seed())

        mode = copy.copy(theano.compile.mode.get_default_mode())
        mode.check_py_code = False

        self.W = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.b = shared(N.zeros(1, dtype=floatX))
        self.rb = shared(N.zeros(1, dtype=floatX))
        self.V = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
        self.d = shared(N.ndarray(shape=(3, ), dtype=int))

        self.H = conv3D(self.V, self.W, self.b, self.d)
        self.H_func = function([], self.H, mode=mode)
        self.H_shape_func = function([], self.H.shape, mode=mode)

        self.RShape = T.vector(dtype='int64')

        self.otherH = T.TensorType(floatX,
                        (False, False, False, False, False))(name='otherH')
        self.transp = convTransp3D(self.W, self.rb, self.d,
                                   self.otherH, self.RShape)
        self.transp_func = function([self.otherH, self.RShape],
                                    self.transp, mode=mode)

        self.R = convTransp3D(self.W, self.rb, self.d, self.H, self.RShape)
        self.R_func = function([self.RShape], self.R, mode=mode)
        self.R_shape_func = function([self.RShape], self.R.shape)

        self.reconsObj = T.sum(T.sqr(self.V - self.R))
        self.reconsObjFunc = function([self.RShape], self.reconsObj, mode=mode)

        self.gradientsFunc = function([self.RShape],
                        [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj,
                        self.H), T.grad(self.reconsObj, self.V),
                         T.grad(self.reconsObj, self.b)], mode=mode)

        self.check_c_against_python = function([self.RShape],
                        [T.grad(self.reconsObj, self.W), T.grad(self.reconsObj,
                        self.H), T.grad(self.reconsObj, self.V),
                         T.grad(self.reconsObj, self.b)], mode='DEBUG_MODE')

        self.dCdW_shape_func = function([self.RShape],
                        T.grad(self.reconsObj, self.W).shape, mode=mode)
Author: npinto, Project: Theano, Lines: 46, Source: test_conv3d.py


Example 10: grad

    def grad(self, inputs, output_gradients):
        W, b, d, H, RShape = inputs
        dCdR, = output_gradients
        dCdH = conv3D(dCdR, W, T.zeros_like(H[0, 0, 0, 0, :]), d)
        WShape = W.shape
        dCdW = convGrad3D(dCdR, d, WShape, H)
        dCdb = T.sum(dCdR, axis=(0, 1, 2, 3))
        # not differentiable, since d affects the output elements
        dCdd = grad_undefined(self, 2, d)
        # disconnected, since RShape just determines the output shape
        dCdRShape = DisconnectedType()()

        if 'name' in dir(dCdR) and dCdR.name is not None:
            dCdR_name = dCdR.name
        else:
            dCdR_name = 'anon_dCdR'

        if 'name' in dir(H) and H.name is not None:
            H_name = H.name
        else:
            H_name = 'anon_H'

        if 'name' in dir(W) and W.name is not None:
            W_name = W.name
        else:
            W_name = 'anon_W'

        if 'name' in dir(b) and b.name is not None:
            b_name = b.name
        else:
            b_name = 'anon_b'


        dCdW.name = 'ConvTransp3D_dCdW.H='+H_name+',dCdR='+dCdR_name+',W='+W_name
        dCdb.name = 'ConvTransp3D_dCdb.H='+H_name+',dCdR='+dCdR_name+',W='+W_name+',b='+b_name
        dCdH.name = 'ConvTransp3D_dCdH.H=' + H_name + ',dCdR=' + dCdR_name

        return [dCdW,  dCdb, dCdd, dCdH, dCdRShape]
Author: c0g, Project: Theano, Lines: 38, Source: ConvTransp3D.py


Example 11: __call__

    def __call__(self, t):
        output = conv3D(self.V + t * self.dV, self.W + t * self.dW,
                        self.b + t * self.db, self.d)

        return output
Author: athiwatp, Project: Theano, Lines: 5, Source: test_conv3d.py


Example 12: _forward

    def _forward(self):
        inpt = self.inpt

        self.weights = self.declare(
            (self.n_output, self.filter_depth, self.n_inpt,
             self.filter_height, self.filter_width)
        )
        self.bias = self.declare((self.n_output,))

        if self.border_mode == 'same':
            pad_dim1 = self.filter_height - 1
            pad_dim2 = self.filter_width - 1
            pad_dim3 = self.filter_depth - 1

            if pad_dim1 > 0 or pad_dim2 > 0 or pad_dim3 > 0:
                output_shape = (
                    inpt.shape[0], inpt.shape[1] + pad_dim3,
                    inpt.shape[2], inpt.shape[3] + pad_dim1,
                    inpt.shape[4] + pad_dim2
                )
                big_zero = T.zeros(output_shape)
                indices = (
                    slice(None),
                    slice(pad_dim3 // 2, inpt.shape[1] + pad_dim3 // 2),
                    slice(None),
                    slice(pad_dim1 // 2, inpt.shape[3] + pad_dim1 // 2),
                    slice(pad_dim2 // 2, inpt.shape[4] + pad_dim2 // 2)
                )

                inpt = T.set_subtensor(big_zero[indices], inpt)

        #print '@basic.py implementation: ', self.implementation

        if self.implementation == 'conv3d2d':
            self.output_in = conv3d(
                signals=inpt,
                filters=self.weights
            )
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle('x', 'x', 0, 'x', 'x')
        elif self.implementation == 'conv3D':
            filters_flip = self.weights[:, ::-1, :, ::-1, ::-1]
            bias = self.bias if self.use_bias else T.zeros(self.bias.shape)
            self.output_in = conv3D(
                V=inpt.dimshuffle(0, 3, 4, 1, 2),
                W=filters_flip.dimshuffle(0, 3, 4, 1, 2),
                b=bias,
                d=(1, 1, 1)
            )
            self.output_in = self.output_in.dimshuffle(0, 3, 4, 1, 2)
        elif self.implementation == 'dnn_conv3d':
            self.output_in = theano.sandbox.cuda.dnn.dnn_conv3d(
                img=inpt.dimshuffle(0, 2, 1, 3, 4),
                kerns=self.weights.dimshuffle(0, 2, 1, 3, 4)
            )
            self.output_in = self.output_in.dimshuffle(0, 2, 1, 3, 4)
            if self.use_bias:
                self.output_in = self.output_in + self.bias.dimshuffle('x', 'x', 0, 'x', 'x')
        else:
            raise NotImplementedError('This class only supports conv3d2d, conv3D and dnn_conv3d')

        self.output = self.output_in

        if self.strides != (1, 1, 1):
            self.output = self.output[:, ::self.strides[2], :, ::self.strides[0], ::self.strides[1]]
Author: jhzhou1111, Project: CNNbasedMedicalSegmentation, Lines: 65, Source: basic.py



Note: the theano.tensor.nnet.Conv3D.conv3D examples in this article were collected from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.

