
Python conv3d2d.conv3d Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.nnet.conv3d2d.conv3d. If you have been wondering what conv3d does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.



The following presents 20 code examples of the conv3d function, sorted by popularity by default.
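
Before the collected examples, here is a minimal, self-contained sketch of what a conv3d call looks like (an illustration written for this article, not taken from any of the projects below). conv3d expects signals laid out as (batch, time, in_channels, height, width) and filters as (out_channels, filter_time, in_channels, filter_height, filter_width), and computes a 'valid' convolution by default:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet.conv3d2d import conv3d

# symbolic 5D input: (batch, time, channels, height, width)
ftensor5 = T.TensorType('float32', (False,) * 5)
signals = ftensor5('signals')

# 2 output channels, a 3x3x3 filter over 1 input channel:
# (out_channels, filter_time, in_channels, filter_height, filter_width)
filters = theano.shared(np.random.randn(2, 3, 1, 3, 3).astype('float32'))

out = conv3d(signals=signals, filters=filters, border_mode='valid')
f = theano.function([signals], out)

y = f(np.random.randn(4, 8, 1, 16, 16).astype('float32'))
# y.shape == (4, 6, 2, 14, 14): time/height/width shrink by filter_size - 1,
# and the channel axis becomes out_channels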

Example 1: lcn_3d_input

def lcn_3d_input(data, kernel_shape, n_maps):

    """
    :param data: [examples, depth, filters, height, width]
    :param kernel_shape: int
    :param n_maps: int
    :return: new_x: [examples, depth, filters, height, width]
    """

    # create symbolic variable for the input data
    ftensor5 = T.TensorType('float32', [False] * 5)
    x = ftensor5()

    # # determine the number of maps
    # n_maps = data.shape[2]

    # create 3d filter that spans across all channels / feature maps
    # todo: kernel is not really in 3d; need 3d implementation instead of 2d repeated across third dimension
    # todo: alternative is to keep 2d kernel and extend short range given data size in z-plane; change first kernel_sh.
    filter_shape = (1, kernel_shape[0], n_maps, kernel_shape[1], kernel_shape[2])
    filters = np.resize(gaussian_filter(kernel_shape[1]), filter_shape)
    filters = filters / np.sum(filters)
    filters = sharedX(filters)

    # convolve filter with input signal
    convolution_out = conv3d(
        signals=x,
        filters=filters,
        signals_shape=data.shape,
        filters_shape=filter_shape,
        border_mode='valid'
    )

    # for each pixel, subtract the mean of its local (kernel-shaped) neighborhood
    mid_0 = int(np.floor(kernel_shape[0] / 2.))
    mid_1 = int(np.floor(kernel_shape[1] / 2.))
    mid_2 = int(np.floor(kernel_shape[2] / 2.))
    mean = T.tile(convolution_out, (1, 1, n_maps, 1, 1))
    padded_mean = T.zeros_like(x)
    padded_mean = T.set_subtensor(padded_mean[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], mean)
    centered_data = data - padded_mean

    # divisive normalization: divide by the local neighborhood norm, floored at the per-image mean and at 1
    sum_sqr_xx = conv3d(signals=T.sqr(data), filters=filters)
    denominator = T.tile(T.sqrt(sum_sqr_xx), (1, 1, n_maps, 1, 1))
    padded_denominator = T.ones_like(x)
    padded_denominator = T.set_subtensor(
        padded_denominator[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], denominator
    )
    per_img_mean = padded_denominator.mean(axis=[1, 2, 3, 4])
    divisor = T.largest(
        per_img_mean.dimshuffle(0, 'x', 'x', 'x', 'x'),
        padded_denominator
    )
    new_x = centered_data / T.maximum(1., divisor)

    # compile theano function
    f = theano.function([x], new_x)

    return f(data)
Developer: dlacombejr | Project: Research | Lines: 60 | Source: scaling.py
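
A hypothetical call to lcn_3d_input might look like the following (the helpers gaussian_filter and sharedX are assumed to come from the same project; the shapes are illustrative):

import numpy as np

# normalize a batch of 10 volumes with 4 feature maps each,
# using a 3x5x5 smoothing kernel
data = np.random.rand(10, 9, 4, 32, 32).astype('float32')
normalized = lcn_3d_input(data, kernel_shape=(3, 5, 5), n_maps=4)
# normalized.shape == data.shape == (10, 9, 4, 32, 32)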


Example 2: dot

    def dot(self):

        """ Convolve input with model weights """

        f = conv3d(self.x, self.w)

        return f
Developer: dlacombejr | Project: Research | Lines: 7 | Source: network.py


Example 3: convolve

 def convolve(self, input, **kwargs):
     # Conv3d expects input  [n_images, depth, channels, height, width]
     weights = self.W.dimshuffle(0, 2, 1, 3, 4)
     input_sh = input.dimshuffle(0, 2, 1, 3, 4)
     conved = conv3d(input_sh, weights, signals_shape=None, filters_shape=None, border_mode='valid')
     conved_sh = conved.dimshuffle(0, 2, 1, 3, 4)
     return conved_sh
Developer: zenna | Project: ig | Lines: 7 | Source: conv3d.py


Example 4: conv3d

def conv3d(x, kernel, strides=(1, 1, 1),
           border_mode='valid', dim_ordering='th',
           volume_shape=None, filter_shape=None):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if border_mode not in {'same', 'valid'}:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)
        # TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
        kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
        if volume_shape:
            volume_shape = (volume_shape[0], volume_shape[4],
                            volume_shape[1], volume_shape[2], volume_shape[3])
        if filter_shape:
            filter_shape = (filter_shape[4], filter_shape[3],
                            filter_shape[0], filter_shape[1], filter_shape[2])

    if border_mode == 'same':
        assert(strides == (1, 1, 1))
        pad_dim1 = (kernel.shape[2] - 1)
        pad_dim2 = (kernel.shape[3] - 1)
        pad_dim3 = (kernel.shape[4] - 1)
        output_shape = (x.shape[0], x.shape[1],
                        x.shape[2] + pad_dim1,
                        x.shape[3] + pad_dim2,
                        x.shape[4] + pad_dim3)
        output = T.zeros(output_shape)
        indices = (slice(None), slice(None),
                   slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
                   slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
                   slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
        x = T.set_subtensor(output[indices], x)
        border_mode = 'valid'

    border_mode_3d = (border_mode, border_mode, border_mode)
    conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                               filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                               border_mode=border_mode_3d)
    conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)

    # support strides by manually slicing the output
    if strides != (1, 1, 1):
        conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]

    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))

    return conv_out
Developer: fvisin | Project: keras | Lines: 60 | Source: theano_backend.py
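
As a hypothetical usage sketch of this wrapper (variable names and shapes here are assumptions, with the default 'th' ordering), border_mode='same' pre-pads the volume so the spatial output shape matches the input:

import numpy as np
import theano
import theano.tensor as T

x = T.TensorType('float32', (False,) * 5)('x')          # (b, in_c, d1, d2, d3)
kernel = theano.shared(
    np.random.randn(8, 3, 3, 3, 3).astype('float32'))   # (out_c, in_c, k1, k2, k3)

y = conv3d(x, kernel, strides=(1, 1, 1), border_mode='same')
f = theano.function([x], y)

out = f(np.random.randn(2, 3, 10, 10, 10).astype('float32'))
# out.shape == (2, 8, 10, 10, 10): channels become out_c, spatial dims preserved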


Example 5: get_output

    def get_output(self, train):
        X = self.get_input(train)
        border_mode = self.border_mode

        # Both conv3d2d.conv3d and nnet.conv3D only support the 'valid' border mode
        if border_mode != 'valid':
            if border_mode == 'same':
                assert(self.subsample == (1, 1, 1))
                pad_z = (self.nb_depth - self.subsample[0])
                pad_x = (self.nb_row - self.subsample[1])
                pad_y = (self.nb_col - self.subsample[2])
            else:  # 'full' border mode
                pad_z = (self.nb_depth - 1) * 2
                pad_x = (self.nb_row - 1) * 2
                pad_y = (self.nb_col - 1) * 2

            input_shape = X.shape
            output_shape = (input_shape[0], input_shape[1],
                            input_shape[2] + pad_z,
                            input_shape[3] + pad_x,
                            input_shape[4] + pad_y)
            output = T.zeros(output_shape)
            indices = (slice(None), slice(None),
                       slice(pad_z//2, input_shape[2] + pad_z//2),
                       slice(pad_x//2, input_shape[3] + pad_x//2),
                       slice(pad_y//2, input_shape[4] + pad_y//2))
            X = T.set_subtensor(output[indices], X)


        border_mode = 'valid'

        if on_gpu():
            # Shuffle the dimensions as per the input parameter order, restore it once done
            W_shape = (self.W_shape[0], self.W_shape[2], self.W_shape[1],
                       self.W_shape[3],self.W_shape[4])

            conv_out = conv3d2d.conv3d(signals=X.dimshuffle(0, 2, 1, 3, 4),
                                       filters=self.W.dimshuffle(0, 2, 1, 3, 4),
                                       filters_shape=W_shape,
                                       border_mode=border_mode)

            conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
            self.W = self.W.dimshuffle(0, 2, 1, 3, 4)
        else:
            # Shuffle the dimensions as per the input parameter order, restore it once done
            # W1 = self.W.dimshuffle(0, 1, 3, 4, 2)
            self.W = self.W.dimshuffle(0, 2, 3, 4, 1)
            conv_out = T.nnet.conv3D(V=X.dimshuffle(0, 2, 3, 4, 1),
                                     W=self.W,
                                     b=self.b, d=self.subsample)
            conv_out = conv_out.dimshuffle(0, 4, 1, 2, 3)
            self.W = self.W.dimshuffle(0, 4, 1, 2, 3)

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x'))
        return output
Developer: JJiao | Project: keras | Lines: 55 | Source: convolutional.py


Example 6: __init__

    def __init__(self,
                 filters,
                 signal_shape,
                 filter_shape,
                 input_axes = ('b', 0, 1, 't', 'c'),
                 batch_size=None,
                 output_axes = ('b', 0, 1, 't', 'c'),
                 kernel_stride = [1, 1, 1],
                 pad=0,
                 message = '',
                 partial_sum=None):

        if len(kernel_stride) != 3:
            raise ValueError("kernel_stride must have length 3")
        elif kernel_stride[0] != kernel_stride[1]:
            raise ValueError("only values of kernel_stride with both "
                             "elements equal are supported currently")
        if message != '':
            raise NotImplementedError()

        if batch_size is not None:
            raise NotImplementedError()

        if input_axes != ('b', 0, 1, 't', 'c'):
            raise NotImplementedError()

        print(kernel_stride)
        # the default kernel_stride is a list, so compare as a tuple
        if tuple(kernel_stride) != (1, 1, 1):
            raise ValueError("only values of kernel_stride with value of 1 "
                             "are supported currently")

        self.input_axes = input_axes
        self.output_axes = output_axes

        #self.conv3d_op = Conv3D()
        self.conv3d_op = conv3d  # keep a reference to the conv3d function itself

        # filters should be a GPU shared variable.
        # I guess you could GpuFromHost them every time,
        # but if you're using this class you probably care
        # about performance and want to be at least warned
        # that this is happening
        assert hasattr(filters, 'get_value')
        assert 'Cuda' in str(type(filters))
        self._filters = filters
        self.pad = pad
        self.partial_sum = partial_sum
        self.kernel_stride = kernel_stride
        self.signal_shape = signal_shape
        self.filter_shape = filter_shape

        ## Add a dummy b for interface issue
        self.b = sharedX(np.zeros((filter_shape[0])))
Developer: AtousaTorabi | Project: HumanActivityRecognition | Lines: 53 | Source: conv3d_btc01.py


Example 7: compute_output

    def compute_output(self, network, in_vw):
        # gather hyperparameters
        num_filters = network.find_hyperparameter(["num_filters"])
        filter_size = network.find_hyperparameter(["filter_size"])
        stride = network.find_hyperparameter(["conv_stride", "stride"],
                                             (1, 1, 1))
        pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
        inits = list(toolz.concat(network.find_hyperparameters(
            ["inits"],
            [])))
        assert len(filter_size) == 3
        assert pad == "valid"
        assert stride == (1, 1, 1)

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = network.create_vw(
            name="weight",
            is_shared=True,
            shape=filter_shape,
            tags={"parameter", "weight"},
            inits=inits,
        ).variable

        from theano.tensor.nnet.conv3d2d import conv3d
        # takes signals in order: (batch, time, channels, row, column)
        # and filters in order: (out channel, time, in channels, row, column)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        order = (0, 2, 1, 3, 4)
        out_var = conv3d(signals=in_vw.variable.dimshuffle(*order),
                         filters=W.dimshuffle(*order),
                         signals_shape=[in_vw.shape[o] for o in order],
                         filters_shape=[filter_shape[o] for o in order],
                         # HACK as of 20150916, conv3d does a check
                         # if isinstance(border_mode, str), so we manually
                         # cast as a string
                         border_mode=str("valid"))

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=conv_parse_pad(filter_size, pad))

        network.create_vw(
            "default",
            variable=out_var,
            shape=out_shape,
            tags={"output"},
        )
Developer: rewonc | Project: treeano | Lines: 53 | Source: conv.py
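
The axis bookkeeping here is easy to get wrong; a small numpy check (purely illustrative, not project code) confirms that the (0, 2, 1, 3, 4) shuffle maps the stored (out_channels, in_channels, time, row, column) weight layout onto the (out_channels, time, in_channels, row, column) layout that conv3d expects:

import numpy as np

w = np.zeros((16, 3, 5, 7, 7))           # stored as (out_c, in_c, t, h, w)
w_conv3d = w.transpose(0, 2, 1, 3, 4)    # -> (out_c, t, in_c, h, w) for conv3d
assert w_conv3d.shape == (16, 5, 3, 7, 7)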


Example 8: symb_forward

    def symb_forward(self, symb_input):

        """symb_input shape: (n_input, depth, channels, height, width)"""

        if symb_input.ndim < 5:
            raise NotImplementedError("3D convolution requires an input with at least 5 dimensions")

        conv_output = conv3d2d.conv3d(symb_input, self.weight, filters_shape=self.w_shape, border_mode=self.border_mode)

        if self.with_bias:
            return conv_output + self.bias.dimshuffle("x", "x", 0, "x", "x")
        else:
            return conv_output
Developer: elPistolero | Project: DeepFried2 | Lines: 13 | Source: SpatialConvolution3D.py


Example 9: apply

    def apply(self, graph):
        in_vw = graph.read_key(key="input")
        num_filters = graph.read_key(key="num_filters")
        filter_size = graph.read_key(key="filter_size")
        stride = graph.read_key_with_default(key="stride", default=(1, 1, 1))
        pad = graph.read_key_with_default(key="pad", default="valid")
        assert len(filter_size) == 3
        assert pad == "valid"
        assert stride == (1, 1, 1)

        # create weight
        num_channels = in_vw.shape[1]
        filter_shape = (num_filters, num_channels) + tuple(filter_size)
        W = th_utils.read_key_with_state_default(
            graph=graph,
            key="weight",
            tags={"weight": True,
                  "linear_weight": True,
                  "in_axes": (1,),
                  "out_axes": (0,),
                  "shape": filter_shape,
                  "dtype": fX},
            state_tags={"parameter": True,
                        "state": True}
        ).var

        from theano.tensor.nnet.conv3d2d import conv3d
        # takes signals in order: (batch, time, channels, row, column)
        # and filters in order: (out channel, time, in channels, row, column)
        # but we keep the dimensions that W is stored in consistent with other
        # convolutions, so we have to dimshuffle here
        order = (0, 2, 1, 3, 4)
        out_var = conv3d(signals=in_vw.variable.dimshuffle(*order),
                         filters=W.dimshuffle(*order),
                         signals_shape=[in_vw.shape[o] for o in order],
                         filters_shape=[filter_shape[o] for o in order],
                         # HACK as of 20150916, conv3d does a check
                         # if isinstance(border_mode, str), so we manually
                         # cast as a string
                         border_mode=str("valid"))

        out_shape = conv_output_shape(input_shape=in_vw.shape,
                                      num_filters=num_filters,
                                      axes=(2, 3, 4),
                                      conv_shape=filter_size,
                                      strides=stride,
                                      pads=conv_parse_pad(filter_size, pad))

        out_vw = VariableWrapper(out_var, out_shape)
        graph.write_key(key="output", value=out_vw)
Developer: diogo149 | Project: hooky | Lines: 50 | Source: conv.py


Example 10: kernel_3d_center_surround_filter

def kernel_3d_center_surround_filter(symbolic_input, model=None, name=None, config={}):
    """
        Function to be used to initialize a `Node`.

        Comparable to the VirtualRetina OPL layer, this node computes a center-surround signal.
        To do this it creates a big composite kernel.

    """
    if name is None:
        name = str(uuid.uuid4())
    _kernel = dtensor5(name+'_kernel')
    output_variable = conv3d(symbolic_input,_kernel)
    output_variable.name = name+'_output'
    parameter_variables = [_kernel]
    node_type = '3d Kernel Filter Node'
    
    epsilon = float(config.get('epsilon',0.000000001))
    num_E_n_C = m_en_filter(int(config.get('center-n__uint',0)),float(config.get('center-tau__sec',0.0001)),
                              normalize=True,retina=model)
    num_G_C = m_g_filter(float(config.get('center-sigma__deg',0.05)),float(config.get('center-sigma__deg',0.05)),
                         retina=model,normalize=True,even=False)
    num_TwuTu_C = m_t_filter(float(config.get('undershoot',{}).get('tau__sec',0.001)),
                              float(config.get('undershoot',{}).get('relative-weight',1.0)),
                              normalize=True,retina=model,epsilon=0.0000000000001)
    num_E_S = m_e_filter(float(config.get('surround-tau__sec',0.001)),retina=model,normalize=True)
    num_G_S = m_g_filter(float(config.get('surround-sigma__deg',0.15)),float(config.get('surround-sigma__deg',0.15)),
                         retina=model,normalize=True,even=False)
    num_Reshape_C_S = fake_filter(num_G_S,num_E_S)
    num_lambda_OPL = config.get('opl-amplification',0.25) / model.config.get('input-luminosity-range',255.0)
    num_w_OPL = config.get('opl-relative-weight',0.7)
    center_filter = retina_base.conv(retina_base.conv(num_E_n_C,num_TwuTu_C),
                                    num_G_C)
    num_kernel = retina_base.minimize_filter(
                        num_lambda_OPL*(
                                retina_base.conv(center_filter,num_Reshape_C_S)
                                - num_w_OPL * retina_base.conv(retina_base.conv(center_filter,num_E_S),num_G_S)),
                        filter_epsilon = epsilon)
    node_description = lambda: 'Convolution '+str(num_kernel.shape)
    def get_num_inputs(num_input_variable):
        return dict(zip(parameter_variables,[num_kernel]))
    return {
        'output_variable': output_variable,
        'accept_dimensions': [3],
        'parameter_variables': parameter_variables,
        'state_variables': [],
        'inital_states': [],
        'updated_state_variables': [],
        'node_type': '2d Gauss Filter Node',
        'node_description': lambda: 'Recursive Filtering',
        'get_num_inputs': get_num_inputs
    }
Developer: jahuth | Project: retina | Lines: 49 | Source: vision.py


Example 11: __init__

 def __init__(self,config,kernel_center=None,kernel_surround=None,name=None):
     self.config = config
     if name is None:
         name = str(uuid.uuid4())
     self.kernel_center = kernel_center if kernel_center is not None else np.ones((1,1,1,1,1))
     self.kernel_surround = kernel_surround if kernel_surround is not None else np.ones((1,1,1,1,1))
     self.name = self.config.get('name',name)
     self._I = dtensor5(name+'_I')
     self._kernel_C = dtensor5(name+'_k_C')
     self._kernel_S = dtensor5(name+'_k_S')
     self._C = conv3d(self._I,self._kernel_C)
     self._S = conv3d(self._C,self._kernel_S)
     self._Reshape_C_S = dtensor5(name+'_Reshape_C_S')
     self._lambda_OPL = T.dscalar(name+'_lambda_OPL')
     self._w_OPL = T.dscalar(name+'_w_OPL')
     self._I_OPL = self._lambda_OPL * (conv3d(self._C,self._Reshape_C_S) - self._w_OPL * self._S)
     self.input_variables = [self._I]
     self.internal_variables = [self._kernel_C,self._kernel_S,self._Reshape_C_S, self._lambda_OPL,self._w_OPL]
     self.output_variable = self._I_OPL
     self.compute_function = theano.function(self.input_variables + self.internal_variables, self.output_variable)
     self.num_Reshape_C_S = fake_filter(self.kernel_center)
     self.num_lambda_OPL = self.config.get('amplification',0.25) / self.config.get('input-luminosity-range',255.0)
     self.num_w_OPL = self.config.get('relative-weight',0.7)
     self.state = None
Developer: jahuth | Project: retina | Lines: 24 | Source: vision.py


Example 12: conv3d

def conv3d(x, kernel, strides=(1, 1, 1), border_mode='valid'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    # Both conv3d2d.conv3d and nnet.conv3D only support the 'valid' border mode
    if border_mode != 'valid':
        if border_mode == 'same':
            assert(strides == (1, 1, 1))
            pad_z = (kernel.shape[2] - strides[0])
            pad_x = (kernel.shape[3] - strides[1])
            pad_y = (kernel.shape[4] - strides[2])
        else:  # 'full' border mode
            pad_z = (kernel.shape[2] - 1) * 2
            pad_x = (kernel.shape[3] - 1) * 2
            pad_y = (kernel.shape[4] - 1) * 2

        input_shape = x.shape
        output_shape = (input_shape[0], input_shape[1],
                        input_shape[2] + pad_z,
                        input_shape[3] + pad_x,
                        input_shape[4] + pad_y)
        output = T.zeros(output_shape)
        indices = (slice(None), slice(None),
                   slice(pad_z//2, input_shape[2] + pad_z//2),
                   slice(pad_x//2, input_shape[3] + pad_x//2),
                   slice(pad_y//2, input_shape[4] + pad_y//2))
        x = T.set_subtensor(output[indices], x)

    border_mode = 'valid'

    if _on_gpu():
        assert(strides == (1, 1, 1))
        # Shuffle the dimensions as per the input parameter order, restore it once done
        conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                                   filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                                   border_mode=border_mode)

        conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
    else:
        # Shuffle the dimensions as per the input parameter order, restore it once done
        conv_out = T.nnet.conv3D(V=x.dimshuffle(0, 2, 3, 4, 1),
                                 W=kernel.dimshuffle(0, 2, 3, 4, 1),
                                 b=None, d=strides)
        conv_out = conv_out.dimshuffle(0, 4, 1, 2, 3)

    return conv_out
Developer: jruales | Project: keras | Lines: 47 | Source: theano_backend.py
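
On the CPU branch, T.nnet.conv3D takes channels-last volumes, which is what the dimshuffle pair above arranges. A quick numpy sketch of the round trip (assuming 'th'-style (batch, channels, dim1, dim2, dim3) storage, as on the GPU branch):

import numpy as np

x = np.zeros((2, 3, 8, 8, 8))        # (b, c, d1, d2, d3)
v = x.transpose(0, 2, 3, 4, 1)       # -> (b, d1, d2, d3, c), as conv3D expects
back = v.transpose(0, 4, 1, 2, 3)    # -> (b, c, d1, d2, d3) again
assert back.shape == x.shape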


Example 13: forward

    def forward(self, x, batch_size, run_time):
        img_batch_shape = (batch_size,) + self.image_shape

        x = x.reshape(img_batch_shape)

        # Convolve input feature maps with filters
        conv_out = conv3d2d.conv3d(signals=x,
                                   filters=self.w,
                                   signals_shape=img_batch_shape,
                                   filters_shape=self.filter_shape,
                                   border_mode='valid')

        perm = [0, 2, 1, 3, 4]  # Permutation is needed due to the pooling function prototype
        pooled_out = max_pool_3d(conv_out.dimshuffle(perm), self.poolsize, ignore_border=True)

        return self.neuron_type.activation_function(pooled_out.dimshuffle(perm)
                                                    + self.b.dimshuffle('x', 'x', 0, 'x', 'x')).flatten(2)
Developer: adbrebs | Project: spynet | Lines: 17 | Source: layer_block.py


Example 14: get_output_for

 def get_output_for(self, input, *args, **kwargs):
     """ input is bct01
     based on
     https://github.com/lpigou/Theano-3D-ConvNet/blob/master/convnet3d/convnet3d.py
     released as public domain.
     """
     input_shape = self.input_layer.get_output_shape()
     t, h, w = input_shape[2], input_shape[3], input_shape[4]
     input_c = input_shape[1]
     batch_size = input_shape[0]
     filter_t, filter_h, filter_w = self.filter_size
     input_btc01 = input.dimshuffle([0,2,1,3,4]) # bct01 -> btc01
     out_btc01 = conv3d2d.conv3d(signals=input_btc01, filters=self.W,
             signals_shape=(batch_size, t, input_c, h, w),
             filters_shape=(self.num_filters, filter_t, input_c, filter_h, filter_w),
             border_mode='valid')
     out_bct01 = out_btc01.dimshuffle([0,2,1,3,4]) # btc01 -> bct01
     if self.b is not None:
         out_bct01 = out_bct01 + self.b.dimshuffle('x',0,'x','x','x')
     return self.nonlinearity(out_bct01)
Developer: Jesse-Back | Project: voxnet | Lines: 20 | Source: layers.py


Example 15: get_reconstructed_input

	def get_reconstructed_input(self):
		""" Computes the reconstructed input given the values of the hidden layer """
		repeated_conv = conv3d(
				self.hidden,
				self.W_prime,
				)
		
		bp=(self.filter_shape[1]-1)/2
		repeated_conv=repeated_conv.dimshuffle(0,2,1,3,4)
		zeropad=T.zeros((self.image_shape[0],
				1,
				self.image_shape[1]/self.poolsize[0],
				self.image_shape[3]/self.poolsize[1],
				self.image_shape[4]/self.poolsize[2]))-100
		repeated_conv=T.set_subtensor(zeropad[:,:,bp:-bp,bp:-bp,bp:-bp],repeated_conv)
		#repeated_conv=repeated_conv.dimshuffle(0,2,1,3,4)
		
		#multiple_conv_out = [repeated_conv.flatten()] * np.prod(self.poolsize)
		#stacked_conv_neibs = T.stack(*multiple_conv_out).T

		#newshape=()
		#stretch_unpooling_out = T.nnet.neighbours.neibs2images(stacked_conv_neibs, 
							 #self.poolsize, self.x1.shape)
							 
		z=repeated_conv   ### now zp is (n_batch, 1, n/2, n/2, n/2)
		shp=z.shape
		zp= z.reshape((shp[0]*shp[1],shp[2],shp[3],shp[4]))  ### (50,16,16,16)
		
		iid = [T.arange(self.x1.shape[3])//self.poolsize[1]]
		
		c=zp[:,:,:,iid]
		c=c[:,:,iid]
		c=c[:,iid].reshape(self.x1.shape)
		
		#c = ((zp[T.arange(z.shape[0]*z.shape[1]*z.shape[2])//self.poolsize[0]].T)[T.arange(self.x1.shape[3])//self.poolsize[1]].T).reshape(self.x1.shape)
		z=T.nnet.sigmoid(c + self.b_prime.dimshuffle('x',  'x', 0, 'x', 'x'))

		return z
Developer: g5v991x | Project: emtest | Lines: 38 | Source: convnet3d.py
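
The indexing trick zp[..., T.arange(n) // p] used above implements nearest-neighbour unpooling: each pooled value is repeated p times along the indexed axis. A one-dimensional numpy illustration:

import numpy as np

pooled = np.array([10, 20, 30])
p = 2
idx = np.arange(len(pooled) * p) // p    # [0, 0, 1, 1, 2, 2]
up = pooled[idx]                         # nearest-neighbour upsampling
assert list(up) == [10, 10, 20, 20, 30, 30]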


Example 16: __model

    def __model(self):

        #Customizable non-linearity, added 6/8/15
        #Default is the hyperbolic tangent

        #Prepare input tensor
        Xin = self.X.dimshuffle(3, 0, 'x', 1, 2)

        if((self.activation == 'sig') or (self.activation == 'Sig')): #Also include the option of only sigmoidal non-linear units
            #Layer 1: input layer
            out = T.nnet.sigmoid(conv3d(Xin, self.w[0], border_mode='valid') + self.b[0].dimshuffle('x','x',0,'x','x'))
                
            #Every other layer in the network, as defined by filter_shapes
            for layer in range(1, self.net_shape.shape[0]-1):
                out = T.nnet.sigmoid(conv3d(out, self.w[layer], border_mode='valid') + self.b[layer].dimshuffle('x','x',0,'x','x'))
           
        elif((self.activation == 'relu') or (self.activation == 'ReLU')): 
            #Layer 1: input layer
            out = T.maximum(conv3d(Xin, self.w[0], border_mode='valid') + self.b[0].dimshuffle('x','x',0,'x','x'), 0)
                
            #Every other layer in the network, as defined by filter_shapes
            for layer in range(1, self.net_shape.shape[0]-1):
                #An attempt to eliminate the nan errors by normalizing the relu outputs before sending them to the sigmoid function (added 6/15/15)
                out = T.maximum(conv3d(out, self.w[layer], border_mode='valid') + self.b[layer].dimshuffle('x','x',0,'x','x'), 0)
          
        else: #nonlin == 'tanh'
            #Layer 1: input layer
            out = T.tanh(conv3d(Xin, self.w[0], border_mode='valid') + self.b[0].dimshuffle('x','x',0,'x','x'))
                
            #Every other layer in the network, as defined by filter_shapes
            for layer in range(1, self.net_shape.shape[0]-1):
                out = T.tanh(conv3d(out, self.w[layer], border_mode='valid') + self.b[layer].dimshuffle('x','x',0,'x','x'))
        
        
        out = T.nnet.sigmoid(conv3d(out, self.w[-1], border_mode='valid') + self.b[-1].dimshuffle('x','x',0,'x','x'))          
        #Reshuffle the dimensions so that the last three are the xyz dimensions
        # and the second one is the number of affinity graph types (for each dimension)
        self.out = out.dimshuffle(2, 1, 3, 4, 0)
Developer: schurterb | Project: convnet | Lines: 38 | Source: cnn.py


Example 17: __init__

    def __init__(self, rng, input, filter_shape, temporal_filter, image_shape, poolsize=(2, 2), outputType = 'rl'):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height,filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows,#cols)
        """

        self.input = input

        self.W = theano.shared(value=numpy.reshape(temporal_filter,(1,filter_shape[1],1,1,1)).astype(theano.config.floatX), name='W', borrow=True)
        self.W_helper = theano.shared(value=numpy.zeros((1,filter_shape[1],1,1,1), \
            dtype=theano.config.floatX), name='W_helper', borrow=True)
        self.W_helper2 = theano.shared(value=numpy.zeros((1,filter_shape[1],1,1,1), \
            dtype=theano.config.floatX), name='W_helper2', borrow=True)

        # parameters of this layer
        self.params = [self.W]
        self.params_helper = [self.W_helper]
        self.params_helper2 = [self.W_helper2]


        # to get same using 'valid', pre-pad with zeros
        image_shape_pad = list(image_shape)
        a1 = numpy.floor((filter_shape[1]-1)/2.0).astype(int)
        b1 = numpy.ceil((filter_shape[1]-1)/2.0).astype(int)
        #a2 = numpy.floor((filter_shape[3]-1)/2.0).astype(int)
        #b2 = numpy.ceil((filter_shape[3]-1)/2.0).astype(int)
        #a3 = numpy.floor((filter_shape[4]-1)/2.0).astype(int)
        #b3 = numpy.ceil((filter_shape[4]-1)/2.0).astype(int)

        image_shape_pad[1] += a1+b1
        #image_shape_pad[3] += a2+b2
        #image_shape_pad[4] += a3+b3

        input_padded = theano.shared(value=numpy.zeros(image_shape_pad, \
            dtype=theano.config.floatX), borrow=True)

        #input_padded = T.set_subtensor(input_padded[:,a1:-b1,:,a2:-b2,a3:-b3], input)
        input_padded = T.set_subtensor(input_padded[:,(a1+b1):,:,:,:], input)

        #post-pad
        #input_padded = T.concatenate( (input_padded,T.alloc(0,(1,b1,1,1,1))), axis = 1) #time
        #input_padded = T.concatenate( (input_padded,T.alloc(0,(1,1,1,b2,1))), axis = 3) #height
        #input_padded = T.concatenate( (input_padded,T.alloc(0,(1,1,1,1,b3))), axis = 4) #width

        conv_out = conv3d2d.conv3d(
            signals=input_padded,  # Ns, Ts, C, Hs, Ws
            filters=self.W, # Nf, Tf, C, Hf, Wf
            signals_shape=image_shape_pad, #(batchsize, in_time, in_channels, in_height, in_width)
            filters_shape=filter_shape, #(flt_channels, flt_time, in_channels, flt_height, flt_width)
            border_mode='valid')

        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=poolsize, ignore_border=True)

        self.lin_output = pooled_out

        # Activation is given by sigmoid:
        #self.output = T.tanh(lin_output)

        # Activation is rectified linear
        if outputType == 'rl':
            self.output = self.lin_output*(self.lin_output>0)
        elif outputType == 'l':
            self.output = self.lin_output
Developer: jsmerel | Project: retina_theano_example_code | Lines: 80 | Source: Architecture_main.py
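
The pre-padding in this layer makes the 'valid' temporal convolution length-preserving, and because all a1 + b1 = filter_length - 1 zeros sit in front of the signal, each output sample depends only on current and past inputs (a causal filter). The arithmetic, as an illustrative check:

import numpy as np

# pad k - 1 zeros in front; a 'valid' convolution with kernel length k
# then preserves the temporal length
t_len, k = 100, 7
a1 = int(np.floor((k - 1) / 2.0))
b1 = int(np.ceil((k - 1) / 2.0))
padded = t_len + a1 + b1                 # = t_len + k - 1
assert padded - k + 1 == t_len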


Example 18: conv3d

def conv3d(x, kernel, strides=(1, 1, 1),
           border_mode='valid', dim_ordering='th',
           image_shape=None, filter_shape=None):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    conv_mode: string, "conv" or "cross".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols, time)
        # TH kernel shape: (depth, input_depth, rows, cols, time)
        # TF input shape: (samples, rows, cols, time, input_depth)
        # TF kernel shape: (rows, cols, time, input_depth, depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
        kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
        if image_shape:
            image_shape = (image_shape[0], image_shape[4],
                           image_shape[1], image_shape[2],
                           image_shape[3])
        if filter_shape:
            filter_shape = (filter_shape[4], filter_shape[3],
                            filter_shape[0], filter_shape[1],
                            filter_shape[2])

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            np_kernel = kernel.eval()
            border_mode = tuple(s // 2 for s in np_kernel.shape[2:])
        conv_out = dnn.dnn_conv3d(img=x,
                                kerns=kernel,
                                border_mode=border_mode,
                                subsample=strides)
    else:
        if border_mode == 'same':
            assert(strides == (1, 1, 1))
            pad_dim1 = (kernel.shape[2] - 1)
            pad_dim2 = (kernel.shape[3] - 1)
            pad_dim3 = (kernel.shape[4] - 1)
            output_shape = (x.shape[0], x.shape[1],
                            x.shape[2] + pad_dim1,
                            x.shape[3] + pad_dim2,
                            x.shape[4] + pad_dim3)
            output = T.zeros(output_shape)
            indices = (slice(None), slice(None),
                       slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
                       slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
                       slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
            x = T.set_subtensor(output[indices], x)
            border_mode = 'valid'

        border_mode_3d = (border_mode, border_mode, border_mode)
        conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                                   filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                                   border_mode=border_mode_3d)
        conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)

        # support strides by manually slicing the output
        if strides != (1, 1, 1):
            conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
    return conv_out
Developer: trungnt13 | Project: odin_old | Lines: 67 | Source: theano_backend.py
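
Neither conv3d2d.conv3d nor the padding trick supports strides directly, which is why the backend wrappers in Examples 4 and 18 emulate them by slicing the dense output with [:, :, ::s0, ::s1, ::s2]. The length arithmetic behind that slice (an illustrative check, not project code):

# for input length n, kernel length k and stride s, slicing the dense
# 'valid' output with step s yields the strided-convolution length
n, k, s = 32, 5, 2
dense = n - k + 1                        # dense 'valid' output length
assert len(range(0, dense, s)) == (n - k) // s + 1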


Example 19: __init__


#......... part of the code omitted .........
                b_values = np.asarray(
                    initWeights(
                        (n_out, ),
                        scale=0.5,
                        mode='const'),
                    dtype='float32')
            else:  # activation_func=='tanh':
                b_values = np.asarray(
                    initWeights(
                        (n_out, ),
                        scale=1e-6,
                        mode='fix-uni'),
                    dtype='float32')

            self.b = theano.shared(value=b_values, borrow=True, name='b_conv')

        else:
            if isinstance(b, np.ndarray):
                self.b = theano.shared(
                    b.astype(np.float32),
                    name='b_conv',
                    borrow=True)
            else:
                assert isinstance(
                    b,
                    T.TensorVariable), "b must be either np.ndarray or theano var"
                self.b = b

        # store parameters of this layer
        self.params = [self.W, self.b]

        # convolve input feature maps with filters
        self.mode = theano.compile.get_default_mode()
        self.conv_out = conv3d(
            signals=input,
            filters=self.W,
            border_mode='valid',
            filters_shape=filter_shape
        )  # signals_shape=input_shape if input_shape[0] is not None else None)

        # down-sample each feature map individually, using maxpooling
        if np.any(pool != 1):
            pool_func = lambda x: pooling.pooling3d(x, pool_shape=pool, mode=pooling_mode)
            if use_fragment_pooling:
                pooled_out, self.mfp_offsets, self.mfp_strides = self.fragmentpool(
                    self.conv_out, pool, mfp_offsets, mfp_strides, pool_func)
            else:
                pooled_out = pool_func(self.conv_out)
        else:
            pooled_out = self.conv_out

        if enable_dropout:
            print "Dropout: ACTIVE"
            self.activation_noise = theano.shared(
                np.float32(0.5),
                name='Dropout Rate')
            rng = T.shared_randomstreams.RandomStreams(int(time.time()))
            p = 1 - self.activation_noise
            self.dropout_gate = 1.0 / p * rng.binomial(
                (pooled_out.shape[1], pooled_out.shape[3],
                 pooled_out.shape[4]),
                1,
                p,
                dtype='float32')
            pooled_out = pooled_out * self.dropout_gate.dimshuffle(('x', 0, 'x', 1, 2))
Developer: ELEKTRONN | Project: ELEKTRONN | Lines: 66 | Source: convlayer3d.py


Example 20: __init__

    def __init__(self, rng, input,
                 filter_shape,
                 image_shape,
                 poolsize,
