Python dnn.dnn_available Function Code Examples

This article collects typical usage examples of the Python function theano.sandbox.cuda.dnn.dnn_available. If you are wondering what dnn_available does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the dnn_available function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
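
Nearly every example below follows the same pattern: dnn_available() acts as a runtime guard, usually combined with a check that theano.config.device points at a GPU, and the code falls back to a plain Theano implementation when cuDNN cannot be used. The sketch below is a minimal illustration of that guard pattern, assuming the legacy theano.sandbox.cuda backend; the function name conv2d_with_cudnn_fallback and its defaults are made up for this illustration, not taken from any of the projects listed.

import theano
import theano.tensor as T


def conv2d_with_cudnn_fallback(x, w, border_mode='valid', subsample=(1, 1)):
    """Convolve 4D tensors x and w, preferring cuDNN when it is usable."""
    use_cudnn = False
    if theano.config.device.startswith('gpu'):
        try:
            from theano.sandbox.cuda import dnn
            use_cudnn = dnn.dnn_available()  # False if cuDNN is missing or too old
        except ImportError:
            pass  # Theano was built without the CUDA backend
    if use_cudnn:
        # cuDNN path: dnn_conv takes the image and kernels plus the same
        # border_mode / subsample arguments as the fallback below.
        return dnn.dnn_conv(img=x, kerns=w,
                            border_mode=border_mode, subsample=subsample)
    # CPU (or non-cuDNN GPU) fallback.
    return T.nnet.conv2d(x, w, border_mode=border_mode, subsample=subsample)

The examples that follow apply the same idea to convolution, pooling, and softmax in the Keras, Lasagne, and other wrappers, while the Theano test cases use dnn_available() to skip tests when cuDNN is absent.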

Example 1: pool2d

def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == 'max':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='max')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='max')
    elif pool_mode == 'avg':
        if _on_gpu() and dnn.dnn_available():
            pool_out = dnn_pool(x,
                                pool_size,
                                stride=strides,
                                mode='average_exc_pad')
        else:
            pool_out = downsample.max_pool_2d(x,
                                              ds=pool_size,
                                              st=strides,
                                              ignore_border=ignore_border,
                                              padding=padding,
                                              mode='average_exc_pad')
        
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))
    
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Developer: kundajelab, Project: keras, Lines: 50, Source: theano_backend.py


Example 2: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'
                assert(self.subsample == (1, 1))

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample,
                                          image_shape=self.input_shape,
                                          filter_shape=self.W_shape)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Developer: rahulmohan, Project: keras, Lines: 32, Source: convolutional.py


Example 3: test_cormm_conv

    def test_cormm_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        mode = mode_without_gpu
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):

            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='cpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='cpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='cpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip)
Developer: huamichaelchen, Project: Theano, Lines: 27, Source: test_abstractconv.py


Example 4: test_import_without_gpu_or_cudnn_raises

    def test_import_without_gpu_or_cudnn_raises(self):
        from theano.sandbox.cuda import dnn
        if theano.config.device.startswith("gpu") and dnn.dnn_available():
            pytest.skip()
        else:
            with pytest.raises(ImportError):
                import lasagne.layers.dnn
Developer: colinfang, Project: Lasagne, Lines: 7, Source: test_conv.py


Example 5: create_NIPS_Sprag_init

def create_NIPS_Sprag_init(inp_shape, output_num, stride=None, untie_biases=False, input_var=None):
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer

    # setup network layout
    l_in = lasagne.layers.InputLayer(inp_shape, input_var=input_var)
    l_hid1 = conv(l_in, 16, (8, 8), stride=stride[0], untie_biases=untie_biases,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_hid2 = conv(l_hid1, 32, (4, 4), stride=stride[1], untie_biases=untie_biases,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_hid3 = lasagne.layers.DenseLayer(l_hid2, 256,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    l_out = lasagne.layers.DenseLayer(l_hid3, output_num, nonlinearity=lasagne.nonlinearities.linear,
                        W=lasagne.init.Normal(.01),
                        b=lasagne.init.Constant(.1))

    return {'l_in': l_in, 'l_hid1': l_hid1, 'l_hid2': l_hid2, 'l_hid3': l_hid3, 'l_out': l_out}
Developer: hearthstoneboss, Project: learningALE, Lines: 30, Source: nns.py


Example 6: test_softmax

    def test_softmax(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        t = T.ftensor4('t')
        rand_tensor = numpy.asarray(
            numpy.random.rand(5, 4, 3, 2),
            dtype='float32'
        )
        self._compile_and_check(
            [t],
            [dnn.GpuDnnSoftmax('bc01', 'accurate', 'channel')(t)],
            [rand_tensor],
            dnn.GpuDnnSoftmax
        )

        self._compile_and_check(
            [t],
            [
                T.grad(
                    dnn.GpuDnnSoftmax(
                        'bc01',
                        'accurate',
                        'channel'
                    )(t).mean(),
                    t
                )
            ],
            [rand_tensor],
            dnn.GpuDnnSoftmaxGrad
        )
Developer: ChienliMa, Project: Theano, Lines: 30, Source: test_dnn.py


Example 7: test_conv

    def test_conv(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4('img')
        kerns = T.ftensor4('kerns')
        img_val = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )
        kern_vals = numpy.asarray(
            numpy.random.rand(3, 4, 5, 6),
            dtype='float32'
        )

        for params in product(
            ['valid', 'full'],
            [(1, 1), (2, 2)],
            ['conv', 'cross']
        ):
            desc = dnn.GpuDnnConvDesc(
                border_mode=params[0],
                subsample=params[1],
                conv_mode=params[2]
            )(img.shape, kerns.shape)
            conv = dnn.GpuDnnConv()(img_val, kern_vals, desc)
            self._compile_and_check(
                [img, kerns],
                [conv],
                [img_val, kern_vals],
                dnn.GpuDnnConv
            )
Developer: c0g, Project: Theano, Lines: 31, Source: test_dnn.py


Example 8: test_dnn_conv

    def test_dnn_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)
        mode = mode_with_gpu
        # provide_shape is not used by the cuDNN implementation
        provide_shape = False
        for (i, f), s, b, flip in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip):
            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip, target_op=GpuDnnConv)
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip, target_op=GpuDnnConvGradW)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip, target_op=GpuDnnConvGradI)
Developer: DreamingPiggy, Project: Theano, Lines: 26, Source: test_abstractconv.py


Example 9: get_output

    def get_output(self, train):
        X = self.get_input(train)
        border_mode = self.border_mode
        if dnn.dnn_available() and theano.config.device[:3] == 'gpu':
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, shift_y:X.shape[3] + shift_y]

        return self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Developer: chuckgu, Project: Alphabeta, Lines: 29, Source: Convolutional_Layer.py


Example 10: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        newshape = (X.shape[0]*X.shape[1], X.shape[2], X.shape[3], X.shape[4])
        Y = theano.tensor.reshape(X, newshape) #collapse num_samples and num_timesteps
        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample == (1, 1))
                pad_x = (self.nb_row - self.subsample[0]) // 2
                pad_y = (self.nb_col - self.subsample[1]) // 2
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=(pad_x, pad_y))
            else:
                conv_out = dnn.dnn_conv(img=Y,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                border_mode = 'full'

            conv_out = theano.tensor.nnet.conv.conv2d(Y, self.W,
                border_mode=border_mode, subsample=self.subsample)

            if self.border_mode == 'same':
                shift_x = (self.nb_row - 1) // 2
                shift_y = (self.nb_col - 1) // 2
                conv_out = conv_out[:, :, shift_x:Y.shape[2] + shift_x, shift_y:Y.shape[3] + shift_y]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        newshape = (X.shape[0], X.shape[1], output.shape[1], output.shape[2], output.shape[3])
        return theano.tensor.reshape(output, newshape)
Developer: Michlong, Project: keras-extra, Lines: 33, Source: extra.py


Example 11: test_gpucorrmm_conv

    def test_gpucorrmm_conv(self):
        if not dnn_available():
            raise SkipTest(cuda.dnn.dnn_available.msg)

        mode = mode_with_gpu.excluding('cudnn')
        for (i, f), s, b, flip, provide_shape in itertools.product(
                zip(self.inputs_shapes, self.filters_shapes),
                self.subsamples,
                self.border_modes,
                self.filter_flip,
                [False, True]):

            o = self.get_output_shape(i, f, s, b)
            self.run_fwd(inputs_shape=i, filters_shape=f, subsample=s,
                         verify_grad=True, mode=mode, device='gpu',
                         provide_shape=provide_shape, border_mode=b,
                         filter_flip=flip,
                         target_op=(GpuCorrMM,
                                    GpuCorrMM_gradWeights,
                                    GpuCorrMM_gradInputs))
            self.run_gradweight(inputs_shape=i, filters_shape=f,
                                output_shape=o, subsample=s,
                                verify_grad=True, mode=mode, device='gpu',
                                provide_shape=provide_shape, border_mode=b,
                                filter_flip=flip,
                                target_op=GpuCorrMM_gradWeights)
            self.run_gradinput(inputs_shape=i, filters_shape=f,
                               output_shape=o, subsample=s,
                               verify_grad=True, mode=mode, device='gpu',
                               provide_shape=provide_shape, border_mode=b,
                               filter_flip=flip,
                               target_op=GpuCorrMM_gradInputs)
Developer: DreamingPiggy, Project: Theano, Lines: 32, Source: test_abstractconv.py


Example 12: test_conv_gradi

    def test_conv_gradi(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        kerns = T.ftensor4("kerns")
        out = T.ftensor4("out")
        img_val = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")
        kern_vals = numpy.asarray(numpy.random.rand(3, 4, 5, 6), dtype="float32")

        for params in product(["valid"], [(1, 1)], ["conv", "cross"]):  # Should this work for 'full'?
            temp_kerns = kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                img_val.shape[0],
                kern_vals.shape[1],
                img_val.shape[2] + kern_vals.shape[2] - 1,
                img_val.shape[3] + kern_vals.shape[3] - 1,
            )
            out_vals = numpy.zeros(shape, dtype="float32")
            desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
                out.shape, temp_kerns.shape
            )
            conv_grad_i = dnn.GpuDnnConvGradI()(temp_kerns, img, out, desc)
            self._compile_and_check(
                [temp_kerns, img, out], [conv_grad_i], [kern_vals, img_val, out_vals], dnn.GpuDnnConvGradI
            )
Developer: dapeng2018, Project: Theano, Lines: 25, Source: test_dnn.py


Example 13: pool2d

def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:  # CPU version supported by Theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
Developer: trungnt13, Project: odin_old, Lines: 34, Source: theano_backend.py


Example 14: test_conv_gradw

    def test_conv_gradw(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        kerns = T.ftensor4("kerns")
        out = T.ftensor4("out")
        img_val = numpy.asarray(numpy.random.rand(2, 5, 6, 8), dtype="float32")
        kern_vals = numpy.asarray(numpy.random.rand(2, 1, 5, 6), dtype="float32")
        out_vals = numpy.zeros((3, 3, 1, 1), dtype="float32")

        for params in product(["valid", "full"], [(1, 1)], ["conv", "cross"]):  # strides besides (1, 1)
            temp_img = img.dimshuffle(1, 0, 2, 3)
            temp_kerns = kerns
            if params[2] == "conv":
                temp_kerns = temp_kerns[:, :, ::-1, ::-1]
            temp_kerns = temp_kerns.dimshuffle(1, 0, 2, 3)
            shape = (
                kern_vals.shape[1],
                img_val.shape[1],
                img_val.shape[2] - kern_vals.shape[2] + 1,
                img_val.shape[3] - kern_vals.shape[3] + 1,
            )
            out_vals = numpy.zeros(shape, dtype="float32")
            desc = dnn.GpuDnnConvDesc(border_mode=params[0], subsample=params[1], conv_mode=params[2])(
                temp_img.shape, out.shape
            )
            conv_grad_w = dnn.GpuDnnConvGradW()(temp_img, temp_kerns, out, desc)
            self._compile_and_check(
                [temp_img, temp_kerns, out], [conv_grad_w], [img_val, kern_vals, out_vals], dnn.GpuDnnConvGradW
            )
Developer: dapeng2018, Project: Theano, Lines: 30, Source: test_dnn.py


Example 15: get_output

    def get_output(self, train=False):
        X = self.get_input(train)
        X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

        border_mode = self.border_mode
        if on_gpu() and dnn.dnn_available():
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                pad_x = (self.filter_length - self.subsample_length) // 2
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=(pad_x, 0))
            else:
                conv_out = dnn.dnn_conv(img=X,
                                        kerns=self.W,
                                        border_mode=border_mode,
                                        subsample=self.subsample)
        else:
            if border_mode == 'same':
                assert(self.subsample_length == 1)
                border_mode = 'full'

            conv_out = T.nnet.conv.conv2d(X, self.W,
                                          border_mode=border_mode,
                                          subsample=self.subsample)
            if self.border_mode == 'same':
                shift_x = (self.filter_length - 1) // 2
                conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

        output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
        return output
Developer: Mofef, Project: keras, Lines: 32, Source: convolutional.py


Example 16: test_pool

    def test_pool(self):
        if not dnn.dnn_available():
            raise SkipTest(dnn.dnn_available.msg)
        img = T.ftensor4("img")
        img_val = numpy.asarray(numpy.random.rand(2, 3, 4, 5), dtype="float32")
        for params in product([(1, 1), (2, 2), (3, 3)], [(1, 1), (2, 2), (3, 3)], ["max", "average"]):
            desc = dnn.GpuDnnPoolDesc(ws=params[0], stride=params[1], mode=params[2])()
            self._compile_and_check([img], [dnn.GpuDnnPool()(img, desc)], [img_val], dnn.GpuDnnPool)
Developer: dapeng2018, Project: Theano, Lines: 8, Source: test_dnn.py


Example 17: get_output

    def get_output(self, input, params, testing=False):
        if dnn.dnn_available():
            return dnn.dnn_conv(img=input,
                                kerns=params[0],
                                subsample=(self.row_stride, self.col_stride),
                                border_mode=self.conv_mode)
        else:
            return T.nnet.conv2d(input,
                                 params[0],
                                 subsample=(self.row_stride, self.col_stride),
                                 border_mode=self.conv_mode)
Developer: agajews, Project: Neural-Network-Dev, Lines: 11, Source: Convolutional.py


Example 18: get_lasagne_conv_layer

def get_lasagne_conv_layer():
    import theano.tensor.signal.conv
    from theano.sandbox.cuda import dnn
    # if no dnn support use default conv
    if not theano.config.device.startswith("gpu") or not dnn.dnn_available():  # code stolen from lasagne dnn.py
        import lasagne.layers.conv
        conv = lasagne.layers.conv.Conv2DLayer
    else:
        import lasagne.layers.dnn
        conv = lasagne.layers.dnn.Conv2DDNNLayer
    return conv
Developer: Islandman93, Project: reinforcepy, Lines: 11, Source: dqn_inits.py


Example 19: conv2d

def conv2d(x, kernel, strides=(1, 1), border_mode='valid', dim_ordering='th'):
    '''
    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))

    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))

    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(strides == (1, 1))
            np_kernel = kernel.eval()
            pad_x = (np_kernel.shape[2] - strides[0]) // 2
            pad_y = (np_kernel.shape[3] - strides[1]) // 2
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=(pad_x, pad_y))
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same':
            th_border_mode = 'full'
            assert(strides == (1, 1))
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))

        conv_out = T.nnet.conv.conv2d(x, kernel,
                                      border_mode=th_border_mode,
                                      subsample=strides)
        if border_mode == 'same':
            shift_x = (kernel.shape[2] - 1) // 2
            shift_y = (kernel.shape[3] - 1) // 2
            conv_out = conv_out[:, :,
                                shift_x:x.shape[2] + shift_x,
                                shift_y:x.shape[3] + shift_y]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
Developer: sfwlily, Project: keras, Lines: 53, Source: theano_backend.py


Example 20: find_patch_matches

def find_patch_matches(a, a_norm, b):
    '''For each patch in A, find the best matching patch in B'''
    convs = None
    if K.backend() == 'theano':
        # HACK: This was not being performed on the GPU for some reason.
        from theano.sandbox.cuda import dnn
        if dnn.dnn_available():
            convs = dnn.dnn_conv(
                img=a, kerns=b[:, :, ::-1, ::-1], border_mode='valid')
    if convs is None:
        convs = K.conv2d(a, b[:, :, ::-1, ::-1], border_mode='valid')
    argmax = K.argmax(convs / a_norm, axis=1)
    return argmax
Developer: awentzonline, Project: image-analogies, Lines: 13, Source: patches.py



Note: The theano.sandbox.cuda.dnn.dnn_available examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please follow the corresponding project's license when distributing or using the code, and do not republish without permission.

