Python conv.conv2d Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.nnet.conv.conv2d function in Python. If you have been wondering what conv2d does, how to call it, or want to see it used in real code, the hand-picked examples below should help.



A total of 20 conv2d code examples are shown below, ordered by popularity by default.
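Before diving into the examples, here is a minimal sketch of a basic conv.conv2d call, assuming the legacy Theano 0.x API; the variable names and sizes (X, W, images, 28x28 inputs) are illustrative and not taken from any of the examples below.

# Minimal sketch of the legacy theano.tensor.nnet.conv.conv2d API (Theano 0.x).
# Names and sizes here are illustrative only.
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv

X = T.tensor4('X')                       # input: (batch, channels, height, width)
W = theano.shared(
    np.random.randn(8, 1, 3, 3).astype(theano.config.floatX),
    name='W')                            # filters: (n_filters, channels, f_h, f_w)

# border_mode='valid' shrinks the output; 'full' zero-pads so the output grows.
out = conv.conv2d(X, W, border_mode='valid')
f = theano.function([X], out)

images = np.random.randn(2, 1, 28, 28).astype(theano.config.floatX)
print(f(images).shape)                   # (2, 8, 26, 26) in 'valid' mode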

Example 1: model

def model(X, filter_params, bias_params, p_dropout, srng):
    inp = X
    half = len(filter_params) // 2
    conv_params = filter_params[:half]
    deconv_params = filter_params[half:]
    conv_biases = bias_params[:half]
    deconv_biases = bias_params[half:]
    for f, b in zip(conv_params, conv_biases):
        outa = rectify(conv2d(inp, f, border_mode='valid') +
                       b.dimshuffle('x', 0, 'x', 'x'))
        outb = dropout(outa, srng, p_dropout)
        inp = outb
    # Deconvolution stack: ReLU on every layer except the last, which uses a
    # sigmoid to produce the final output.
    for c, (f, b) in enumerate(zip(deconv_params, deconv_biases)):
        if c == len(deconv_params) - 1:
            outa = T.nnet.sigmoid(conv2d(inp, f, border_mode='full') +
                                  b.dimshuffle('x', 0, 'x', 'x'))
        else:
            outa = rectify(conv2d(inp, f, border_mode='full') +
                           b.dimshuffle('x', 0, 'x', 'x'))
        outb = dropout(outa, srng, p_dropout)
        inp = outb
    output = inp
    return output
Developer: MaGold, Project: Caustics, Lines of code: 25, Source file: model.py


Example 2: model

	def model(self, X, w1, w2, w3, w4, wo, p_drop_conv, p_drop_hidden):
		
		# print X
		l1a = self.rectify(conv2d(X, w1, border_mode = "full"))
		l1 = max_pool_2d(l1a, (2, 2))
		l1 = self.dropout(l1, p_drop_conv)
		# print np.mean(l1)
		
		l2a = self.rectify(conv2d(l1, w2))
		l2 = max_pool_2d(l2a, (2, 2))
		l2 = self.dropout(l2, p_drop_conv)
		# print np.mean(l2)

		l3a = self.rectify(conv2d(l2, w3))
		l3b = max_pool_2d(l3a, (2, 2))
		l3 = T.flatten(l3b, outdim = 2)
		l3 = self.dropout(l3, p_drop_conv)
		# print np.mean(l3)
		
		l4 = self.rectify(T.dot(l3, w4))
		l4 = self.dropout(l4, p_drop_hidden)
		# print np.mean(l4)
		# l4 = T.dot(l4, wo)
		sig = T.dot(l4, wo)
		# pyx = self.softmax(T.dot(l4, wo))
		return l1, l2, l3, l4, sig
Developer: ZhecanJamesWang, Project: conv-net-research, Lines of code: 26, Source file: convnet_James_testing_parameters.py


Example 3: get_theano_function

 def get_theano_function(self, input):
     print(self.name)
     print('in', self.in_size)
     print('filt', self.theano_filter_shape)
     print('bias', self.bias.shape)
     conv_out = None
     if self.pad[0] != 0:
         tmp_new = T.zeros_like(self.dpad)
         input = T.set_subtensor(tmp_new[:,:,self.pad[0]:(-self.pad[0]), 
                                         self.pad[3]:(-self.pad[3])], input)
     if self.groups == 1:
         conv_out = conv.conv2d(input, self.w, 
                                filter_shape=self.theano_filter_shape,
                                subsample=self.stride, 
                                image_shape=self.theano_in_size)
     else:
         in_1 = input[:, :input.shape[1] // 2, :, :]
         in_2 = input[:, input.shape[1] // 2:, :, :]
         fs = self.theano_filter_shape
         fs = (fs[0] // 2, fs[1], fs[2], fs[3])
         conv_1 = conv.conv2d(in_1, self.w1, filter_shape=fs,
                              subsample=self.stride, 
                              image_shape=self.theano_in_size)
         conv_2 = conv.conv2d(in_2, self.w2, filter_shape=fs,
                              subsample=self.stride, 
                              image_shape=self.theano_in_size)
         conv_out = T.concatenate([conv_1, conv_2], axis=1)
     return conv_out + self.b.dimshuffle('x',0,'x','x')
Developer: matteopresutto, Project: caffe-to-theano, Lines of code: 28, Source file: layers.py


Example 4: model

def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):

    # conv + ReLU + pool
    # border_mode='full' means zero-padding; the default mode is 'valid'
    l1a = rectify(conv2d(X, w, border_mode='full'))
    # pooling at 2*2 kernel and select the largest in the kernel
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    # conv + ReLU + pool
    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    # conv + ReLU + pool
    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    # flatten the n-dim array to 2 dims: dimensions beyond the second are
    # collapsed so the result can be fed into the fully connected layer
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    # FC + ReLU
    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)


    # output layer + softmax
    pyx = softmax(T.dot(l4, w_o))

    return l1, l2, l3, l4, pyx
Developer: coroner4817, Project: ForAWS, Lines of code: 31, Source file: 5_convolutional_net.py


Example 5: decode

    def decode(self, hidden):
        hidden_ = T.alloc(0.,*self.hidden_shape)
        deconv_out = T.alloc(0.,*self.output_shape)
       
        # Zero padding; is there a simpler way to write this?
        hidden_ = T.set_subtensor(hidden_[:,:,:,self.filter_shape[3]-1:],hidden)

        # Calculate output
        conv_odd = conv.conv2d(
            input = hidden_,
            filters = self.W_odd,
            filter_shape = self.filter_shape,
            image_shape = self.hidden_shape,)
        conv_even = conv.conv2d(
            input = hidden_,
            filters = self.W_even,
            filter_shape = self.filter_shape,
            image_shape = self.hidden_shape,)
        
        deconv_out = T.set_subtensor(deconv_out[:,:,:,::2], conv_odd)
        deconv_out = T.set_subtensor(deconv_out[:,:,:,1::2], conv_even)

        linout = deconv_out + self.b.dimshuffle('x',0,'x','x')
        
        if self.dec_hid == 'tanh':
            convout= T.tanh(linout)
        elif self.dec_hid == 'lin':
            convout=linout
        elif self.dec_hid == 'relu':
            convout=linout * (linout > 0.) + 0. * (linout < 0.)
        else:
            raise ValueError('Invalid dec_hid')
        #### Recurrent connection####
        return convout
Developer: ktho22, Project: speech_synthesis, Lines of code: 34, Source file: rtdnn_bc01.py


Example 6: testmodel

def testmodel(X, w, w2, w3, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='valid'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2b = max_pool_2d(l2a, (2, 2))
    l2 = T.flatten(l2b, outdim=2)
    l2 = dropout(l2, p_drop_conv)

    l3 = rectify(T.dot(l2, w3))
    l3 = dropout(l3, p_drop_hidden)
    
    pyx = softmax(T.dot(l3, w_o))
    # l3a = rectify(conv2d(l2, w3))
    # l3b = max_pool_2d(l3a, (2, 2))
    # l3 = T.flatten(l3b, outdim=2)
    # l3 = dropout(l3, p_drop_conv)

    # problem happening here
    # l4 = rectify(T.dot(l3, w4))
    # l4 = dropout(l4, p_drop_hidden)

    # pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, pyx   
Developer: youralien, Project: smarterboard-nn, Lines of code: 25, Source file: convnet.py


Example 7: convolutional

def convolutional(X, X_test, input_shape, n_filters, filter_size):
	"""
	Implementation of a convolutional layer

	Parameters
	----------
	X
	input_shape
	n_filters
	filter_size

	Note
	----
	The convolutions are implemented using border_mode=same, that is the 
	output shape is the same as the input shape for the 2 last dimensions
	"""

	filters_shape = (n_filters, input_shape[1], filter_size[0], filter_size[1])
	filters = theano.shared(
		numpy.random.uniform(low=-0.1, high=0.1, size=filters_shape).astype(numpy.float32),
		'conv_filters'
	)

	output_shape = (input_shape[0], n_filters, input_shape[2], input_shape[3])

	output = conv2d(input=X, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')
	output_test = conv2d(input=X_test, filters=filters, filter_shape=filters_shape, image_shape=input_shape, border_mode='full')

	shift_x = (filter_size[0] - 1) // 2
	shift_y = (filter_size[1] - 1) // 2

	output = output[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]
	output_test = output_test[:,:,shift_x:input_shape[2]+shift_x,shift_y:input_shape[3]+shift_y]

	return output, output_test, [filters], output_shape
Developer: tfjgeorge, Project: ift6268, Lines of code: 35, Source file: layers.py
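The layer in Example 7 emulates border_mode='same' by running a 'full' convolution and then slicing out a window of the original spatial size. Below is a quick standalone shape check of that idea; the sizes are illustrative and not taken from the ift6268 project.

# Shape arithmetic behind the 'full' convolution + crop trick in Example 7.
# Illustrative sizes; for odd filter sizes the crop is centred, for even sizes
# it is shifted by half a pixel.
input_h, filter_h = 32, 5

full_h = input_h + filter_h - 1      # 'full' mode output height: 36
shift = (filter_h - 1) // 2          # crop offset: 2

# Example 7 keeps output[..., shift : input_h + shift], a slice of length input_h,
# so the cropped result matches the input's spatial size ('same' behaviour).
cropped_h = (input_h + shift) - shift
assert cropped_h == input_h
print(full_h, shift, cropped_h)      # 36 2 32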


Example 8: LCN

def LCN(data, kernel_shape):
    
    # X = T.ftensor4()

    filter_shape = (1, 1, kernel_shape, kernel_shape)
    filters = sharedX(gaussian_filter(kernel_shape).reshape(filter_shape))
    
    convout = conv2d(data, filters=filters, border_mode='full')
    
    # For each pixel, remove mean of 9x9 neighborhood
    mid = int(np.floor(kernel_shape/ 2.))
    centered_X = data - convout[:,:,mid:-mid,mid:-mid]
    
    # Scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_XX = conv2d(T.sqr(data), filters=filters, border_mode='full')
    
    denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid])
    per_img_mean = denom.mean(axis = [2,3])
    divisor = T.largest(per_img_mean.dimshuffle(0, 1, 'x', 'x'), denom)
    
    new_X = centered_X / T.maximum(1., divisor)
    # new_X = new_X[:,:,mid:-mid, mid:-mid]

    new_X = T.extra_ops.squeeze(new_X)  # remove broadcastable dimension
    new_X = new_X[:, 0, :, :]  # TODO: check whether this forced squeeze is good

    return new_X
Developer: fengjiran, Project: sparse_filtering, Lines of code: 27, Source file: scaling.py


Example 9: __init__

    def __init__(self, rng, input_A, input_B, filter_shape, image_shape, poolsize=(2, 2)):

        print(image_shape)
        print(filter_shape)
        assert image_shape[1] == filter_shape[1]

        #calc the W_bound and init the W
        fan_in = numpy.prod(filter_shape[1:])
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(numpy.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype = theano.config.floatX),
                        borrow = True)

        b_value = numpy.zeros((filter_shape[0],), 
                              dtype = theano.config.floatX)
        self.b = theano.shared(value = b_value, borrow = True)


        conv_out_A = conv.conv2d(input = input_A, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        conv_out_B = conv.conv2d(input = input_B, filters = self.W, 
                filter_shape = filter_shape, image_shape = image_shape)
        pooled_out_A = downsample.max_pool_2d(input = conv_out_A,
                                ds = poolsize, ignore_border = True)
        pooled_out_B = downsample.max_pool_2d(input = conv_out_B,
                                ds = poolsize, ignore_border = True)


        self.output_A = T.tanh(pooled_out_A + self.b.dimshuffle('x',0,'x','x'))
        self.output_B = T.tanh(pooled_out_B + self.b.dimshuffle('x',0,'x','x'))

        self.params = [self.W, self.b]
Developer: PiscesDream, Project: Lab_Models, Lines of code: 35, Source file: lfw_cnn.py


Example 10: LCNinput

def LCNinput(data, kernel_shape):
    
    X = T.ftensor4()
    filter_shape = (1, 1, kernel_shape, kernel_shape)
    filters = sharedX(gaussian_filter(kernel_shape).reshape(filter_shape))
    
    convout = conv2d(X, filters=filters, border_mode='full')
    
    # For each pixel, remove mean of 9x9 neighborhood
    mid = int(np.floor(kernel_shape/ 2.))
    centered_X = X - convout[:,:,mid:-mid,mid:-mid]
    
    # Scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_XX = conv2d(T.sqr(X), filters=filters, border_mode='full')
    
    denom = T.sqrt(sum_sqr_XX[:,:,mid:-mid,mid:-mid])
    per_img_mean = denom.mean(axis = [2,3])
    divisor = T.largest(per_img_mean.dimshuffle(0,1, 'x', 'x'), denom)
    
    new_X = centered_X / T.maximum(1., divisor)
    # new_X = new_X[:,:,mid:-mid, mid:-mid]
    
    f = theano.function([X], new_X)
    
    return f(data)
Developer: fengjiran, Project: sparse_filtering, Lines of code: 25, Source file: scaling.py


Example 11: get_fprop_fn

def get_fprop_fn(variable_shape=False, include_pool=True):
    """
    build a theano function that use SAE weights to get convolved(or pooled if
    include_pool is True) features from a given input
    """
    conf = utils.get_config()
    paths = utils.get_paths()
    ae = serial.load(paths['sae']['model'])
    cnn_layer = 'cnn_layer_%i' % (conf['cnn_layers'])
    batch_size = conf[cnn_layer]['batch_size']
    nhid = conf['sae']['nhid']
    patch_size = conf['patch_size']
    region_size = conf['region_size']

    input = T.tensor4('input')
    filter_shape = (nhid, 1, patch_size, patch_size)
    filters = theano.shared(ae.get_weights().T.reshape(filter_shape))

    if variable_shape:
        out = conv.conv2d(input, filters)
    else:
        image_shape = [batch_size, 1, region_size, region_size]
        out = conv.conv2d(input, filters, filter_shape=filter_shape,
                          image_shape=image_shape)

    if include_pool:
        pool_fn = getattr(out, conf['pool_fn'])
        out = pool_fn(axis=(2, 3))
    return theano.function([input], out)
Developer: johnarevalo, Project: cnn-bcdr, Lines of code: 29, Source file: fe_extraction.py


Example 12: apply

    def apply(self, dataset, can_fit=True):
        x = dataset.get_design_matrix()

        denseX = T.matrix(dtype=x.dtype)

        image_shape = (len(x),) + self.img_shape
        X = denseX.reshape(image_shape)
        filters = gaussian_filter_9x9().reshape((1,1,9,9))

        convout = conv.conv2d(input = X,
                             filters = filters,
                             image_shape = image_shape,
                             filter_shape = (1, 1, 9, 9),
                             border_mode='full')

        # For each pixel, remove mean of 9x9 neighborhood
        centered_X = X - convout[:,:,4:-4,4:-4]
        
        # Scale down norm of 9x9 patch if norm is bigger than 1
        sum_sqr_XX = conv.conv2d(input = centered_X**2,
                             filters = filters,
                             image_shape = image_shape,
                             filter_shape = (1, 1, 9, 9),
                             border_mode='full')
        denom = T.sqrt(sum_sqr_XX[:,:,4:-4,4:-4])
        per_img_mean = T.mean(T.flatten(denom, outdim=3), axis=2)
        divisor = T.largest(per_img_mean.dimshuffle((0,1,'x','x')), denom)

        new_X = centered_X / divisor
        new_X = T.flatten(new_X, outdim=2)

        f = theano.function([denseX], new_X)
        dataset.set_design_matrix(f(x))
Developer: HoldenCaulfieldRye, Project: caffe-1, Lines of code: 33, Source file: preproc.py


Example 13: initialise

	def initialise(self):
		activation = self.activation
		rng = np.random.RandomState(235)
		inpt = self.inpt
		# initialise layer 1 weight vector. 
		#w_shp = (self.no_of_filters, 1.,self.in_channels, self.filter_length)
		w_shp = (self.no_of_filters, self.in_channels, self.filter_length, 1)
		w_bound = np.sqrt(self.in_channels* self.filter_length)
		W = theano.shared(value = np.asarray(
        rng.normal(0.,0.001,size=w_shp),
            dtype=inpt.dtype), name =self.param_names[0],borrow = True)
		b_shp = (self.no_of_filters,)
		b = theano.shared(value = np.asarray(
            rng.uniform(low=-.0, high=.0, size=b_shp),
            dtype=inpt.dtype), name =self.param_names[1],borrow = True)
		upsampled = self.inpt.repeat(int(self.pool),axis = 2)
		conv_out = conv.conv2d(upsampled, W.dimshuffle(0,3,2,1),subsample=(1,1),border_mode = "full")
		conv_out = conv_out[:,:,:,int(self.in_channels-1):-int(self.in_channels-1)]
		self.params = [W,b]
		if self.distribution==True:
			W_sigma = theano.shared(value = np.asarray(
	        rng.normal(0.,0.001,size=w_shp),
	            dtype=inpt.dtype), name ='lik_sigma',borrow = True)
			b_sigma = theano.shared(value = np.asarray(
	            rng.uniform(low=-.0, high=.0, size=b_shp),
	            dtype=inpt.dtype), name ='b_sigm',borrow = True)
			#self.output =conv_out + b.dimshuffle('x', 0, 'x', 'x')
			conv_out_sigma = conv.conv2d(upsampled, W_sigma.dimshuffle(0,3,2,1),subsample=(1,1),border_mode = "full",)
			conv_out_sigma = conv_out_sigma[:,:,:,int(self.in_channels-1):-int(self.in_channels-1)]
			self.log_sigma = conv_out_sigma + b_sigma.dimshuffle('x', 0, 'x', 'x')
			self.params +=[W_sigma,b_sigma]
		if activation!=None:
			self.output = self.activation(conv_out + b.dimshuffle('x', 0, 'x', 'x')).astype(theano.config.floatX)
		else:
			self.output = conv_out + b.dimshuffle('x', 0, 'x', 'x').astype(theano.config.floatX)
Developer: KyriacosShiarli, Project: SingNet, Lines of code: 35, Source file: layers.py


Example 14: model

def model(X, w, w2, w3, w4, w5, w_o, b_h1, b_h2, b_o, p_drop_conv, p_drop_hidden):
  
    l1_lin  = conv2d(X, w, border_mode='full')+b_c1.dimshuffle('x', 0, 'x', 'x')
    l1a     = alpha_c1 * rectify(l1_lin) + (1.- alpha_c1) * T.tanh(l1_lin)
    l1      = max_pool_2d(l1a, (2, 2))
    l1      = dropout(l1, p_drop_conv)

    l2_lin = conv2d(l1, w2) + b_c2.dimshuffle('x', 0, 'x', 'x')
    l2a    = alpha_c2 * rectify(l2_lin) + (1. - alpha_c2) * T.tanh(l2_lin)
    l2     = max_pool_2d(l2a, (2, 2))
    l2     = dropout(l2, p_drop_conv)

    l3_lin = conv2d(l2, w3) + b_c3.dimshuffle('x', 0, 'x', 'x')
    l3a    = alpha_c3 * rectify(l3_lin) + ( 1 - alpha_c3) * T.tanh(l3_lin)
    l3b    = max_pool_2d(l3a, (2, 2))
    l3     = T.flatten(l3b, outdim=2)
    l3     = dropout(l3, p_drop_conv)

    l4_lin = T.dot(l3, w4) + b_h1 
    l4 = alpha_h1 * rectify(l4_lin) + (1.-alpha_h1) * T.tanh(l4_lin)
    l4 = dropout(l4, p_drop_hidden)

    l5_lin = T.dot(l4, w5) + b_h2
    l5 = alpha_h2 * rectify(l5_lin) + (1. - alpha_h2) * T.tanh(l5_lin)
    l5 = dropout(l5, p_drop_hidden)

    pyx = softmax(T.dot(l5, w_o) + b_o )
    return l1, l2, l3, l4, l5, pyx
Developer: rjbashar, Project: learningActivations, Lines of code: 28, Source file: z_alpha_node_3c_2h.py


Example 15: model3

def model3(X, w, w2, w22, w222, w3, w4, w_o, p_drop_conv, p_drop_hidden):
  l1a = rectify(conv2d(X, w, border_mode='full'))
  l1 = max_pool_2d(l1a, (2, 2))
  l1 = dropout(l1, p_drop_conv)

  l2a = rectify(conv2d(l1, w2))
  l2 = max_pool_2d(l2a, (2, 2))
  l2 = dropout(l2, p_drop_conv)

  l22a = rectify(conv2d(l2, w22))
  l22 = max_pool_2d(l22a, (2, 2))
  l22 = dropout(l22, p_drop_conv)

  l222a = rectify(conv2d(l22, w222))
  l222 = max_pool_2d(l222a, (2, 2))
  l222 = dropout(l222, p_drop_conv)

  l3a = rectify(conv2d(l222, w3))
  l3b = max_pool_2d(l3a, (2, 2))
  l3 = T.flatten(l3b, outdim=2)
  l3 = dropout(l3, p_drop_conv)

  l4 = rectify(T.dot(l3, w4))
  l4 = dropout(l4, p_drop_hidden)

  pyx = softmax(T.dot(l4, w_o))
  return l1, l2, l22, l222, l3, l4, pyx
Developer: deccs, Project: ndsb_theano, Lines of code: 27, Source file: conv_net.py


Example 16: bench_ConvMed

def bench_ConvMed(batchsize):
    data_x.value = randn(n_examples, 1, 96, 96)
    w0 = shared(rand(6, 1, 7, 7) * numpy.sqrt(6 / (25.)))
    b0 = shared(zeros(6))
    w1 = shared(rand(16, 6, 7, 7) * numpy.sqrt(6 / (25.)))
    b1 = shared(zeros(16))
    vv = shared(rand(16*8*8, 120) * numpy.sqrt(6.0/16./25))
    cc = shared(zeros(120))
    v = shared(zeros(120, outputs))
    c = shared(zeros(outputs))
    params = [w0, b0, w1, b1, v, c, vv, cc]

    c0 = tanh(conv2d(sx, w0, image_shape=(batchsize, 1, 96, 96), filter_shape=(6,1,7,7)) + b0.dimshuffle(0, 'x', 'x'))
    s0 = tanh(max_pool_2d(c0, (3,3))) # this is not the correct leNet5 model, but it's closer to

    c1 = tanh(conv2d(s0, w1, image_shape=(batchsize, 6, 30, 30), filter_shape=(16,6,7,7)) + b1.dimshuffle(0, 'x', 'x'))
    s1 = tanh(max_pool_2d(c1, (3,3)))

    p_y_given_x = softmax(dot(tanh(dot(s1.flatten(2), vv)+cc), v)+c)
    nll = -log(p_y_given_x)[arange(sy.shape[0]), sy]
    cost = nll.mean()

    gparams = grad(cost, params)

    train = function([si, nsi], cost,
            updates=[(p,p-lr*gp) for p,gp in zip(params, gparams)])
    eval_and_report(train, "ConvMed", [batchsize], N=120)
Developer: HadoopIt, Project: DeepLearningBenchmarks, Lines of code: 27, Source file: convnet.py


Example 17: _lcn

def _lcn(image, im_shape, fmaps, pool_depth, width, sigma):
    """
    """
    import theano
    import theano.tensor as T
    from theano.tensor.nnet import conv

    border = width//2
    filters = _lcn_filters(fmaps, pool_depth, width, sigma) 
    filter_shape = filters.shape
    blurred_mean = conv.conv2d(input=image, filters=filters, 
            image_shape=im_shape, filter_shape=filter_shape,
            border_mode='full')
    image -= blurred_mean[:, :, border:-border, border:-border]
    
    image_sqr = T.sqr(image)
    blurred_sqr = conv.conv2d(input=image_sqr, filters=filters, 
            image_shape=im_shape, filter_shape=filter_shape,
            border_mode='full')

    div = T.sqrt(blurred_sqr[:, :, border:-border, border:-border])
    fm_mean = div.mean(axis=[2, 3])
    div = T.largest(fm_mean.dimshuffle(0, 1, 'x', 'x'), div) + 1e-6
    image = image/div
    return T.cast(image, theano.config.floatX)
Developer: tjsongzw, Project: datasets, Lines of code: 25, Source file: helpers.py


Example 18: output

    def output(self, input, mask=None):
        if mask is None:
            drop_in = input * self.drop
        else:
            drop_in = input * mask

        conv_out1 = conv.conv2d(input=drop_in, filters=self.W1, filter_shape=self.filter_shape1,
                               image_shape=self.shape_in)
        linout1 = T.nnet.relu(conv_out1 + self.b1.dimshuffle('x', 0, 'x', 'x'))
        output1 = (
            linout1 if self.activation is None
            else self.activation(linout1)
        )
        pooled_out1 = downsample.max_pool_2d(input=output1, ds=self.poolsize1, ignore_border=True)

        conv_out2 = conv.conv2d(input=drop_in, filters=self.W2, filter_shape=self.filter_shape2,
                                image_shape=self.shape_in)
        linout2 = T.nnet.relu(conv_out2 + self.b2.dimshuffle('x', 0, 'x', 'x'))
        output2 = (
            linout2 if self.activation is None
            else self.activation(linout2)
        )
        pooled_out2 = downsample.max_pool_2d(input=output2, ds=self.poolsize2, ignore_border=True)

        conv_out3 = conv.conv2d(input=drop_in, filters=self.W3, filter_shape=self.filter_shape3,
                                image_shape=self.shape_in)
        linout3 = T.nnet.relu(conv_out3 + self.b3.dimshuffle('x', 0, 'x', 'x'))
        output3 = (
            linout3 if self.activation is None
            else self.activation(linout3)
        )
        pooled_out3 = downsample.max_pool_2d(input=output3, ds=self.poolsize3, ignore_border=True)

        output = T.concatenate([pooled_out1, pooled_out2, pooled_out3], axis=1)
        return output
Developer: lrlAaronLee, Project: Sentiment-Analysis, Lines of code: 35, Source file: models.py


Example 19: __init__

    def __init__(self,name,rng,inputs, hiden_size,input_size,window_size,feature_num,init_W=None,init_b=None):
        '''
        inputs shape (batch size,input_size,1,max_sentence_length * feature_num)
        '''
        self.hiden_size = hiden_size
        self.window_size = window_size
        self.feature_num = feature_num
        self.conv_window  = self.window_size * self.feature_num
        if init_W == None:
            self.W = theano.shared(np.asarray(rng.uniform(low=-2.0, high=2.0, size=(hiden_size,1,1,self.conv_window)), dtype=theano.config.floatX)
                ,name='cov_1d_layer_W_%s' %(name))
        else:
            self.W = theano.shared(init_W,name='cov_1d_layer_W_%s' %(name))

        if init_b == None:
            self.b = theano.shared(np.asarray(rng.uniform(low=-2.0, high=2.0, size=(hiden_size)), dtype=theano.config.floatX)
                ,name='cov_1d_layer_b_%s' % (name))
        else:
            self.b = theano.shared(init_b,name='cov_1d_layer_b_%s' % (name))


        if input_size == 1:
            self.linear = conv.conv2d(inputs,self.W,subsample=(1,self.feature_num)) + self.b.dimshuffle('x', 0, 'x', 'x')
            #self.out = self.linear.dimshuffle(0,2,1,3)
            self.output = self.linear.dimshuffle('x',0,1,2,3)
        else:
            self.linear,_updates = theano.scan(lambda x_i: conv.conv2d(inputs[:,x_i:x_i+1,:,:],self.W,\
                    subsample=(1,self.feature_num))
                    + self.b.dimshuffle('x', 0, 'x', 'x'), sequences=[T.arange(input_size)])
            #self.output = self.linear.dimshuffle(1,0,2,3,4).reshape((inputs.shape[0],inputs.shape[1],hiden_size,-1))
            self.output = self.linear
Developer: rudaoshi, Project: knowledge.py, Lines of code: 31, Source file: conv_layer.py


Example 20: __init__

	def __init__(self, input, input_shape, filter_shape, border_mode="valid") :
		# input : theano symbolic variable of input, 4D tensor
		# input_shape : shape of input / (minibatch size, input channel num, image height, image width)
		# filter_shape : shape of filter / (# of new channels to make, input channel num, filter height, filter width)

		# initialize W (weight) randomly
		rng = np.random.RandomState(int(time.time()))
		w_bound = math.sqrt(filter_shape[1] * filter_shape[2] * filter_shape[3])
		self.W1 = theano.shared(np.asarray(rng.uniform(low=-1.0/w_bound, high=1.0/w_bound, size=filter_shape), dtype=theano.config.floatX), name='W', borrow=True)
		self.W2 = theano.shared(np.asarray(rng.uniform(low=-1.0/w_bound, high=1.0/w_bound, size=filter_shape), dtype=theano.config.floatX), name='W', borrow=True)
		self.W3 = theano.shared(np.asarray(rng.uniform(low=-1.0/w_bound, high=1.0/w_bound, size=filter_shape), dtype=theano.config.floatX), name='W', borrow=True)
		
		# initialize b (bias) with zeros
		self.b1 = theano.shared(np.asarray(np.zeros(filter_shape[0],), dtype=theano.config.floatX), name='b', borrow=True)
		self.b2 = theano.shared(np.asarray(np.zeros(filter_shape[0],), dtype=theano.config.floatX), name='b', borrow=True)
		self.b3 = theano.shared(np.asarray(np.zeros(filter_shape[0],), dtype=theano.config.floatX), name='b', borrow=True)

		# convolution & sigmoid calculation
		#self.conv_out = conv.conv2d(input, self.W, image_shape=input_shape, filter_shape=filter_shape)
		#self.output = 1.7159*T.tanh((self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))*(2.0/3.0))

		# maxout : 3
		out1 = conv.conv2d(input, self.W1, image_shape=input_shape, filter_shape=filter_shape, border_mode=border_mode) + self.b1.dimshuffle('x', 0, 'x', 'x')
		out2 = conv.conv2d(input, self.W2, image_shape=input_shape, filter_shape=filter_shape, border_mode=border_mode) + self.b2.dimshuffle('x', 0, 'x', 'x')
		out3 = conv.conv2d(input, self.W3, image_shape=input_shape, filter_shape=filter_shape, border_mode=border_mode) + self.b3.dimshuffle('x', 0, 'x', 'x')

		self.output = T.maximum(out1, T.maximum(out2, out3))

		# save the parameters of this layer for back-prop convenience
		self.params = [self.W1, self.W2, self.W3, self.b1, self.b2, self.b3]
		insize = input_shape[1] * input_shape[2] * input_shape[3]
		self.paramins = [insize, insize, insize, insize, insize, insize]
Developer: shuuki4, Project: 2015-2-ML, Lines of code: 32, Source file: ConvLayer.py



Note: The theano.tensor.nnet.conv.conv2d examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. When redistributing or using the code, please follow the corresponding project's license; do not reproduce without permission.

