本文整理汇总了Python中theano.tensor.signal.pool.pool_2d函数的典型用法代码示例。如果您正苦于以下问题:Python pool_2d函数的具体用法?Python pool_2d怎么用?Python pool_2d使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pool_2d函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_pooling_with_tensor_vars
def test_pooling_with_tensor_vars(self):
    """Check that pool_2d driven by symbolic window/stride/padding
    variables computes the same output and gradient as the same pooling
    built with fixed (constant) parameters."""
    x = tensor.ftensor4()
    window_size = tensor.ivector()
    stride = tensor.ivector()
    padding = tensor.ivector()
    data = numpy.random.normal(0, 1, (1, 1, 5, 5)).astype('float32')

    # checking variable params vs fixed params
    for ignore_border in (True, False):
        for mode in ('max', 'sum', 'average_inc_pad', 'average_exc_pad'):
            # Graph whose pooling parameters are symbolic inputs.
            y = pool_2d(x, window_size, ignore_border, stride, padding, mode)
            dx = theano.gradient.grad(y.sum(), x)
            var_fct = theano.function([x, window_size, stride, padding],
                                      [y, dx])
            for ws in (4, 2, 5):
                for st in (2, 3):
                    for pad in (0, 1):
                        # Skip parameter combinations that pool_2d rejects.
                        unsupported = (
                            pad > st or st > ws or
                            (pad != 0 and not ignore_border) or
                            (mode == 'average_exc_pad' and pad != 0))
                        if unsupported:
                            continue
                        # Same pooling with the parameters baked in as
                        # compile-time constants.
                        y = pool_2d(x, (ws, ws), ignore_border, (st, st),
                                    (pad, pad), mode)
                        dx = theano.gradient.grad(y.sum(), x)
                        fix_fct = theano.function([x], [y, dx])
                        var_y, var_dx = var_fct(data, (ws, ws), (st, st),
                                                (pad, pad))
                        fix_y, fix_dx = fix_fct(data)
                        utt.assert_allclose(var_y, fix_y)
                        utt.assert_allclose(var_dx, fix_dx)
开发者ID:maniacs-ops,项目名称:Theano,代码行数:31,代码来源:test_pool.py
示例2: test_pooling_opt
def test_pooling_opt():
    """Verify that average pooling (forward and gradient) is rewritten
    into the cuDNN GpuDnnPool / GpuDnnPoolGrad ops."""
    if not dnn.dnn_available(test_ctx_name):
        raise SkipTest(dnn.dnn_available.msg)
    x = T.fmatrix()

    # Forward graph: pool_2d should be replaced by GpuDnnPool.
    pooled = pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                     ignore_border=True)
    f = theano.function([x], pooled, mode=mode_with_gpu)
    assert any(isinstance(node.op, dnn.GpuDnnPool)
               for node in f.maker.fgraph.toposort())
    f(numpy.zeros((10, 10), dtype='float32'))

    # Gradient graph: the backward op should become GpuDnnPoolGrad.
    grad_expr = T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
                               ignore_border=True).sum(),
                       x)
    f = theano.function([x], grad_expr,
                        mode=mode_with_gpu.including("cudnn"))
    assert any(isinstance(node.op, dnn.GpuDnnPoolGrad)
               for node in f.maker.fgraph.toposort())
    f(numpy.zeros((10, 10), dtype='float32'))
开发者ID:nke001,项目名称:Theano,代码行数:28,代码来源:test_dnn.py
示例3: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode="valid", dim_ordering=_IMAGE_DIM_ORDERING, pool_mode="max"):
    """Pool a 4D tensor over its two spatial dimensions.

    "same" border mode is emulated by over-padding the input and then
    slicing the pooled output back to ceil(input_size / stride).
    """
    if border_mode == "same":
        # Pad just enough that every input position is covered by a window.
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == "valid":
        padding = (0, 0)
    else:
        raise Exception("Invalid border mode: " + str(border_mode))

    if dim_ordering not in {"th", "tf"}:
        raise Exception("Unknown dim_ordering " + str(dim_ordering))
    if dim_ordering == "tf":
        # NHWC -> NCHW, since Theano pools channel-first tensors.
        x = x.dimshuffle((0, 3, 1, 2))

    if pool_mode == "max":
        theano_mode = "max"
    elif pool_mode == "avg":
        theano_mode = "average_exc_pad"
    else:
        raise Exception("Invalid pooling mode: " + str(pool_mode))
    pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                            ignore_border=True, padding=padding,
                            mode=theano_mode)

    if border_mode == "same":
        # Crop the over-padded result to the expected "same" output size.
        expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
        expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
        pool_out = pool_out[:, :, :expected_width, :expected_height]

    if dim_ordering == "tf":
        # Back to NHWC for the caller.
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
开发者ID:leocnj,项目名称:keras,代码行数:34,代码来源:theano_backend.py
示例4: __init__
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), non_linear="tanh"):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.
    :type rng: np.random.RandomState
    :param rng: a random number generator used to initialize weights
    :type input: theano.tensor.dtensor4
    :param input: symbolic image tensor, of shape image_shape
    :type filter_shape: tuple or list of length 4
    :param filter_shape: (number of filters, num input feature maps, filter height, filter width)
    :type image_shape: tuple or list of length 4
    :param image_shape: (batch size, num input feature maps, image height, image width)
    :type poolsize: tuple or list of length 2
    :param poolsize: the downsampling (pooling) factor (#rows,#cols)
    :param non_linear: activation after the convolution: "tanh", "relu" or "none"
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.filter_shape = filter_shape
    self.image_shape = image_shape
    self.poolsize = poolsize
    self.non_linear = non_linear
    # Output spatial size: valid convolution (image - filter + 1) divided
    # by the pooling factor, truncated to int.
    # BUG FIX: the original closed int() before the division on the width
    # axis -- `int(...)/poolsize[1]` -- producing a float under Python 3;
    # the parentheses now match the height axis.
    self.output_shape = (image_shape[0],
                         filter_shape[0],
                         int((image_shape[2] - filter_shape[2] + 1) / poolsize[0]),
                         int((image_shape[3] - filter_shape[3] + 1) / poolsize[1]))
    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = np.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" /
    # pooling size
    fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
    # initialize weights with random weights: small uniform range for
    # relu/linear, Glorot-style bound otherwise
    if self.non_linear == "none" or self.non_linear == "relu":
        self.W = theano.shared(np.asarray(rng.uniform(low=-0.01, high=0.01, size=filter_shape),
                                          dtype=theano.config.floatX), borrow=True, name="W_conv")
    else:
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                                          dtype=theano.config.floatX), borrow=True, name="W_conv")
    # One bias value per position of every output feature map (a full
    # per-map bias tensor, not a per-filter scalar).
    b_values = np.zeros((self.output_shape[1],
                         image_shape[2] - filter_shape[2] + 1,
                         image_shape[3] - filter_shape[3] + 1),
                        dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True, name="b_conv")
    # convolve input feature maps with filters
    self.conv_out = conv.conv2d(input=input, filters=self.W,
                                filter_shape=self.filter_shape, image_shape=self.image_shape)
    if self.non_linear == "tanh":
        self.conv_out_tanh = T.tanh(self.conv_out + self.b)
        self.output = pool.pool_2d(input=self.conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        self.conv_out_tanh = ReLU(self.conv_out + self.b)
        self.output = pool.pool_2d(input=self.conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        # Linear path: bias is added after pooling instead of before.
        pooled_out = pool.pool_2d(input=self.conv_out, ds=self.poolsize, ignore_border=True)
        self.output = pooled_out + self.b
    self.params = [self.W, self.b]
    self.L2 = (self.W ** 2).sum()
开发者ID:giahy2507,项目名称:summarynew,代码行数:60,代码来源:nnlayers.py
示例5: model
def model(X, params, pDropConv, pDropHidden):
    """NIN network: three NIN + 2x2-max-pool + dropout stages, then two
    1x1-convolution layers and global average pooling into a softmax."""
    def _down_stage(inp, stage_params):
        # NIN block, non-overlapping 2x2 max pooling, conv dropout.
        out = nin(inp, stage_params)
        out = pool_2d(out, (2, 2), st=(2, 2), ignore_border=False, mode='max')
        return basicUtils.dropout(out, pDropConv)

    layer = _down_stage(X, params[0])       # conv: (32, 32) pool: (16, 16)
    layer = _down_stage(layer, params[1])   # conv: (16, 16) pool: (8, 8)
    layer = _down_stage(layer, params[2])   # conv: (8, 8)  pool: (4, 4)

    # 1x1 convolutions stand in for fully connected layers; global
    # average pooling then yields the class scores.
    layer = conv1t1(layer, params[3])
    layer = basicUtils.dropout(layer, pDropHidden)
    layer = conv1t1(layer, params[4])
    layer = gap(layer)
    # The nnet softmax can produce NaN during training, hence this one.
    return softmax(layer)
开发者ID:ifenghao,项目名称:myDeepLearning,代码行数:28,代码来源:NINv1.py
示例6: model
def model(X, params, featMaps, pieces, pDropConv, pDropHidden):
    """Maxout convnet: three conv + maxout + 2x2-max-pool + dropout
    stages, two ReLU fully connected layers, and a softmax output."""
    def _conv_stage(inp, stage_params, maps, n_pieces):
        # 'half' padding keeps the spatial size; maxout competes over
        # n_pieces linear pieces per feature map.
        out = conv2d(inp, stage_params[0], border_mode='half') + \
              stage_params[1].dimshuffle('x', 0, 'x', 'x')
        out = maxout(out, maps, n_pieces)
        out = pool_2d(out, (2, 2), st=(2, 2), ignore_border=False, mode='max')
        return basicUtils.dropout(out, pDropConv)

    layer = _conv_stage(X, params[0], featMaps[0], pieces[0])      # (32,32)->(16,16)
    layer = _conv_stage(layer, params[1], featMaps[1], pieces[1])  # (16,16)->(8,8)
    layer = _conv_stage(layer, params[2], featMaps[2], pieces[2])  # (8,8)->(4,4)

    # Two fully connected ReLU layers with dropout.
    layer = T.flatten(layer, outdim=2)
    for idx in (3, 4):
        layer = T.dot(layer, params[idx][0]) + params[idx][1].dimshuffle('x', 0)
        layer = relu(layer, alpha=0)
        layer = basicUtils.dropout(layer, pDropHidden)
    # The nnet softmax can produce NaN during training, hence this one.
    return softmax(T.dot(layer, params[5][0]) + params[5][1].dimshuffle('x', 0))
开发者ID:ifenghao,项目名称:myDeepLearning,代码行数:30,代码来源:Maxoutconv1.py
示例7: test_old_pool_interface
def test_old_pool_interface(self):
    """Unpickle a function built with the legacy pool interface and check
    it computes the same values as a freshly built equivalent graph."""
    if sys.version_info[0] != 3:
        # Only tested with python 3 because of pickling issues.
        raise SkipTest('Skip old pool interface with python 2.x')

    # 1. Load the old version from its pickle.
    testfile_dir = os.path.dirname(os.path.realpath(__file__))
    pkl_path = os.path.join(testfile_dir, 'old_pool_interface.pkl')
    with open(pkl_path, 'rb') as fp:
        try:
            old_fct = cPickle.load(fp, encoding='latin1')
        except ImportError:
            # Windows sometimes fails with nonsensical errors like:
            #   ImportError: No module named type
            #   ImportError: No module named copy_reg
            # when "type" and "copy_reg" are builtin modules.
            if sys.platform == 'win32':
                exc_type, exc_value, exc_trace = sys.exc_info()
                reraise(SkipTest, exc_value, exc_trace)
            raise

    # 2. Build the same computation with the current interface.
    x = theano.tensor.ftensor4()
    y = pool_2d(x, (2, 2), mode='max', ignore_border=True)
    z = pool_2d(x, (2, 2), mode='average_exc_pad', ignore_border=True)
    dy_dx = theano.gradient.grad(y.sum(), x)
    dz_dx = theano.gradient.grad(z.sum(), x)
    new_fct = theano.function([x], [y, z, dy_dx, dz_dx])

    # 3. Both versions must agree on random input.
    rng = numpy.random.RandomState(utt.fetch_seed())
    image_val = rng.rand(4, 6, 7, 9).astype(numpy.float32)
    for old_res, new_res in zip(old_fct(image_val), new_fct(image_val)):
        utt.assert_allclose(old_res, new_res)
开发者ID:maniacs-ops,项目名称:Theano,代码行数:33,代码来源:test_pool.py
示例8: CNN
def CNN(x,c_l1,c_l2,f_l1,f_l2,insize):
# Python 2 code (print statements). Builds a two-stage CNN:
# conv -> 3x3 max-pool -> cross-map local response norm, then
# conv -> 2x2 max-pool -> norm, then two dense layers (ReLU, sigmoid).
# Prints the computed feature-map shape after every stage for debugging.
# Returns the (unchanged) weight tensors plus the prediction pyx.
print "in size ", insize
conv1=tensor.nnet.relu(conv2d(x,c_l1)) #default stride=1 --subsample=(1,1)
# Track the symbolic output shape alongside the graph construction.
conv1_shp=get_conv_output_shape(insize,c_l1.get_value().shape,border_mode='valid',subsample=(1,1))
print "conv1 size ", conv1_shp
pool1=pool_2d(conv1,(3,3),st=(3,3),ignore_border=True) #default maxpool
pool1_shp=get_pool_output_shape(conv1_shp,pool_size=(3,3),st=(3,3),ignore_border=True)
print "pool1 size ", pool1_shp
# Local response normalization across feature maps (shape unchanged).
lrn1=LRN(pool1,pool1_shp)
lrn1_shp=tuple(pool1_shp)
print "cross map norm1 size ", lrn1_shp
conv2=tensor.nnet.relu(conv2d(lrn1,c_l2))
conv2_shp=get_conv_output_shape(lrn1_shp,c_l2.get_value().shape,border_mode='valid',subsample=(1,1))
print "conv2 size ", conv2_shp
pool2=pool_2d(conv2,(2,2),st=(2,2),ignore_border=True)
pool2_shp=get_pool_output_shape(conv2_shp,pool_size=(2,2),st=(2,2),ignore_border=True)
print "pool2 size ", pool2_shp
lrn2=LRN(pool2,pool2_shp)
lrn2_shp=tuple(pool2_shp)
print "cross map norm2 size " , lrn2_shp
# Flatten to (batch, features) for the dense layers.
fpool2=tensor.flatten(lrn2,outdim=2)
full1=tensor.nnet.relu(tensor.dot(fpool2,f_l1))
pyx=tensor.nnet.sigmoid(tensor.dot(full1,f_l2))
return c_l1, c_l2, f_l1, f_l2, pyx
开发者ID:yunjieliu,项目名称:Machine-Learning,代码行数:26,代码来源:AR_CNN.py
示例9: pool3d
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    """3D pooling built from two successive 2D pools.

    The first pool_2d handles conv_dim2/conv_dim1, the second handles
    conv_dim3; dimshuffle rotates the axes between the two passes.
    """
    if border_mode == 'same':
        # TODO: add implementation for border_mode="same"
        raise Exception('border_mode="same" not supported with Theano.')
    elif border_mode == 'valid':
        ignore_border = True
        padding = (0, 0)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))

    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        # NDHWC -> NCDHW for Theano.
        x = x.dimshuffle((0, 4, 1, 2, 3))

    if pool_mode == 'max':
        theano_mode = 'max'
    elif pool_mode == 'avg':
        theano_mode = 'average_exc_pad'
    else:
        raise Exception('Invalid pooling mode: ' + str(pool_mode))

    # Pass 1: pool over conv_dim2, conv_dim1 (the last two axes after
    # the shuffle).
    intermediate = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                                ds=(pool_size[1], pool_size[0]),
                                st=(strides[1], strides[0]),
                                ignore_border=ignore_border,
                                padding=padding,
                                mode=theano_mode)
    # Pass 2: pool over conv_dim3 (last axis after shuffling back).
    pool_out = pool.pool_2d(input=intermediate.dimshuffle(0, 1, 4, 3, 2),
                            ds=(1, pool_size[2]),
                            st=(1, strides[2]),
                            ignore_border=ignore_border,
                            padding=padding,
                            mode=theano_mode)

    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
开发者ID:fvisin,项目名称:keras,代码行数:56,代码来源:theano_backend.py
示例10: CNN
def CNN(x, c_l1, c_l2, f_l1, f_l2):
    """Two conv + 2x2-max-pool stages, one ReLU dense layer, sigmoid out.

    Returns the (unchanged) parameters together with the prediction pyx.
    """
    # Stage 1: valid convolution (default stride 1) then max pooling.
    h1 = tensor.nnet.relu(conv2d(x, c_l1))
    h1 = pool_2d(h1, (2, 2), st=(2, 2), ignore_border=True)
    # Stage 2: same pattern on the pooled maps.
    h2 = tensor.nnet.relu(conv2d(h1, c_l2))
    h2 = pool_2d(h2, (2, 2), st=(2, 2), ignore_border=True)
    # Flatten to (batch, features) for the dense layers.
    flat = tensor.flatten(h2, outdim=2)
    hidden = tensor.nnet.relu(tensor.dot(flat, f_l1))
    pyx = tensor.nnet.sigmoid(tensor.dot(hidden, f_l2))
    return c_l1, c_l2, f_l1, f_l2, pyx
开发者ID:yunjieliu,项目名称:Machine-Learning,代码行数:10,代码来源:FR_CNN.py
示例11: modelFlow
def modelFlow(X, params):
    """Return every intermediate activation (conv output and pooled
    output) of a three-stage conv/pool stack, for visualization."""
    activations = [X]
    current = X
    for stage, (w, b) in enumerate(params[:3]):
        # Only the first convolution uses border_mode='full'; the later
        # stages use conv2d's default border mode.
        conv_kwargs = {'border_mode': 'full'} if stage == 0 else {}
        conv_out = relu(conv2d(current, w, **conv_kwargs) +
                        b.dimshuffle('x', 0, 'x', 'x'))
        pooled = pool_2d(conv_out, (2, 2))
        activations.append(conv_out)
        activations.append(pooled)
        current = pooled
    return tuple(activations)
开发者ID:ifenghao,项目名称:myDeepLearning,代码行数:13,代码来源:visualize.py
示例12: run_test
def run_test(direction='forward'):
    """Dump the relu+max-pool graph as PNGs before and after Theano's
    graph optimization, for the forward or backward pass."""
    print('=' * 60)
    print('generate relu_pool graph before and after opt for %s pass' % direction)
    x = T.ftensor4('x')
    imval = np.random.rand(4, 2, 16, 16).astype(np.float32)
    # relu followed by a 2x2 max pool, borders included.
    poolOut = pool.pool_2d(T.nnet.relu(x), (2, 2), False, mode='max')
    if direction == 'forward':
        theano.printing.pydotprint(poolOut, outfile="relu_pool_before_opt.png",
                                   var_with_name_simple=True)
        f = theano.function(inputs=[x], outputs=[poolOut])
        theano.printing.pydotprint(f, outfile="relu_pool_after_opt.png",
                                   var_with_name_simple=True)
        f(imval)
    elif direction == 'backward':
        poolBackward = T.grad(T.sum(poolOut), [x])
        theano.printing.pydotprint(poolBackward,
                                   outfile="relu_poolBackward_before_opt.png",
                                   var_with_name_simple=True)
        f = theano.function(inputs=[x], outputs=poolBackward)
        theano.printing.pydotprint(f,
                                   outfile="relu_poolBackward_after_opt.png",
                                   var_with_name_simple=True)
        f(imval)
    else:
        print("Invalid direction, only forward or backward allowed!")
开发者ID:intel,项目名称:theano,代码行数:27,代码来源:gen_combination_graph.py
示例13: pool2d
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    """2D pooling with an optional cuDNN fast path.

    border_mode may be 'same' (emulated via padding), 'valid', or an
    explicit (pad_w, pad_h) tuple/list.
    """
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))  # NHWC -> NCHW
    # ====== border mode ====== #
    if border_mode == 'same':
        # Pad enough that every input position is covered by a window.
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        # cuDNN path.
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:
        # CPU version supported by Theano.
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
开发者ID:trungnt13,项目名称:odin_old,代码行数:34,代码来源:theano_backend.py
示例14: test_max_pool_2d_2D
def test_max_pool_2d_2D(self):
    """pool_2d on a 2D matrix must match the NumPy reference, for every
    combination of pool shape, border handling and pooling mode; the
    gradient is verified numerically as well."""
    rng = numpy.random.RandomState(utt.fetch_seed())
    imval = rng.rand(4, 5)
    images = tensor.dmatrix()
    all_modes = ['max', 'sum', 'average_inc_pad', 'average_exc_pad']
    for maxpoolshp, ignore_border, mode in product(((1, 1), (3, 2)),
                                                   [True, False],
                                                   all_modes):
        expected = self.numpy_max_pool_2d(imval, maxpoolshp,
                                          ignore_border, mode=mode)
        output = pool_2d(images, maxpoolshp, ignore_border, mode=mode)
        actual = function([images], output)(imval)
        utt.assert_allclose(actual, expected)

        # Numeric gradient check for the same configuration.
        def mp(input):
            return pool_2d(input, maxpoolshp, ignore_border, mode=mode)
        utt.verify_grad(mp, [imval], rng=rng)
开发者ID:12190143,项目名称:Theano,代码行数:25,代码来源:test_pool.py
示例15: model
def model(X, params, pDropConv, pDropHidden):
    """NIN network with a parameterized global-average-pooling head.

    pDropHidden is accepted for interface parity but unused here.
    """
    def _stage(inp, stage_params):
        # NIN block, non-overlapping 2x2 max pooling, conv dropout.
        out = nin(inp, stage_params)
        out = pool_2d(out, (2, 2), st=(2, 2), ignore_border=False, mode='max')
        return basicUtils.dropout(out, pDropConv)

    layer = _stage(X, params[0])      # conv: (32, 32) pool: (16, 16)
    layer = _stage(layer, params[1])  # conv: (16, 16) pool: (8, 8)
    layer = _stage(layer, params[2])  # conv: (8, 8)  pool: (4, 4)
    layer = gap(layer, params[3])
    # The nnet softmax can produce NaN during training, hence this one.
    return softmax(layer)
开发者ID:ifenghao,项目名称:myDeepLearning,代码行数:16,代码来源:NINgaplayer.py
示例16: get_output_for
def get_output_for(self, input, **kwargs):
# Pool the input. The custom 'strictsamex' pad mode left-pads the time
# axis (axis 2) by ceil(pool_size/2) zeros before pooling, then crops
# the pooled output back to the original length; it requires stride 1
# on that axis.
if self.pad == 'strictsamex':
assert(self.stride[0] == 1)
kk = self.pool_size[0]
ll = int(np.ceil(kk/2.))
# rr = kk-ll
# pad = (ll, 0)
pad = [(ll, 0)]
# Symbolic length of the (unpadded) time axis, used for cropping below.
length = input.shape[2]
# NOTE(review): this mutates layer state on every forward call;
# presumably harmless because it is always forced to True -- confirm.
self.ignore_border = True
input = padding.pad(input, pad, batch_ndim=2)
pad = (0, 0)
else:
pad = self.pad
pooled = pool.pool_2d(input,
ds=self.pool_size,
st=self.stride,
ignore_border=self.ignore_border,
padding=pad,
mode=self.mode,
)
if self.pad == 'strictsamex':
# Crop the time axis back to the pre-padding length.
# NOTE(review): `length or None` invokes truth-testing on a symbolic
# variable -- verify this is well-defined for Theano tensors.
pooled = pooled[:, :, :length or None, :]
return pooled
开发者ID:tweihaha,项目名称:aed-by-cnn,代码行数:29,代码来源:layers.py
示例17: test_convolution
def test_convolution(self):
    """
    Build and compile a small convolution + pooling + tanh graph.

    input: a 4D tensor corresponding to a mini-batch of input images.
    The shape of the tensor is as follows:
    [mini-batch size, number of input feature maps, image height, image width].
    """
    self.input = T.tensor4(name='input')
    # Weights, uniform in [-1/w_bound, 1/w_bound].
    W_shape = (self.numbers_of_feature_maps[1], self.numbers_of_feature_maps[0],
               self.filter_shape[0], self.filter_shape[1])
    w_bound = np.sqrt(self.numbers_of_feature_maps[0] *
                      self.filter_shape[0] * self.filter_shape[1])
    # BUG FIX: the original passed `1,0/w_bound` (a comma typo for
    # `1.0/w_bound`), handing np.random.uniform four positional arguments.
    self.W = theano.shared(
        np.asarray(np.random.uniform(-1.0 / w_bound, 1.0 / w_bound, W_shape),
                   dtype=self.input.dtype),
        name='W')
    # Bias, one value per output feature map.
    bias_shape = (self.numbers_of_feature_maps[1],)
    # BUG FIX: the original used the builtin `input` (a function with no
    # .dtype) instead of the symbolic tensor self.input.
    self.bias = theano.shared(
        np.asarray(np.random.uniform(-.5, .5, size=bias_shape),
                   dtype=self.input.dtype),
        name='b')
    # Convolution
    self.convolution = conv.conv2d(self.input, self.W)
    self.max_pooling = pool.pool_2d(
        input=self.convolution,
        ds=self.pooling_size,
        ignore_border=True
    )
    # NOTE(review): the activation is taken from self.convolution, not
    # self.max_pooling -- confirm whether pooling was meant to feed it.
    output = T.tanh(self.convolution + self.bias.dimshuffle('x', 0, 'x', 'x'))
    # BUG FIX: theano.function must take the symbolic tensor, not the
    # builtin `input` function.
    f = theano.function([self.input], output)
示例18: __init__
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.
    :type rng: numpy.random.RandomState
    :param rng: a random number generator used to initialize weights
    :type input: theano.tensor.dtensor4
    :param input: symbolic image tensor, of shape image_shape
    :type filter_shape: tuple or list of length 4
    :param filter_shape: (number of filters, num input feature maps,
                          filter height, filter width)
    :type image_shape: tuple or list of length 4
    :param image_shape: (batch size, num input feature maps,
                         image height, image width)
    :type poolsize: tuple or list of length 2
    :param poolsize: the downsampling (pooling) factor (#rows, #cols)
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input

    # Fan-in: input_feature_maps * filter_height * filter_width.
    fan_in = numpy.prod(filter_shape[1:])
    # Fan-out: gradients received per lower-layer unit, reduced by the
    # pooling factor.
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
               numpy.prod(poolsize))

    # Xavier-initialized filters and zero biases (one per filter).
    self.W = theano.shared(
        xavier_init(rng, fan_in, fan_out, T.tanh, filter_shape),
        borrow=True)
    self.b = theano.shared(
        numpy.zeros((filter_shape[0],), dtype=theano.config.floatX),
        borrow=True)

    # Convolve the input feature maps with the filters.
    conv_out = conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        input_shape=image_shape
    )
    # Max-pool (downsample); the border is ignored rather than padded.
    pooled_out = pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    # Dimshuffle broadcasts one bias term per filter across the maps.
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
    self.input = input
示例19: pooling
def pooling(self, inputs, pool_size, ignore_border, stride, pad, mode):
    """Apply 2D pooling to `inputs`.

    Special modes: 'avg' maps to Theano's 'average_exc_pad'; 'fmp'
    dispatches to fractional max pooling.
    """
    if pool_size == [1, 1]:
        # A 1x1 pool is the identity.
        return inputs
    if mode == "avg":
        mode = "average_exc_pad"
    if mode == "fmp":
        # Fractional max pooling wants (row, col, batch, filter) layout
        # plus a per-image (height, width) size matrix.
        shuffled = inputs.dimshuffle(2, 3, 0, 1)
        sizes = T.zeros((inputs.shape[0], 2))
        sizes = T.set_subtensor(sizes[:, 0], inputs.shape[2])
        sizes = T.set_subtensor(sizes[:, 1], inputs.shape[3])
        pooled_out, _ = fmp(shuffled, sizes, pool_size[0])
        # Shuffle back to (batch, filter, row, col).
        return pooled_out.dimshuffle(2, 3, 0, 1)
    pool_out = pool.pool_2d(input=inputs,
                            ds=pool_size,
                            ignore_border=ignore_border,
                            st=stride,
                            padding=pad,
                            mode=mode)
    pool_out.name = "pool_out_" + self.name
    return pool_out
开发者ID:atuxhe,项目名称:returnn,代码行数:28,代码来源:NetworkCNNLayer.py
示例20: encode
def encode(self, utt_j, uttcut_j):
    """Encode one utterance through up to three convolution levels.

    :param utt_j: word-index vector for the utterance
    :param uttcut_j: true (unpadded) length of the utterance
    :returns: (sentence_matrix, utterance_vector); the first element is
        None when pooling is enabled at every level.
    """
    # Transform the word embeddings (up to the real length) to hidden size.
    emb_j = T.tanh(self.Wemb[utt_j[:uttcut_j], :])
    # 1st convolution
    wh1_j = self.convolution(emb_j, self.Wcv1)
    if self.pool[0]:  # pooling
        # BUG FIX: the original called pool.max_pool, which does not exist
        # in theano.tensor.signal.pool; pool_2d (mode defaults to 'max')
        # is the equivalent and matches the level-3 branch below.
        wh1_j = pool.pool_2d(input=wh1_j, ds=(3, 1), ignore_border=False)
    wh1_j = T.tanh(wh1_j)
    # 2nd convolution
    wh2_j = self.convolution(wh1_j, self.Wcv2)
    if self.pool[1]:  # pooling
        wh2_j = pool.pool_2d(input=wh2_j, ds=(3, 1), ignore_border=False)
    wh2_j = T.tanh(wh2_j)
    if self.level >= 3:
        # 3rd convolution
        wh3_j = self.convolution(wh2_j, self.Wcv3)
        if self.pool[2]:
            wh3_j = pool.pool_2d(input=wh3_j, ds=(3, 1),
                                 ignore_border=False)
        # Average pooling over the remaining positions.
        wh3_j = T.tanh(T.sum(wh3_j, axis=0))
    else:  # level < 3
        wh3_j = None
    if self.pool == (True, True, True):
        # BUG FIX: the original returned the undefined name `_`, which
        # raises NameError at runtime; return None explicitly instead.
        return None, wh3_j
    return T.concatenate([wh1_j, wh2_j], axis=1), wh3_j
开发者ID:jungle-cat,项目名称:NNDIAL,代码行数:32,代码来源:encoder.py
注:本文中的theano.tensor.signal.pool.pool_2d函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论