This article collects typical usage examples of the Python function theano.tensor.tensor4. If you have been wondering what exactly tensor4 does and how to use it, the curated examples below should help.
The following presents 20 code examples of the tensor4 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
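Before the project examples, here is a minimal, self-contained sketch (variable names and the toy shape are chosen purely for illustration) of what T.tensor4 does: it declares a symbolic 4D tensor, conventionally laid out as batch x channels x height x width, which you wire into a graph and then compile with theano.function.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic 4D tensor; no data is attached yet.
x = T.tensor4('x')
# Build a small graph: per-image, per-channel sum of squares.
y = (x ** 2).sum(axis=[2, 3])
# Compile the graph into a callable function.
f = theano.function([x], y)

batch = np.random.rand(2, 3, 4, 4).astype(theano.config.floatX)
print(f(batch).shape)  # (2, 3): one value per image per channel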
Example 1: theano_kernel_derivative

def theano_kernel_derivative(imshp,kshp,featshp,stride=1):
    features = T.tensor4(dtype=theano.config.floatX)
    kernel = T.tensor4(dtype=theano.config.floatX)
    image = T.tensor4(dtype=theano.config.floatX)

    # Transpose the first two dimensions of the kernel and reverse the kernel's
    # spatial dims (turning the convolution into a correlation):
    kernel_rotated = T.transpose(kernel[:,:,::-1,::-1],axes=[1,0,2,3])

    featshp_logical = (featshp[0],featshp[1],featshp[2]*stride,featshp[3]*stride)
    kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
    image_estimate = conv2d(features,kernel_rotated,border_mode='full',
                            image_shape=featshp,filter_shape=kshp_rotated,
                            imshp_logical=featshp_logical[1:],kshp_logical=kshp[2:])

    image_error = image - image_estimate

    image_error_rot = T.transpose(image_error,[1,0,2,3])[:,:,::-1,::-1]
    imshp_rot = (imshp[1],imshp[0],imshp[2],imshp[3])
    featshp_rot = (featshp[1],featshp[0],featshp[2],featshp[3])
    features_rot = T.transpose(features,[1,0,2,3])

    featshp_rot_logical = (featshp_rot[0],featshp_rot[1],featshp_rot[2]*stride,featshp_rot[3]*stride)
    kernel_grad_rot = -conv2d(image_error_rot,features_rot,
                              image_shape=imshp_rot,filter_shape=featshp_rot,
                              imshp_logical=imshp_rot[1:],kshp_logical=featshp_rot_logical[2:])
    kernel_grad = T.transpose(kernel_grad_rot,[1,0,2,3])

    return function(inputs=[image,features,kernel],outputs=kernel_grad)

Author: baylabs, Project: hdl, Lines: 29, Source: conv_models.py
Example 2: test_graph

def test_graph(self):
    # define common values first
    groups = 3
    bottom = np.random.rand(3, 6, 5, 5).astype(theano.config.floatX)
    kern = np.random.rand(9, 2, 3, 3).astype(theano.config.floatX)
    bottom_sym = T.tensor4('bottom')
    kern_sym = T.tensor4('kern')

    # grouped convolution graph
    conv_group = self.conv(num_groups=groups)(bottom_sym, kern_sym)
    gconv_func = theano.function([bottom_sym, kern_sym], conv_group, mode=self.mode)

    # graph for the normal, hard way: split the inputs, convolve each group,
    # then concatenate the results
    kern_offset = kern_sym.shape[0] // groups
    bottom_offset = bottom_sym.shape[1] // groups
    split_conv_output = [self.conv()(bottom_sym[:, i * bottom_offset:(i + 1) * bottom_offset, :, :],
                                     kern_sym[i * kern_offset:(i + 1) * kern_offset, :, :, :])
                         for i in range(groups)]
    concatenated_output = T.concatenate(split_conv_output, axis=1)
    conv_func = theano.function([bottom_sym, kern_sym], concatenated_output, mode=self.mode)

    # calculate outputs for each graph
    gconv_output = gconv_func(bottom, kern)
    conv_output = conv_func(bottom, kern)

    # compare values
    utt.assert_allclose(gconv_output, conv_output)

Author: DEVESHTARASIA, Project: Theano, Lines: 27, Source: test_corr.py
Example 3: fix_gpu_transfer

def fix_gpu_transfer():
    kshp = (10, 2, 10, 10)
    featshp = (3, 10, 11, 11)
    stride = 8
    mask = False
    # num images, channels, szy, szx
    imshp = (featshp[0], kshp[1],
             featshp[2] * stride + kshp[2] - 1,
             featshp[3] * stride + kshp[3] - 1)

    from theano import tensor as T
    x = T.tensor4()
    a = T.tensor4()
    A = T.tensor4()
    image_error = helper_T_l2_cost_conv(x, a, A, imshp, kshp, featshp,
                                        stride=(stride, stride), mask=mask)
    cost = .5 * T.sum(image_error ** 2)
    func = function([x, a, A], cost)

    import theano
    theano.printing.debugprint(func)

    x_in = np.random.randn(*imshp).astype(np.float32)
    a_in = np.random.randn(*featshp).astype(np.float32)
    A_in = np.random.randn(*kshp).astype(np.float32)

    from time import time as now
    repeats = 10
    t0 = now()
    for i in range(repeats):
        output = func(x_in, a_in, A_in)
    t = now() - t0
    print('time / iter = %f' % (t / repeats))

Author: baylabs, Project: hdl, Lines: 32, Source: conv_models.py
Example 4: __init__

def __init__(self, test_data_x, test_data_y):
    self.test_data_x = test_data_x
    self.test_data_y = test_data_y
    test = T.tensor4('test')
    pred = T.tensor4('pred')
    dc = dice_coef(test, pred)
    self.dc = theano.function([test, pred], dc)

Author: ericsolo, Project: python, Lines: 7, Source: LUNA_unet.py
Example 5: create_iter_funcs_valid

def create_iter_funcs_valid(l_out, bs=None, N=50, mc_dropout=False):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    if not mc_dropout:
        y_hat = layers.get_output(l_out, X, deterministic=True)
    else:
        if bs is None:
            raise ValueError('a fixed batch size is required for mc dropout')
        X_repeat = T.extra_ops.repeat(X, N, axis=0)
        y_sample = layers.get_output(
            l_out, X_repeat, deterministic=False)

        sizes = [X_repeat.shape[0] / X.shape[0]] * bs
        y_sample_split = T.as_tensor_variable(
            T.split(y_sample, sizes, bs, axis=0))
        y_hat = T.mean(y_sample_split, axis=1)

    valid_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))
    valid_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    valid_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[valid_loss, valid_acc],
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return valid_iter

Author: hjweide, Project: cifar-10-uncertainty, Lines: 35, Source: iter_funcs.py
Example 6: __init__

def __init__(self):
    X1 = T.tensor4()
    X2 = T.tensor4()
    X = [X1, X2]
    Y = [T.ivector()]
    model = Model()

    # conv1
    model.add(Conv(filter_shape=(32, 3, 3, 3), regularizers={'W': l1(0.0001)}, w_shared=True, n_inputs=2))
    model.add(Conv(filter_shape=(32, 32, 2, 2), regularizers={'W': l1(0.0001)}, w_shared=True, n_inputs=2))
    model.add(Pooling(pool_size=(2, 2)))
    model.add(Activation(mode='tanh'))

    # conv2
    model.add(Conv(filter_shape=(32, 32, 3, 3), regularizers={'W': l1(0.0001)}, w_shared=True, n_inputs=2))
    model.add(Pooling(pool_size=(2, 2)))
    model.add(Activation(mode='tanh'))

    # abs_diff
    model.add(Abs_diff())

    # conv3
    model.add(Conv(filter_shape=(32, 32, 3, 3), regularizers={'W': l1(0.0001)}, w_shared=True))
    model.add(Pooling(pool_size=(2, 2)))
    model.add(Activation(mode='tanh'))
    model.add(Flatten())

    self.f = theano.function(X, model.f(X, is_train=True))

    model.add(Fully((2880, 512)))
    model.add(Activation(mode='tanh'))
    model.add(Dropout(0.5))
    model.add(Fully((512, 2)))
    model.add(Activation(mode='softmax'))

    model.build(CostFunc.nll, RMSprop(), X, Y)
    self.model = model

Author: yangli625, Project: ReId_theano, Lines: 33, Source: model_new.py
Example 7: test_mask_loss_sobel

def test_mask_loss_sobel():
    th_mask, th_img = T.tensor4(), T.tensor4()
    ml = mask_loss_sobel(th_mask, th_img)
    mask_loss = theano.function([th_mask, th_img],
                                [ml.loss] + list(ml.sobel_mask) +
                                list(ml.sobel_img))

    mask_idx = next(masks(1))
    image_ok = 0.5 * np.ones_like(mask_idx)
    image_ok[mask_idx > MASK["IGNORE"]] = 1
    image_ok[mask_idx < MASK["BACKGROUND_RING"]] = 0

    print()
    loss, sobel_mask_x, sobel_mask_y, sobel_img_x, sobel_img_y = \
        mask_loss(mask_idx, image_ok)

    plt.set_cmap('gray')
    plt.subplot(221)
    plt.imshow(sobel_mask_x[0, 0])
    plt.subplot(222)
    plt.imshow(sobel_mask_y[0, 0])
    plt.colorbar()
    plt.subplot(223)
    plt.imshow(sobel_img_x[0, 0])
    plt.subplot(224)
    plt.imshow(sobel_img_y[0, 0])
    plt.colorbar()
    plt.savefig("mask_loss_sobel.png")

    print()
    print("mask_loss: {}".format(mask_loss(mask_idx, image_ok)))
    assert loss == 0

Author: GALI472, Project: deepdecoder, Lines: 30, Source: test_gpu_only_mask_loss.py
Example 8: compile_dream

def compile_dream(self, X_train, shapes, indices, initializer):
    self.dream_compiled = True
    self.X_dream = []
    index = 0
    for i in range(len(X_train)):
        if i in indices:
            self.X_dream.append(theano.shared(initializer(shapes[index]).astype('float32')))
            index += 1
        else:
            X_train[i] = atleast_4d(X_train[i][[0]])
            self.X_dream.append(theano.shared(X_train[i].astype('float32')))

    y_hat_test, layer_updates = self.tree.get_output(self.params_shared, self.X_dream[:], True)
    preds = y_hat_test.flatten(self.num_output_dims).mean(axis=None)

    self.dream_optimizer.build([self.X_dream[index] for index in indices])
    updates = list(self.dream_optimizer.get_updates([self.X_dream[index] for index in indices], -preds))
    for i, update in enumerate(updates):
        updates[i] = (update[0], update[1].astype('float32'))
    updates += layer_updates

    y_pred = T.tensor4(dtype='float32')
    y = T.tensor4(dtype='float32')
    accuracy = self.accuracy.get_accuracy(y_pred, y)
    self.dream_accuracy_theano = theano.function([y_pred, y], accuracy)

    self.dream_update = theano.function(
        inputs=[],
        outputs=preds,
        updates=updates
    )

Author: agajews, Project: Neural-Network-Dev, Lines: 31, Source: Core.py
Example 9: compile

def compile(self):
    # Helper function for rendering test images during training, or standalone inference mode.
    input_tensor, seed_tensor = T.tensor4(), T.tensor4()
    input_layers = {self.network['img']: input_tensor, self.network['seed']: seed_tensor}
    output = lasagne.layers.get_output([self.network[k] for k in ['seed', 'out']],
                                       input_layers, deterministic=True)
    self.predict = theano.function([seed_tensor], output)

    if not args.train: return

    output_layers = [self.network['out'], self.network[args.perceptual_layer], self.network['disc']]
    gen_out, percept_out, disc_out = lasagne.layers.get_output(output_layers, input_layers, deterministic=False)

    # Generator loss function, parameters and updates.
    self.gen_lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
    self.adversary_weight = theano.shared(np.array(0.0, dtype=theano.config.floatX))
    gen_losses = [self.loss_perceptual(percept_out) * args.perceptual_weight,
                  self.loss_total_variation(gen_out) * args.smoothness_weight,
                  self.loss_adversarial(disc_out) * self.adversary_weight]
    gen_params = lasagne.layers.get_all_params(self.network['out'], trainable=True)
    print(' - {} tensors learned for generator.'.format(len(gen_params)))
    gen_updates = lasagne.updates.adam(sum(gen_losses, 0.0), gen_params, learning_rate=self.gen_lr)

    # Discriminator loss function, parameters and updates.
    self.disc_lr = theano.shared(np.array(0.0, dtype=theano.config.floatX))
    disc_losses = [self.loss_discriminator(disc_out)]
    disc_params = list(itertools.chain(*[l.get_params() for k, l in self.network.items() if 'disc' in k]))
    print(' - {} tensors learned for discriminator.'.format(len(disc_params)))
    grads = [g.clip(-5.0, +5.0) for g in T.grad(sum(disc_losses, 0.0), disc_params)]
    disc_updates = lasagne.updates.adam(grads, disc_params, learning_rate=self.disc_lr)

    # Combined Theano function for updating both generator and discriminator at the same time.
    updates = collections.OrderedDict(list(gen_updates.items()) + list(disc_updates.items()))
    self.fit = theano.function([input_tensor, seed_tensor],
                               gen_losses + [disc_out.mean(axis=(1, 2, 3))], updates=updates)

Author: JulienHeiduk, Project: neural-enhance, Lines: 33, Source: enhance.py
Example 10: create_iter_funcs_test

def create_iter_funcs_test(l_out, bs, N=50):
    X = T.tensor4('X')
    X_batch = T.tensor4('X_batch')

    X_repeat = T.extra_ops.repeat(X, N, axis=0)
    y_sample = layers.get_output(
        l_out, X_repeat, deterministic=False)

    # the number of splits needs to be pre-defined
    sizes = [X_repeat.shape[0] / X.shape[0]] * bs
    y_sample_split = T.as_tensor_variable(
        T.split(y_sample, sizes, bs, axis=0))
    y_hat = T.mean(y_sample_split, axis=1)
    #y_var = T.var(y_sample_split, axis=1)

    test_iter = theano.function(
        inputs=[theano.Param(X_batch)],
        outputs=y_hat,
        #outputs=[y_hat, y_var],
        givens={
            X: X_batch,
        },
    )

    return test_iter

Author: hjweide, Project: cifar-10-uncertainty, Lines: 25, Source: iter_funcs.py
Example 11: burn

def burn():
    sz = 128
    img_shp = [sz, sz, sz, sz]
    kern_shp = [sz // 2, sz, 3, 3]
    out_shp = get_conv_output_shape(img_shp, kern_shp, 'valid', (1, 1))
    img = T.tensor4('img')
    kern = T.tensor4('kern')
    out = T.tensor4('out')

    def rand(shp):
        return np.random.rand(*shp).astype(theano.config.floatX)

    img = theano.shared(rand(img_shp))
    kern = theano.shared(rand(kern_shp))
    out = theano.shared(rand(out_shp))

    # beta 1 is needed to force the reuse of out; otherwise it is
    # replaced by a GpuAllocEmpty
    o1 = dnn._dnn_conv(img, kern, conv_mode='conv', out=out, beta=1.)

    mode = theano.compile.get_default_mode().including(
        "local_remove_all_assert")
    f = theano.function([], [o1], mode=mode)
    theano.printing.debugprint(f)
    print("Start computation")
    for i in range(10000):
        f.fn()
    print("Computation stopped")

Author: Theano, Project: Theano, Lines: 26, Source: burn_gpu.py
Example 12: test_batch_normalization_train_without_running_averages

def test_batch_normalization_train_without_running_averages():
    # compile and run batch_normalization_train without running averages
    utt.seed_rng()

    x, scale, bias, dy = T.tensor4('x'), T.tensor4('scale'), T.tensor4('bias'), T.tensor4('dy')
    data_shape = (5, 10, 30, 25)
    param_shape = (1, 10, 30, 25)

    # forward pass
    out, x_mean, x_invstd = bn.batch_normalization_train(x, scale, bias, 'per-activation')
    # backward pass
    grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
    # compile
    f = theano.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)
    # check if the abstract Ops have been replaced
    assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,
                                      bn.AbstractBatchNormInference,
                                      bn.AbstractBatchNormTrainGrad))
                    for n in f.maker.fgraph.toposort()])
    # run
    X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    f(X, Scale, Bias, Dy)

Author: Faruk-Ahmed, Project: Theano, Lines: 25, Source: test_bn.py
Example 13: create_iter_funcs_train

def create_iter_funcs_train(l_out, lr, mntm, wd):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    y_hat = layers.get_output(l_out, X, deterministic=False)

    # softmax loss
    train_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))

    # L2 regularization
    train_loss += wd * regularize_network_params(l_out, l2)

    train_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    all_params = layers.get_all_params(l_out, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        train_loss, all_params, lr, mntm)

    train_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[train_loss, train_acc],
        updates=updates,
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return train_iter

Author: hjweide, Project: cifar-10-uncertainty, Lines: 33, Source: iter_funcs.py
Example 14: test_theano_transposed_convolution

def test_theano_transposed_convolution(self):
    # how to use t_mk_conv_transpose
    from deconv.tdeconv_utils import t_mk_conv_transpose

    in4 = T.tensor4(name='conv_in', dtype=theano.config.floatX)
    f4 = T.tensor4(name='filters', dtype=theano.config.floatX)
    f_t_conv = theano.function(
        [in4],
        t_mk_conv_transpose(in4, f4),
        givens=[(f4, self.filters)]
    )
    test_input = np.array(
        [[[[0, 1, 0],
           [0, 1, 0],
           [0, 1, 0]],
          [[0, 0, 0],
           [1, 1, 1],
           [0, 0, 0]]]],
        dtype=theano.config.floatX
    )
    ground_truth = np.array(
        [[[[ 0,  0,  0, 0, 0],
           [-1, -1, -1, 0, 0],
           [ 0,  0,  0, 1, 0],
           [ 0,  0,  0, 1, 0],
           [ 0,  0,  0, 1, 0]]]],
        dtype=theano.config.floatX
    )
    assert_true(np.all(f_t_conv(test_input) == ground_truth))

Author: bonext, Project: deconvn, Lines: 29, Source: tdeconv_tests.py
Example 15: make_apply_gabor_function

def make_apply_gabor_function(filter_stack_shape, complex_cell=True):
    stim_tnsr = tnsr.tensor4('stim_tnsr')  # T x n_color_channels x stim_size x stim_size
    real_filter_stack_tnsr = tnsr.tensor4('real_feature_map_tnsr')  # D x n_color_channels x stim_size x stim_size, complex
    imag_filter_stack_tnsr = tnsr.tensor4('imag_feature_map_tnsr')  # D x n_color_channels x stim_size x stim_size, complex

    real_feature_map_tnsr = tnsr.nnet.conv2d(stim_tnsr,
                                             real_filter_stack_tnsr,
                                             filter_shape=filter_stack_shape,
                                             border_mode='full')  # produces T x D x stim_size x stim_size maps
    imag_feature_map_tnsr = tnsr.nnet.conv2d(stim_tnsr,
                                             imag_filter_stack_tnsr,
                                             filter_shape=filter_stack_shape,
                                             border_mode='full')  # produces T x D x stim_size x stim_size maps

    if complex_cell:
        # for filtering with complex Gabors, we need an operation for
        # squaring/summing the real and imaginary parts
        abs_value = tnsr.sqrt(tnsr.sqr(real_feature_map_tnsr) + tnsr.sqr(imag_feature_map_tnsr))
        # functionize the feature mapping
        make_feature_maps = function(inputs=[stim_tnsr, real_filter_stack_tnsr, imag_filter_stack_tnsr],
                                     outputs=abs_value)
    else:
        make_feature_maps = function(inputs=[stim_tnsr, real_filter_stack_tnsr],
                                     outputs=real_feature_map_tnsr)

    return make_feature_maps

Author: tnaselar, Project: hrf_fitting, Lines: 25, Source: gabor_feature_dictionaries.py
Example 16: getTheanoConvFunction

def getTheanoConvFunction(patchsize=None, imagesize=None):
    """
    Return a theano function performing valid convolution of a filter on an
    image.
    """
    # Define the size of the images and filters to allow Theano to
    # further optimize the convolution op
    image_shape = (None, 1, imagesize, imagesize)
    filter_shape = (None, 1, patchsize, patchsize)

    # Define the input variables to the function
    img = T.tensor4(dtype='floatX')
    filter = T.tensor4(dtype='floatX')
    mask = T.tensor4(dtype='floatX')

    # Convolve the image with both the filter and the mask
    convImgWithFilter = T.nnet.conv.conv2d(img, filter, border_mode='valid',
                                           image_shape=image_shape,
                                           filter_shape=filter_shape)

    # Normalize convImgWithFilter by the norm of each portion of the image
    # to avoid a brighter region taking the lead over a darker, better-fitting
    # one.
    convImgWithMask = T.nnet.conv.conv2d(img ** 2, mask, border_mode='valid',
                                         image_shape=image_shape,
                                         filter_shape=filter_shape)
    convImgWithMask = convImgWithMask ** 0.5

    normConvImgWithFilter = convImgWithFilter / (convImgWithMask ** 0.5)

    # Compile and return the theano function
    f = theano.function([img, filter, mask], normConvImgWithFilter)
    return f

Author: TongZZZ, Project: ift6266h13, Lines: 34, Source: extractKmeansFeatures.py
Example 17: train_model

def train_model():
    batch_size = 16
    num_epochs = c.ch4_train_epochs
    sz = c.fcn_img_size
    version = 2
    for i in range(5):
        data = u.DataH5PyStreamer(os.path.join(c.data_intermediate, 'ch4_256.hdf5'),
                                  batch_size=batch_size, folds=(5, i))
        input_var = T.tensor4('input')
        label_var = T.tensor4('label')
        net, output, output_det = m.build_fcn_segmenter(input_var,
                                                        (None, 1, sz, sz), version=version)
        params = nn.layers.get_all_params(net['output'], trainable=True)

        lr = theano.shared(nn.utils.floatX(3e-3))
        loss = du.sorenson_dice(output, label_var)
        te_loss = du.sorenson_dice(output_det, label_var)
        te_acc = nn.objectives.binary_accuracy(output_det, label_var).mean()
        updates = nn.updates.adam(loss, params, learning_rate=lr)

        train_fn = theano.function([input_var, label_var], loss, updates=updates)
        test_fn = theano.function([input_var, label_var], te_loss)
        acc_fn = theano.function([input_var, label_var], te_acc)
        pred_fn = theano.function([input_var], output_det)

        hist = u.train_with_hdf5(data, num_epochs=num_epochs,
                                 train_fn=train_fn, test_fn=test_fn,
                                 max_per_epoch=-1, use_tqdm=False,
                                 tr_transform=lambda x: du.segmenter_data_transform(x, rotate=(-180, 180)),
                                 te_transform=lambda x: du.segmenter_data_transform(x, rotate=None),
                                 last_layer=net['output'],
                                 save_params_to=os.path.join(c.params_dir, 'ch4seg_v{}/test_ch4seg_f{}_v{}.npz'
                                                             .format(version, i, version)))

Author: 01bui, Project: diagnose-heart, Lines: 30, Source: ch4.py
Example 18: functions

def functions(encoder, network, l_rate=1.):
    # For the network
    X = T.tensor4()
    Y = T.tensor4()  # X = Y
    parameters = nn.layers.get_all_params(layer=network, trainable=True)
    output = nn.layers.get_output(layer_or_layers=network, inputs=X)
    all_layers = nn.layers.get_all_layers(network)
    loss = T.mean(nn.objectives.squared_error(output, Y))
    updates = nn.updates.sgd(
        loss_or_grads=loss, params=parameters, learning_rate=l_rate)
    training_function = theano.function(
        inputs=[X, Y], outputs=loss, updates=updates)
    test_function = theano.function(
        inputs=[X, Y], outputs=[loss, output])

    # For the encoder
    code_output = nn.layers.get_output(layer_or_layers=encoder, inputs=X)
    code_function = theano.function(inputs=[X], outputs=code_output)

    # For the decoder
    Z = T.tensor4()
    decode_output = nn.layers.get_output(
        layer_or_layers=network, inputs={encoder: Z})
    decode_function = theano.function(inputs=[Z], outputs=decode_output)

    return training_function, test_function, code_function, decode_function

Author: mollymr305, Project: mnist-autoencoder, Lines: 26, Source: auto_encoder.py
Example 19: test_pooling

def test_pooling():
    shift = [[0, 1], [0, 1]]
    pool_shape = [2, 2]
    limits = [2, 2]
    inpt = prepare_array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    output = prepare_array([[5, 6], [8, 9]])
    inpt_expr = tensor4('input')
    output_expr = perform_pooling(inpt_expr, shift, pool_shape, limits)
    f = theano.function([inpt_expr], output_expr)
    assert np.allclose(f(inpt), output)

    shift = [[0], [0, 1]]
    pool_shape = [2, 2]
    limits = [1, 2]
    output = prepare_array([[5, 6]])
    inpt_expr = tensor4('input')
    output_expr = perform_pooling(inpt_expr, shift, pool_shape, limits)
    f = theano.function([inpt_expr], output_expr)
    assert np.allclose(f(inpt), output)

    shift = [[0, 1], [0, 1]]
    pool_shape = [1, 2]
    limits = [3, 2]
    output = prepare_array([[2, 3], [5, 6], [8, 9]])
    inpt_expr = tensor4('input')
    output_expr = perform_pooling(inpt_expr, shift, pool_shape, limits)
    f = theano.function([inpt_expr], output_expr)
    assert np.allclose(f(inpt), output)

Author: RuinCakeLie, Project: breze, Lines: 26, Source: test_cnn.py
Example 20: get_dc_input_layers

def get_dc_input_layers(shape):
    """
    Creates input layers for the CNN. Works for 2D and 3D input.

    Returns
    -------
    net: Ordered Dictionary
        net config with 3 entries: input, kspace_input, mask.
    """
    if len(shape) > 4:
        # 5D
        input_var = tensor5('input_var')
        kspace_input_var = tensor5('kspace_input_var')
        mask_var = tensor5('mask')
    else:
        input_var = T.tensor4('input_var')
        kspace_input_var = T.tensor4('kspace_input_var')
        mask_var = T.tensor4('mask')

    input_layer = InputLayer(shape, input_var=input_var, name='input')
    kspace_input_layer = InputLayer(shape, input_var=kspace_input_var,
                                    name='kspace_input')
    mask_layer = InputLayer(shape, input_var=mask_var, name='mask')

    return input_layer, kspace_input_layer, mask_layer

Author: snowbhr06, Project: Deep-MRI-Reconstruction, Lines: 25, Source: input.py
Note: the theano.tensor.tensor4 examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.