This article collects typical usage examples of the Python function theano.tensor.reshape. If you are wondering what reshape does, how to call it, or what real-world usage looks like, the hand-picked code samples below should help.
The following 20 code examples of the reshape function are shown, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
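Before the project examples, here is a minimal, self-contained sketch of what theano.tensor.reshape does (the variable names below are purely illustrative and not taken from any of the projects that follow):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                                  # symbolic 2-D input
flat = T.reshape(x, (x.shape[0] * x.shape[1],))    # flatten to 1-D; the new shape may be symbolic
pairs = T.reshape(x, (-1, 2))                      # -1 lets Theano infer that dimension

f = theano.function([x], [flat, pairs])
a, b = f(np.arange(6, dtype=theano.config.floatX).reshape(2, 3))
print(a.shape, b.shape)                            # expected: (6,) (3, 2)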
Example 1: get_unfolding_cost
def get_unfolding_cost(self):
    ''' computes the unfolding reconstruction cost (more than 2 inputs) '''
    x = T.reshape(self.x, (-1, self.n_vector))

    # fold: repeatedly merge the running hidden value with the next input vector
    yi = x[0]
    for i in range(1, self.num):
        xi = T.concatenate((yi, x[i]))
        yi = self.get_hidden_values(xi)

    # Save the deepest hidden value as output vector
    self.vector = copy.deepcopy(yi)

    # unfold: reconstruct the inputs in reverse order
    tmp = []
    for i in range(1, self.num):
        zi = self.get_reconstructed(yi)
        t = T.reshape(zi, (2, self.n_vector))
        tmp.append(t[1])
        yi = t[0]
    tmp.append(yi)
    tmp.reverse()

    x = self.x
    z = T.concatenate(tmp)
    # cross-entropy cost (the 0.5 rescaling maps values from [-1, 1] to [0, 1]); may need modification here
    L = -T.sum((0.5 * x + 0.5) * T.log(0.5 * z + 0.5) + (-0.5 * x + 0.5) * T.log(-0.5 * z + 0.5))
    # squared cost:
    #L = -T.sum((x - z) ** 2)
    cost = T.mean(L) + 0.01 * (self.W ** 2).sum()  # cost for a minibatch
    return cost
Author: MultiPath | Project: Dep-Compo | Lines: 34 | Source: RAE.py
Example 2: depool
def depool(X, factor=2):
    """
    Luke perforated upsample.
    http://www.brml.org/uploads/tx_sibibtex/281.pdf
    """
    output_shape = [
        X.shape[1],
        X.shape[2] * factor,
        X.shape[3] * factor
    ]
    stride = X.shape[2]
    offset = X.shape[3]
    in_dim = stride * offset
    out_dim = in_dim * factor * factor

    upsamp_matrix = T.zeros((in_dim, out_dim))
    rows = T.arange(in_dim)
    # integer division; the original used '/', which only floor-divides under Python 2 semantics
    cols = rows * factor + (rows // stride * factor * offset)
    upsamp_matrix = T.set_subtensor(upsamp_matrix[rows, cols], 1.)

    flat = T.reshape(X, (X.shape[0], output_shape[0], X.shape[2] * X.shape[3]))
    up_flat = T.dot(flat, upsamp_matrix)
    upsamp = T.reshape(up_flat, (X.shape[0], output_shape[0],
                                 output_shape[1], output_shape[2]))
    return upsamp
Author: zhijiang | Project: AllAboutAutoencoder | Lines: 27 | Source: conv_deconv_vae.py
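Assuming depool is given a 4-D float32 tensor in (batch, channels, rows, cols) layout, it can be compiled and smoke-tested roughly as follows; this is a sketch, not part of the original project:

import numpy as np
import theano
import theano.tensor as T

X = T.tensor4('X')
f = theano.function([X], depool(X, factor=2))

img = np.random.rand(1, 3, 8, 8).astype(theano.config.floatX)
print(f(img).shape)   # expected: (1, 3, 16, 16), each input pixel placed in the top-left of a 2x2 block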
Example 3: _transform_affine
def _transform_affine(theta, input, downsample_factor):
    num_batch, num_channels, height, width = input.shape
    theta = T.reshape(theta, (-1, 2, 3))

    # grid of (x_t, y_t, 1), eq (1) in ref [1]
    out_height = T.cast(height / downsample_factor[0], 'int64')
    out_width = T.cast(width / downsample_factor[1], 'int64')
    grid = _meshgrid(out_height, out_width)

    # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
    T_g = T.dot(theta, grid)
    x_s = T_g[:, 0]
    y_s = T_g[:, 1]
    x_s_flat = x_s.flatten()
    y_s_flat = y_s.flatten()

    # dimshuffle input to (bs, height, width, channels)
    input_dim = input.dimshuffle(0, 2, 3, 1)
    input_transformed = _interpolate(
        input_dim, x_s_flat, y_s_flat,
        out_height, out_width)

    output = T.reshape(
        input_transformed, (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
    return output
Author: AdityoSanjaya | Project: Lasagne | Lines: 26 | Source: special.py
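The key reshape in this example is theta = T.reshape(theta, (-1, 2, 3)), which turns a flat batch of 6 localization parameters into a batch of 2x3 affine matrices. A tiny sketch of just that step (shapes and names are illustrative):

import numpy as np
import theano
import theano.tensor as T

theta_flat = T.matrix('theta_flat')           # (num_batch, 6)
theta = T.reshape(theta_flat, (-1, 2, 3))     # (num_batch, 2, 3)

f = theano.function([theta_flat], theta)
identity = np.tile(np.array([1, 0, 0, 0, 1, 0], dtype=theano.config.floatX), (4, 1))
print(f(identity).shape)                      # expected: (4, 2, 3)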
Example 4: error
def error(self, outputs):
    '''Build a theano expression for computing the network error.

    Parameters
    ----------
    outputs : dict mapping str to theano expression
        A dictionary of all outputs generated by the layers in this network.

    Returns
    -------
    error : theano expression
        A theano expression representing the network error.
    '''
    output = outputs[self.output_name()]
    alpha = outputs['hid2:alpha']
    alpha_sum = alpha.sum(axis=0)         # alpha: max_dst_len * batch_size * max_src_len
    alpha_l_inf = alpha_sum.max(axis=-1)  # batch_size

    # flatten all but the last dimension of the output and labels
    n = output.shape[0] * output.shape[1]
    #print output.shape.eval()
    correct = TT.reshape(self.labels, (n, ))
    weights = TT.reshape(self.weights, (n, ))
    prob = TT.reshape(output, (n, output.shape[2]))
    nlp = -TT.log(TT.clip(prob[TT.arange(n), correct], 1e-8, 1))
    if self.weighted:
        return (weights * nlp).sum() / weights.sum() + alpha_l_inf.mean()
    return nlp.mean()
Author: masterkeywikz | Project: seq2graph | Lines: 29 | Source: recurrent.py
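The reshapes above flatten the leading (time, batch) axes so that each row of prob can be indexed by its label. A minimal sketch of that flattening-and-indexing pattern, outside of any class (names and shapes are illustrative):

import numpy as np
import theano
import theano.tensor as TT

output = TT.tensor3('output')          # (time, batch, n_classes) of probabilities
labels = TT.imatrix('labels')          # (time, batch)

n = output.shape[0] * output.shape[1]
correct = TT.reshape(labels, (n,))
prob = TT.reshape(output, (n, output.shape[2]))
nlp = -TT.log(TT.clip(prob[TT.arange(n), correct], 1e-8, 1))

f = theano.function([output, labels], nlp.mean())
probs = np.full((2, 3, 4), 0.25, dtype=theano.config.floatX)
print(f(probs, np.zeros((2, 3), dtype='int32')))   # expected: about 1.386, i.e. -log(0.25)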
Example 5: k_max_pool
def k_max_pool(self, x, k):
    """
    Perform k-max pooling on the input along the rows.

    input: theano.tensor.tensor4
    k: theano.tensor.iscalar
        the k parameter

    Returns:
        4D tensor
    """
    x = T.reshape(x, (x.shape[0], x.shape[1], 1, x.shape[2] * x.shape[3]))
    ind = T.argsort(x, axis=3)
    sorted_ind = T.sort(ind[:, :, :, -k:], axis=3)
    dim0, dim1, dim2, dim3 = sorted_ind.shape

    indices_dim0 = T.arange(dim0).repeat(dim1 * dim2 * dim3)
    indices_dim1 = (
        T.arange(dim1).repeat(dim2 * dim3).reshape((dim1 * dim2 * dim3, 1)).repeat(dim0, axis=1).T.flatten()
    )
    indices_dim2 = T.arange(dim2).repeat(dim3).reshape((dim2 * dim3, 1)).repeat(dim0 * dim1, axis=1).T.flatten()

    result = x[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
    shape = (result.shape[0], result.shape[1], result.shape[2] * result.shape[3], 1)
    result = T.reshape(result, shape)
    return result
Author: Xls1994 | Project: DeepLearning | Lines: 31 | Source: convLayer.py
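The fancy indexing above selects, for each (batch, channel, row) position, the k largest values while keeping them in their original order. The same selection logic is easier to see in a small NumPy analogue (this is only an illustration of the idea, not the Theano method itself):

import numpy as np

def k_max_numpy(x, k):
    # indices of the k largest entries along the last axis, kept in original order
    ind = np.argsort(x, axis=-1)[..., -k:]
    ind = np.sort(ind, axis=-1)
    return np.take_along_axis(x, ind, axis=-1)

x = np.array([[3., 1., 4., 1., 5., 9.]])
print(k_max_numpy(x, 3))   # expected: [[4. 5. 9.]]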
Example 6: T_subspacel1_slow_shrinkage_conv
def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp, kshp, featshp, stride=(1, 1), small_value=.001):
    featshp = (imshp[0], kshp[0], featshp[2], featshp[3])  # num images, features, szy, szx
    features = T.reshape(T.transpose(a), featshp, ndim=4)

    amp = T.sqrt(features[:, ::2, :, :]**2 + features[:, 1::2, :, :]**2 + small_value)
    #damp = amp[:,1:] - amp[:,:-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[1:, :, :, :] - amp[:-1, :, :, :]
    d2 = d1[1:, :, :, :] - d1[:-1, :, :, :]
    div = T.set_subtensor(div[1:-1, :, :, :], -d2)
    div = T.set_subtensor(div[0, :, :, :], -d1[0, :, :, :])
    div = T.set_subtensor(div[-1, :, :, :], d1[-1, :, :, :])

    slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
    slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :, :]
    slow_shrinkage_prox_b = slow_amp_value * features[:, 1::2, :, :]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a**2 + slow_shrinkage_prox_b**2)
    #amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
    subspacel1_prox = T.zeros_like(features)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :, :], amp_value * slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, 1::2, :, :], amp_value * slow_shrinkage_prox_b)

    reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox, (featshp[0], featshp[1] * featshp[2] * featshp[3]), ndim=2))
    return reshape_subspacel1_prox
Author: baylabs | Project: hdl | Lines: 32 | Source: theano_methods.py
Example 7: _active
def _active(m, pre_h, x):
    x = T.reshape(x, (self.batch_size, last_shape[0]))
    pre_h = T.reshape(pre_h, (self.batch_size, last_shape[1]))

    h = self.decoder._active(x, pre_h)
    y = T.nnet.softmax(T.dot(h, self.W_hy) + self.b_y)
    y = y * m[:, None]
    print(type(y))
    y_dim_y = y[:, 0:self.dim_y]
    y_dim_pos = y[:, self.dim_y:]
    print(type(y_dim_y))
    print(type(y_dim_pos))
    new_y_dim_y = y_dim_y + T.dot(y_dim_pos, self.word_tag_matrix)
    # y = np.column_stack((new_y_dim_y, y_dim_pos))
    y = T.concatenate([new_y_dim_y, y_dim_pos], axis=1)
    print(type(y))

    h = T.reshape(h, (1, self.batch_size * last_shape[1]))
    y = T.reshape(y, (1, self.batch_size * last_shape[0]))

    return h, y, new_y_dim_y, y_dim_pos
Author: luochuwei | Project: POS_tag_NN | Lines: 25 | Source: word_decoder_POS.py
Example 8: maxpool_3D
def maxpool_3D(input, ds, ignore_border=False):
    #input.dimshuffle(0, 2, 1, 3, 4)  # convert to make video in back.
    # no need to reshuffle.
    if input.ndim < 3:
        raise NotImplementedError('max_pool_3d requires a dimension >= 3')

    # extract nr dimensions
    vid_dim = input.ndim
    # max pool in two different steps, so we can use the 2d implementation of
    # DownsampleFactorMax. First maxpool frames as usual.
    # Then maxpool the time dimension. Shift the time dimension to the third
    # position, so rows and cols are in the back

    # extract dimensions
    frame_shape = input.shape[-2:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input.shape[:-2])
    batch_size = T.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                              T.as_tensor([1, ]),
                              frame_shape), 'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of videos in rows and cols
    op = DownsampleFactorMax((ds[1], ds[2]), ignore_border)  # second and third dimensions of ds are for height and width
    output = op(input_4D)
    # restore to original shape
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    # now maxpool time
    # output (time, rows, cols), reshape so that time is in the back
    shufl = (list(range(vid_dim - 3)) + [vid_dim - 2] + [vid_dim - 1] + [vid_dim - 3])
    input_time = out.dimshuffle(shufl)

    # reset dimensions
    vid_shape = input_time.shape[-2:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,width,time)
    new_shape = T.cast(T.join(0, batch_size,
                              T.as_tensor([1, ]),
                              vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)

    # downsample mini-batch of videos in time
    op = DownsampleFactorMax((1, ds[0]), ignore_border)  # here the time dimension is downsampled
    outtime = op(input_4D_time)

    # restore to original shape (xxx, rows, cols, time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (list(range(vid_dim - 3)) + [vid_dim - 1] + [vid_dim - 3] + [vid_dim - 2])
    #rval = T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Author: kli-nlpr | Project: Convolutional-Neural-Networks | Lines: 60 | Source: core.py
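Assuming the source module imports DownsampleFactorMax (as core.py presumably does) and that the input is laid out as (batch, channels, time, rows, cols) with time third from last, maxpool_3D could be exercised like this; the shapes and layout are assumptions, not taken from the original project:

import numpy as np
import theano
import theano.tensor as T

video = T.TensorType(theano.config.floatX, (False,) * 5)('video')   # (batch, channels, time, rows, cols)
f = theano.function([video], maxpool_3D(video, ds=(2, 2, 2)))

clip = np.random.rand(1, 1, 8, 16, 16).astype(theano.config.floatX)
print(f(clip).shape)   # expected: (1, 1, 4, 8, 8) -- time, rows and cols each halved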
Example 9: Transform
def Transform(X, w1, g1, b1, w2, g2, b2, downsample_factor=2):
    theta = GetTheta(X, w1, g1, b1, w2, g2, b2)
    num_batch, num_channels, height, width = X.shape
    theta = T.reshape(theta, (-1, 2, 3))

    height_f = T.cast(height, 'float32')
    width_f = T.cast(width, 'float32')
    out_height = T.cast(height_f // downsample_factor, 'int64')
    out_width = T.cast(width_f // downsample_factor, 'int64')
    grid = Meshgrid(out_height, out_width)

    # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
    T_g = T.dot(theta, grid)
    x_s, y_s = T_g[:, 0], T_g[:, 1]
    x_s_flat = x_s.flatten()
    y_s_flat = y_s.flatten()

    # dimshuffle input to (bs, height, width, channels)
    # note: the original snippet referenced the undefined name `input` here; X is intended
    input_dim = X.dimshuffle(0, 2, 3, 1)
    input_transformed = Interpolate(input_dim, x_s_flat, y_s_flat, downsample_factor)

    output = T.reshape(input_transformed,
                       (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)
    return output
Author: xzhang311 | Project: SpatialTransformLayer_Theano | Lines: 26 | Source: TransformerLayer.py
Example 10: do_fft
def do_fft(input, n_hidden):
    fft_input = T.reshape(input, (input.shape[0], 2, n_hidden))
    fft_input = fft_input.dimshuffle(0, 2, 1)
    fft_output = cufft(fft_input) * T.sqrt(n_hidden)
    fft_output = fft_output.dimshuffle(0, 2, 1)
    output = T.reshape(fft_output, (input.shape[0], 2 * n_hidden))
    return output
Author: Nehoroshiy | Project: urnn | Lines: 7 | Source: utils.py
Example 11: do_ifft
def do_ifft(input, n_hidden):
    ifft_input = T.reshape(input, (input.shape[0], 2, n_hidden))
    ifft_input = ifft_input.dimshuffle(0, 2, 1)
    ifft_output = cuifft(ifft_input) / T.sqrt(n_hidden)
    ifft_output = ifft_output.dimshuffle(0, 2, 1)
    output = T.reshape(ifft_output, (input.shape[0], 2 * n_hidden))
    return output
Author: Nehoroshiy | Project: urnn | Lines: 7 | Source: utils.py
Example 12: unitary_transform
def unitary_transform(input, n_hidden, U):
    UR, UI = U[0, :, :], U[1, :, :]
    unitary_input = T.reshape(input, (input.shape[0], 2, n_hidden))
    IR, II = unitary_input[:, 0, :], unitary_input[:, 1, :]
    output = T.stack([IR.dot(UR) - II.dot(UI), IR.dot(UI) + II.dot(UR)], axis=1)
    output = T.reshape(output, (input.shape[0], 2 * n_hidden))
    return output
Author: Nehoroshiy | Project: urnn | Lines: 7 | Source: utils.py
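All three helpers above store a length-n_hidden complex vector as a real matrix of shape (batch, 2*n_hidden): the first n_hidden columns hold the real part and the last n_hidden the imaginary part. A small sanity-check sketch for unitary_transform, using the identity matrix as U so the output should equal the input (shapes are illustrative):

import numpy as np
import theano
import theano.tensor as T

n_hidden = 4
x = T.matrix('x')   # (batch, 2*n_hidden), laid out as [real | imag]
U = theano.shared(np.stack([np.eye(n_hidden),                 # U[0]: real part of the unitary matrix
                            np.zeros((n_hidden, n_hidden))]   # U[1]: imaginary part
                           ).astype(theano.config.floatX))

f = theano.function([x], unitary_transform(x, n_hidden, U))
v = np.random.rand(2, 2 * n_hidden).astype(theano.config.floatX)
print(np.allclose(f(v), v))   # expected: True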
Example 13: T_l2_cost_conv
def T_l2_cost_conv(x, a, A, imshp, kshp, mask=True):
    """
    xsz*ysz*nchannels, nimages = x.shape
    xsz*ysz*nfeat, nimages = a.shape
    xsz*ysz*nchannels, nfeat = A.shape
    """
    #imshp = num images, channels, szy, szx
    #kshp = features, channels, szy, szx
    #featshp = num images, features, szy, szx
    featshp = (imshp[0], kshp[0], imshp[2] - kshp[2] + 1, imshp[3] - kshp[3] + 1)  # num images, features, szy, szx

    image = T.reshape(T.transpose(x), imshp)
    kernel = T.reshape(T.transpose(A), kshp)
    features = T.reshape(T.transpose(a), featshp)

    # Need to transpose first two dimensions of kernel, and reverse index kernel image dims (for correlation)
    kernel_rotated = T.transpose(kernel[:, :, ::-1, ::-1], axes=[1, 0, 2, 3])

    image_estimate = conv2d(features, kernel_rotated, border_mode='full')

    if mask:
        image_error_temp = image - image_estimate
        image_error = T.zeros_like(image_error_temp)
        image_error = T.set_subtensor(image_error[:, :, (kshp[2] - 1):(imshp[2] - kshp[2] + 1), (kshp[3] - 1):(imshp[3] - kshp[3] + 1)],
                                      image_error_temp[:, :, (kshp[2] - 1):(imshp[2] - kshp[2] + 1), (kshp[3] - 1):(imshp[3] - kshp[3] + 1)])
    else:
        image_error = image - image_estimate

    return .5 * T.sum(image_error ** 2)
Author: mczhu | Project: hdl | Lines: 31 | Source: theano_methods.py
Example 14: get_output
def get_output(self, train=False):
    X = self.get_input(train)
    X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)

    border_mode = self.border_mode
    if on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            assert(self.subsample_length == 1)
            pad_x = (self.filter_length - self.subsample_length) // 2
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=(pad_x, 0))
        else:
            conv_out = dnn.dnn_conv(img=X,
                                    kerns=self.W,
                                    border_mode=border_mode,
                                    subsample=self.subsample)
    else:
        if border_mode == 'same':
            assert(self.subsample_length == 1)
            border_mode = 'full'

        conv_out = T.nnet.conv.conv2d(X, self.W,
                                      border_mode=border_mode,
                                      subsample=self.subsample)
        if self.border_mode == 'same':
            shift_x = (self.filter_length - 1) // 2
            conv_out = conv_out[:, :, shift_x:X.shape[2] + shift_x, :]

    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    output = T.reshape(output, (output.shape[0], output.shape[1], output.shape[2])).dimshuffle(0, 2, 1)
    return output
Author: Mofef | Project: keras | Lines: 32 | Source: convolutional.py
Example 15: cost
def cost(self):
    """
    :param y: shape (time*batch,) -> label
    :return: error scalar, known_grads dict
    """
    y_f = T.cast(T.reshape(self.y_data_flat, (self.y_data_flat.shape[0] * self.y_data_flat.shape[1]), ndim=1), 'int32')
    known_grads = None
    if self.loss == 'sprint':
        if not isinstance(self.sprint_opts, dict):
            import json
            self.sprint_opts = json.loads(self.sprint_opts)
        assert isinstance(self.sprint_opts, dict), "you need to specify sprint_opts in the output layer"
        if self.exp_normalize:
            log_probs = T.log(self.p_y_given_x)
        else:
            log_probs = self.z
        sprint_error_op = SprintErrorSigOp(self.attrs.get("target", "classes"), self.sprint_opts)
        err, grad = sprint_error_op(log_probs, T.sum(self.index, axis=0))
        err = err.sum()
        if self.loss_like_ce:
            y_ref = T.clip(self.p_y_given_x - grad, numpy.float32(0), numpy.float32(1))
            err = -T.sum(T.log(T.pow(self.p_y_given_x, y_ref)) * T.cast(self.index, "float32").dimshuffle(0, 1, 'x'))
        if self.ce_smoothing:
            err *= numpy.float32(1.0 - self.ce_smoothing)
            grad *= numpy.float32(1.0 - self.ce_smoothing)
            if not self.prior_scale:  # we kept the softmax bias as it was
                nll, pcx = T.nnet.crossentropy_softmax_1hot(x=self.y_m[self.i], y_idx=self.y_data_flat[self.i])
            else:  # assume that we have subtracted the bias by the log priors beforehand
                assert self.log_prior is not None
                # In this case, for the CE calculation, we need to add the log priors again.
                y_m_prior = T.reshape(self.z + numpy.float32(self.prior_scale) * self.log_prior,
                                      (self.z.shape[0] * self.z.shape[1], self.z.shape[2]), ndim=2)
                nll, pcx = T.nnet.crossentropy_softmax_1hot(x=y_m_prior[self.i], y_idx=self.y_data_flat[self.i])
            ce = numpy.float32(self.ce_smoothing) * T.sum(nll)
            err += ce
            grad += T.grad(ce, self.z)
        known_grads = {self.z: grad}
        return err, known_grads
    elif self.loss == 'ctc':
        from theano.tensor.extra_ops import cpu_contiguous
        err, grad, priors = CTCOp()(self.p_y_given_x, cpu_contiguous(self.y.dimshuffle(1, 0)), self.index_for_ctc())
        known_grads = {self.z: grad}
        return err.sum(), known_grads, priors.sum(axis=0)
    elif self.loss == 'ce_ctc':
        y_m = T.reshape(self.z, (self.z.shape[0] * self.z.shape[1], self.z.shape[2]), ndim=2)
        p_y_given_x = T.nnet.softmax(y_m)
        #pcx = p_y_given_x[(self.i > 0).nonzero(), y_f[(self.i > 0).nonzero()]]
        pcx = p_y_given_x[self.i, self.y_data_flat[self.i]]
        ce = -T.sum(T.log(pcx))
        return ce, known_grads
    elif self.loss == 'ctc2':
        from NetworkCtcLayer import ctc_cost, uniq_with_lengths, log_sum
        max_time = self.z.shape[0]
        num_batches = self.z.shape[1]
        time_mask = self.index.reshape((max_time, num_batches))
        y_batches = self.y_data_flat.reshape((max_time, num_batches))
        targets, seq_lens = uniq_with_lengths(y_batches, time_mask)
        log_pcx = self.z - log_sum(self.z, axis=0, keepdims=True)
        err = ctc_cost(log_pcx, time_mask, targets, seq_lens)
        return err, known_grads
Author: chagge | Project: returnn | Lines: 60 | Source: NetworkOutputLayer.py
Example 16: compute_f_mu
def compute_f_mu(x, t, params):
    [centers, spreads, biases, M, b] = params
    diffs = x.dimshuffle(0, 1, 2, 'x') - centers.dimshuffle('x', 'x', 0, 1)
    scaled_diffs = (diffs**2) * T.exp(spreads).dimshuffle('x', 'x', 0, 1)
    exp_terms = T.sum(scaled_diffs, axis=2) + biases.dimshuffle('x', 'x', 0) * 0.0
    h = T.exp(-exp_terms)
    sumact = T.sum(h, axis=2)

    # Normalization
    hnorm = h / sumact.dimshuffle(0, 1, 'x')
    z = T.dot(hnorm, M)
    z = T.reshape(z, (t.shape[0], t.shape[1], ntgates, nx)) + b.dimshuffle('x', 'x', 0, 1)  # nt by nb by ntgates by nx
    #z = z + T.reshape(x, (t.shape[0], t.shape[1], 1, nx))

    tpoints = T.cast(T.arange(ntgates), 'float32') / T.cast(ntgates - 1, 'float32')
    tpoints = T.reshape(tpoints, (1, 1, ntgates))
    #tgating = T.exp(T.dot(t, muWT) + mubT)  # nt by nb by ntgates
    tgating = T.exp(-kT * (tpoints - t)**2)
    tgating = tgating / T.reshape(T.sum(tgating, axis=2), (t.shape[0], t.shape[1], 1))
    tgating = T.reshape(tgating, (t.shape[0], t.shape[1], ntgates, 1))

    mult = z * tgating

    out = T.sum(mult, axis=2)
    #out = out + x

    return T.cast(out, 'float32')
Author: float650 | Project: Diffusion-Model | Lines: 27 | Source: diffusion_model_learn_betafunc.py
Example 17: T_l2_cost_conv_dA
def T_l2_cost_conv_dA(x, a, A, imshp, kshp, featshp, stride=(1, 1), mask=True):
    image_error, kernel, features = helper_T_l2_cost_conv(x=x, a=a, A=A, imshp=imshp, kshp=kshp, featshp=featshp, stride=stride, mask=mask)

    if stride == (1, 1):
        image_error_rot = T.transpose(image_error, [1, 0, 2, 3])[:, :, ::-1, ::-1]
        imshp_rot = (imshp[1], imshp[0], imshp[2], imshp[3])
        featshp_rot = (featshp[1], featshp[0], featshp[2], featshp[3])
        features_rot = T.transpose(features, [1, 0, 2, 3])

        featshp_rot_logical = (featshp_rot[0],
                               featshp_rot[1],
                               imshp[2] - kshp[2] + 1,
                               imshp[3] - kshp[3] + 1)
        kernel_grad_rot = -1. * conv2d(image_error_rot, features_rot,
                                       image_shape=imshp_rot, filter_shape=featshp_rot,
                                       imshp_logical=imshp_rot[1:], kshp_logical=featshp_rot_logical[2:])
        kernel_grad = T.transpose(kernel_grad_rot, [1, 0, 2, 3])

        reshape_kernel_grad = T.transpose(T.reshape(kernel_grad, (kshp[0], kshp[1] * kshp[2] * kshp[3]), ndim=2))
        return reshape_kernel_grad
    else:
        my_conv = MyConv_view(strides=stride, kshp=kshp)
        kernel_grad = my_conv(image_error, features)

        reshape_kernel_grad = T.transpose(T.reshape(kernel_grad, (kshp[0], kshp[1] * kshp[2] * kshp[3]), ndim=2))
        return reshape_kernel_grad
Author: baylabs | Project: hdl | Lines: 30 | Source: theano_methods.py
Example 18: max_pool_2d
def max_pool_2d(input, ds, ignore_border=False):
    """
    Takes as input an N-D tensor, where N >= 2. It downscales the input image by
    the specified factor, by keeping only the maximum value of non-overlapping
    patches of size (ds[0], ds[1]).

    :type input: N-D theano tensor of input images.
    :param input: input images. Max pooling will be done over the 2 last dimensions.
    :type ds: tuple of length 2
    :param ds: factor by which to downscale. (2, 2) will halve the image in each dimension.
    :param ignore_border: boolean value. When True, a (5, 5) input with ds=(2, 2) will generate a
        (2, 2) output. (3, 3) otherwise.
    """
    if input.ndim < 2:
        raise NotImplementedError("max_pool_2d requires a dimension >= 2")

    # extract image dimensions
    img_shape = input.shape[-2:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = tensor.prod(input.shape[:-2])
    batch_size = tensor.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = tensor.cast(tensor.join(0, batch_size, tensor.as_tensor([1]), img_shape), "int64")
    input_4D = tensor.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of images
    op = DownsampleFactorMax(ds, ignore_border)
    output = op(input_4D)

    # restore to original shape
    outshp = tensor.join(0, input.shape[:-2], output.shape[-2:])
    return tensor.reshape(output, outshp, ndim=input.ndim)
Author: igul222 | Project: Theano | Lines: 34 | Source: downsample.py
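A quick check of max_pool_2d on a 3-D input (the docstring allows any N-D tensor with N >= 2); the names and shapes below are illustrative:

import numpy as np
import theano
import theano.tensor as T

images = T.tensor3('images')   # e.g. (batch, rows, cols)
f = theano.function([images], max_pool_2d(images, ds=(2, 2), ignore_border=True))

batch = np.random.rand(5, 6, 8).astype(theano.config.floatX)
print(f(batch).shape)          # expected: (5, 3, 4)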
Example 19: build
def build(self, output_type):
    self.params += [self.W_hy, self.b_hy, self.W_hi, self.b_hi]

    for param in self.params:
        self.updates[param] = theano.shared(
            value=np.zeros(
                param.get_value(
                    borrow=True).shape,
                dtype=theano.config.floatX),
            name='updates')

    ### set up regularizer
    self.L1 += T.sum(abs(self.W_hy))
    self.L2_sqr += T.sum(self.W_hy**2)

    ### final prediction formula
    #self.y = T.vector(name='y', dtype='int32')
    self.y_pred = T.dot(self.get_output(), self.W_hy) + self.b_hy
    y_p = self.y_pred
    y_p_m = T.reshape(y_p, (y_p.shape[0] * y_p.shape[1], -1))
    y_p_s = T.nnet.softmax(y_p_m)
    self.p_y_given_x = T.reshape(y_p_s, y_p.shape)

    self.loss = lambda y: Loss.nll_multiclass(self.p_y_given_x, y)
Author: chuckgu | Project: RNN | Lines: 31 | Source: Models.py
Example 20: castray
def castray(ro, rd, shape_params, nprims, width, height):
    tmin = 1.0
    tmax = 20.0
    precis = 0.002
    m = -1.0
    # There is a sequence of distances d1, d2, ..., dn,
    # and the accumulated distances d1, d1+d2, d1+d2+d3, ...
    # What we actually want in the output is, for each ray, the distance to the surface,
    # so we want something like 0, 20, 25, 27, 28, 28, 28, 28, 28
    max_num_steps = 25

    # distcolors = map(ro + rd * 0, width, height)  # FIXME, reshape instead of mul by 0
    distcolors = mapedit(ro + rd * 0, shape_params, nprims, width, height)
    dists = distcolors
    steps = T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
    accum_dists = T.reshape(dists, (width, height, 1))

    for i in range(max_num_steps - 1):
        # distcolors = map(ro + rd * accum_dists, width, height)  # FIXME, reshape instead of mul by 0
        distcolors = mapedit(ro + rd * accum_dists, shape_params, nprims, width, height)  # FIXME, reshape instead of mul by 0
        dists = distcolors
        steps = steps + T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
        accum_dists = accum_dists + T.reshape(dists, (width, height, 1))

    last_depth = T.reshape(accum_dists, (width, height))
    depthmap = T.switch(last_depth < tmax, last_depth / tmax, T.zeros_like(last_depth))
    color = 1.0 - steps / float(max_num_steps)

    # Distance marched along ray and delta between last two steps
    return depthmap
Author: zenna | Project: Arrows.jl | Lines: 31 | Source: iq.py
Note: The theano.tensor.reshape examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.