This article collects typical usage examples of the theano.tensor.tile function in Python. If you are unsure what tile does, how to call it, or what real-world usage looks like, the curated code examples below should help.
The sections below present 20 code examples of the tile function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
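Before diving into the examples, here is a minimal, self-contained sketch of the basic call (illustrative only, not taken from any of the projects below). T.tile(x, reps) repeats a tensor along each axis and mirrors numpy.tile:

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
tiled = T.tile(x, (2, 3))  # repeat 2x along rows, 3x along columns, like numpy.tile
f = theano.function([x], tiled)
out = f(np.ones((2, 2), dtype=theano.config.floatX))
print(out.shape)  # (4, 6)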
Example 1: fwd
def fwd(self, x, V, A, L):
"""
x : signal
V : eigenvectors
A : area
L : eigenvalues
"""
V = V[:,:self.K]
L = L[:self.K]
L = L.dimshuffle('x','x',0)
rho = T.sqrt(T.sum(A))
# Q x 1 x K, a window for each input function
ghat = self.activation_interp(
T.batched_dot(T.tile(L, [self.nin,1,1]), self.Winterp))
# Q x K x N
V_ = T.tile(V.dimshuffle('x',1,0), [self.nin, 1, 1])
# Q x K x N
tmp = (ghat * V).dimshuffle(0,2,1)
# Q x N x N
transl = rho * T.batched_dot(V_.dimshuffle(0,2,1), tmp)
transl = A.dimshuffle('x',0,'x') * transl
# Q x K x N
tmp = (V.dimshuffle(0,'x',1) * x.dimshuffle(0,1,'x')).dimshuffle(1,2,0)
# Q x K x N
desc = rho * T.batched_dot(tmp, transl)
desc = T.abs_(desc)
desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b)
Author: jonathanmasci, Project: ShapeNet, Lines: 34, Source: layers_lscnn.py
Example 2: setup_generate
def setup_generate(self):
# dimensions: (batch, time, 12)
chord_types = T.btensor3()
# dimensions: (batch, time)
chord_roots = T.imatrix()
n_batch, n_time = chord_roots.shape
specs = [lstmstack.prepare_sample_scan( start_pos=T.alloc(np.array(encoding.STARTING_POSITION, np.int32), (n_batch)),
start_out=T.tile(encoding.initial_encoded_form(), (n_batch,1)),
timestep=T.tile(T.arange(n_time), (n_batch,1)),
cur_chord_type=chord_types,
cur_chord_root=chord_roots,
deterministic_dropout=True )
for lstmstack, encoding in zip(self.lstmstacks, self.encodings)]
updates, all_chosen, all_probs, indiv_probs = helper_generate_from_spec(specs, self.lstmstacks, self.encodings, self.srng, n_batch, n_time, self.bounds, self.normalize_artic_only)
self.generate_fun = theano.function(
inputs=[chord_roots, chord_types],
updates=updates,
outputs=all_chosen,
allow_input_downcast=True,
mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
self.generate_visualize_fun = theano.function(
inputs=[chord_roots, chord_types],
updates=updates,
outputs=[all_chosen, all_probs] + indiv_probs,
allow_input_downcast=True,
mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
Author: Impro-Visor, Project: lstmprovisor-python, Lines: 33, Source: product_model.py
Example 3: make_gaussian_filter
def make_gaussian_filter(self):
W_shape = self.get_W_shape()
k = self.filter_size[0]
k_low = int(np.floor(-(k-1)/2))
k_high = k_low+k
W_std = T.exp(self.W_logstd)
std_array = T.tile(
W_std.dimshuffle('x', 0, 'x'),
(self.num_input_channels, 1, k)
)
x = np.arange(k_low, k_high).reshape((1, 1, -1))
x = T.tile(
x, (self.num_input_channels, self.num_input_channels, 1)
).astype(floatX)
p1 = (1./(np.sqrt(2.*np.pi))).astype(floatX)
p2 = np.asarray(2., dtype=floatX)
gf = (p1/std_array)*T.exp(-x**2/(p2*(std_array**2)))
# gf = gf.astype(theano.config.floatX)
mask = np.zeros(W_shape)
rg = np.arange(self.num_input_channels)
mask[rg, rg, :] = 1
mask = mask.astype(floatX)
gf = gf*mask
return gf
Author: tweihaha, Project: aed-by-cnn, Lines: 30, Source: layers.py
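For intuition, the Gaussian window assembled above can be checked numerically with plain numpy; k and sigma here are made-up stand-ins for self.filter_size[0] and exp(W_logstd):

import numpy as np

k = 5
sigma = 1.5
k_low = int(np.floor(-(k - 1) / 2))
xs = np.arange(k_low, k_low + k)  # [-2 -1 0 1 2], the same grid the layer builds
g = (1. / np.sqrt(2. * np.pi)) / sigma * np.exp(-xs**2 / (2. * sigma**2))
print(g)  # a symmetric bell curve peaking at xs == 0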
Example 4: output_probabilistic
def output_probabilistic(self, m_x, v_x):
m_linear = T.dot(m_x, self.m_W[ 0, :, : ]) + T.tile(self.m_b[ 0, :, : ], [ m_x.shape[ 0 ], 1 ])
v_linear = T.dot(m_x**2, self.v_W[ 0, :, : ]) + T.dot(v_x, self.m_W[ 0, :, : ]**2) + T.dot(v_x, self.v_W[ 0, :, : ]) + \
T.tile(self.v_b[ 0, :, : ], [ m_x.shape[ 0 ], 1 ])
if not self.output_layer:
# We compute the mean and variance after the ReLU activation
alpha = m_linear / T.sqrt(v_linear)
gamma = Network_layer.gamma(-alpha)
gamma_robust = -alpha - 1.0 / alpha + 2.0 / alpha**3
gamma_final = T.switch(T.lt(-alpha, T.fill(alpha, 30)), gamma, gamma_robust)
v_aux = m_linear + T.sqrt(v_linear) * gamma_final
m_a = Network_layer.n_cdf(alpha) * v_aux
v_a = m_a * v_aux * Network_layer.n_cdf(-alpha) + Network_layer.n_cdf(alpha) * v_linear * (1 - gamma_final * (gamma_final + alpha))
return (m_a, v_a)
else:
return (m_linear, v_linear)
Author: Riashat, Project: Active-Learning-Bayesian-Convolutional-Neural-Networks, Lines: 25, Source: network_layer.py
Example 5: apply
def apply(self, v):
[h_vals, _], _ = theano.scan(fn=self.step,
sequences = v,
outputs_info = [T.tile(self.h0, (v.shape[1], 1)),
T.tile(self.c0, (v.shape[1], 1))]
)
return h_vals
Author: briancheung, Project: Peano, Lines: 7, Source: rnnet.py
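The T.tile calls above show a pattern that recurs throughout these examples: a shared initial state of shape (1, hidden) is tiled to (batch, hidden) so theano.scan starts every sequence in the batch from the same state. A standalone sketch with made-up sizes:

import numpy as np
import theano
import theano.tensor as T

h0 = theano.shared(np.zeros((1, 4), dtype=theano.config.floatX))  # single initial hidden row
v = T.tensor3('v')  # (time, batch, features)
h0_batch = T.tile(h0, (v.shape[1], 1))  # one copy of h0 per batch element
f = theano.function([v], h0_batch.shape)
print(f(np.zeros((5, 3, 2), dtype=theano.config.floatX)))  # [3 4]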
Example 6: lcn_3d_input
def lcn_3d_input(data, kernel_shape, n_maps):
"""
:param data: [examples, depth, filters, height, width]
:param kernel_shape: int
:param n_maps: int
:return: new_x: [examples, depth, filters, height, width]
"""
# create symbolic variable for the input data
ftensor5 = T.TensorType('float32', [False] * 5)
x = ftensor5()
# # determine the number of maps
# n_maps = data.shape[2]
# create 3d filter that spans across all channels / feature maps
# todo: kernel is not really in 3d; need 3d implementation instead of 2d repeated across third dimension
# todo: alternative is to keep 2d kernel and extend short range given data size in z-plane; change first kernel_sh.
filter_shape = (1, kernel_shape[0], n_maps, kernel_shape[1], kernel_shape[2])
filters = np.resize(gaussian_filter(kernel_shape[1]), filter_shape)
filters = filters / np.sum(filters)
filters = sharedX(filters)
# convolve filter with input signal
convolution_out = conv3d(
signals=x,
filters=filters,
signals_shape=data.shape,
filters_shape=filter_shape,
border_mode='valid'
)
    # for each pixel, remove the mean of its kernel-sized neighborhood
mid_0 = int(np.floor(kernel_shape[0] / 2.))
mid_1 = int(np.floor(kernel_shape[1] / 2.))
mid_2 = int(np.floor(kernel_shape[2] / 2.))
mean = T.tile(convolution_out, (1, 1, n_maps, 1, 1))
padded_mean = T.zeros_like(x)
padded_mean = T.set_subtensor(padded_mean[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], mean)
centered_data = data - padded_mean
    # scale down the norm of each kernel-sized patch if the norm is bigger than 1
sum_sqr_xx = conv3d(signals=T.sqr(data), filters=filters)
denominator = T.tile(T.sqrt(sum_sqr_xx), (1, 1, n_maps, 1, 1))
padded_denominator = T.ones_like(x)
padded_denominator = T.set_subtensor(
padded_denominator[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], denominator
)
per_img_mean = padded_denominator.mean(axis=[1, 2, 3, 4])
divisor = T.largest(
per_img_mean.dimshuffle(0, 'x', 'x', 'x', 'x'),
padded_denominator
)
new_x = centered_data / T.maximum(1., divisor)
# compile theano function
f = theano.function([x], new_x)
return f(data)
Author: dlacombejr, Project: Research, Lines: 60, Source: scaling.py
Example 7: est_log_part_fun
def est_log_part_fun(self):
# init first visible data
v_mean = T.nnet.softmax(self.base_vbias)[0]
v_mean_rep = T.tile(v_mean, (self.numruns,)).reshape((self.numruns, self.model.num_vis))
D = T.tile(T.sum(self.base_vbias, axis=0).dimshuffle('x'), (self.numruns,))
v_samples, updates = theano.scan(fn=self.multinom_sampler,non_sequences=[v_mean_rep, D], n_steps=10)
v = v_samples[-1]
# init logw with beta = 0
logw = - self.log_p_k(v, 0., D)
[logw_list, vs, Ds], updates = theano.scan(self.ais_step, sequences = self.betas[1:], outputs_info = [logw, v, None])
logw = logw_list[-1]
v = vs[-1]
D = Ds[-1]
logw += self.log_p_k(v, 1, D)
r = logsum(logw) - T.log(self.numruns)
log_z_base = T.sum(T.log(1+T.exp(self.base_vbias))) + (self.model.num_hid)*T.log(2)
log_z_est = r + log_z_base
perform_fun = theano.function([], log_z_est, updates=updates)
return perform_fun()
Author: alexeyche, Project: alexeyche-junk, Lines: 26, Source: ais.py
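The tile-then-reshape on v_mean simply replicates one probability row per AIS run. Since T.tile mirrors numpy.tile, the same arithmetic can be checked in numpy (sizes are illustrative):

import numpy as np

v_mean = np.array([0.2, 0.3, 0.5])  # stand-in for softmax(base_vbias)
numruns = 4
v_rep = np.tile(v_mean, (numruns,)).reshape((numruns, v_mean.shape[0]))
print(v_rep)  # four identical rows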
Example 8: recurrence
def recurrence(x_t, h_tm1, c_tm1):
i = T.nnet.sigmoid(T.dot(x_t, self.wi) + T.dot(h_tm1, self.wih) + self.bi) # input gate
c_proposed = T.tanh(T.dot(x_t, self.wc) + T.dot(h_tm1, self.wch) + self.bc) # proposed memory cell content
f = T.nnet.sigmoid(T.dot(x_t, self.wf) + T.dot(h_tm1, self.wfh) + self.bf) # forget gate
c_t = (T.tile(i, self.memory_size) * c_proposed) + (T.tile(f, self.memory_size) * c_tm1) # new memory cell content
o = T.nnet.sigmoid(T.dot(x_t, self.wo) + T.dot(h_tm1, self.woh) + self.bo) # output gate
h_t = T.tile(o, self.memory_size) * T.tanh(c_t)
return [h_t, c_t]
Author: gokererdogan, Project: DeepLearning, Lines: 8, Source: lstm.py
Example 9: setup_generate
def setup_generate(self):
# dimensions: (batch, time, 12)
chord_types = T.btensor3()
# dimensions: (batch, time)
chord_roots = T.imatrix()
n_batch, n_time = chord_roots.shape
spec = self.lstmstack.prepare_sample_scan( start_pos=T.alloc(np.array(self.encoding.STARTING_POSITION, np.int32), (n_batch)),
start_out=T.tile(self.encoding.initial_encoded_form(), (n_batch,1)),
timestep=T.tile(T.arange(n_time), (n_batch,1)),
cur_chord_type=chord_types,
cur_chord_root=chord_roots,
deterministic_dropout=True )
def _scan_fn(*inputs):
# inputs is [ spec_sequences..., last_absolute_position, spec_taps..., spec_non_sequences... ]
inputs = list(inputs)
last_absolute_chosen = inputs.pop(len(spec.sequences))
scan_rout = self.lstmstack.sample_scan_routine(spec, *inputs)
last_rel_pos, last_out, cur_kwargs = scan_rout.send(None)
new_pos = self.encoding.get_new_relative_position(last_absolute_chosen, last_rel_pos, last_out, self.bounds.lowbound, self.bounds.highbound, **cur_kwargs)
addtl_kwargs = {
"last_output": last_out
}
out_activations = scan_rout.send((new_pos, addtl_kwargs))
out_probs = self.encoding.decode_to_probs(out_activations,new_pos,self.bounds.lowbound, self.bounds.highbound)
sampled_note = Encoding.sample_absolute_probs(self.srng, out_probs)
encoded_output = self.encoding.note_to_encoding(sampled_note, new_pos, self.bounds.lowbound, self.bounds.highbound)
scan_outputs = scan_rout.send(encoded_output)
scan_rout.close()
return [sampled_note, out_probs] + scan_outputs
outputs_info = [{"initial":T.zeros((n_batch,),'int32'), "taps":[-1]}, None] + spec.outputs_info
result, updates = theano.scan(fn=_scan_fn, sequences=spec.sequences, non_sequences=spec.non_sequences, outputs_info=outputs_info)
all_chosen = result[0].dimshuffle((1,0))
all_probs = result[1].dimshuffle((1,0,2))
self.generate_fun = theano.function(
inputs=[chord_roots, chord_types],
updates=updates,
outputs=all_chosen,
allow_input_downcast=True,
mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
self.generate_visualize_fun = theano.function(
inputs=[chord_roots, chord_types],
updates=updates,
outputs=[all_chosen, all_probs],
allow_input_downcast=True,
mode=(NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True) if self.nanguard else None))
Author: Impro-Visor, Project: lstmprovisor-python, Lines: 57, Source: simple_rel_model.py
Example 10: mmd_full
def mmd_full(x_t, y_t, alpha=0.5):
""" Implementation of the full kernel MMD statistic (gaussian kernel)"""
N = x_t.shape[1]
M = y_t.shape[1]
term1 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, N) - T.tile(x_t, N))))
term2 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(x_t, M) - T.tile(y_t, N))))
term3 = T.mean(T.exp(-0.5 * (1 / alpha) * T.square(T.repeat(y_t, M) - T.tile(y_t, M))))
return term1 - 2 * term2 + term3
Author: JonnyTran, Project: ML-algorithms, Lines: 9, Source: mmd.py
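The T.repeat/T.tile pairing is a standard trick for enumerating every pairwise difference without an explicit loop: repeat stretches each element in place while tile cycles the whole vector. A numeric check of the idea in numpy, whose repeat and tile behave the same way:

import numpy as np

x = np.array([1., 2., 3.])
n = x.shape[0]
# repeat -> [x0,x0,x0,x1,x1,x1,...]; tile -> [x0,x1,x2,x0,x1,x2,...]
diffs = np.repeat(x, n) - np.tile(x, n)
print(diffs.reshape(n, n))  # entry (i, j) is x_i - x_j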
Example 11: IRNN
def IRNN(n_input, n_hidden, n_output, input_type='real', out_every_t=False, loss_function='CE'):
np.random.seed(1234)
rng = np.random.RandomState(1234)
x, y = initialize_data_nodes(loss_function, input_type, out_every_t)
inputs = [x, y]
h_0 = theano.shared(np.zeros((1, n_hidden), dtype=theano.config.floatX))
V = initialize_matrix(n_input, n_hidden, 'V', rng)
W = theano.shared(np.identity(n_hidden, dtype=theano.config.floatX))
out_mat = initialize_matrix(n_hidden, n_output, 'out_mat', rng)
hidden_bias = theano.shared(np.zeros((n_hidden,), dtype=theano.config.floatX))
out_bias = theano.shared(np.zeros((n_output,), dtype=theano.config.floatX))
parameters = [h_0, V, W, out_mat, hidden_bias, out_bias]
def recurrence(x_t, y_t, h_prev, cost_prev, acc_prev, V, W, hidden_bias, out_mat, out_bias):
if loss_function == 'CE':
data_lin_output = V[x_t]
else:
data_lin_output = T.dot(x_t, V)
h_t = T.nnet.relu(T.dot(h_prev, W) + data_lin_output + hidden_bias.dimshuffle('x', 0))
if out_every_t:
lin_output = T.dot(h_t, out_mat) + out_bias.dimshuffle('x', 0)
cost_t, acc_t = compute_cost_t(lin_output, loss_function, y_t)
else:
cost_t = theano.shared(NP_FLOAT(0.0))
acc_t = theano.shared(NP_FLOAT(0.0))
return h_t, cost_t, acc_t
non_sequences = [V, W, hidden_bias, out_mat, out_bias]
h_0_batch = T.tile(h_0, [x.shape[1], 1])
if out_every_t:
sequences = [x, y]
else:
sequences = [x, T.tile(theano.shared(np.zeros((1,1), dtype=theano.config.floatX)), [x.shape[0], 1, 1])]
outputs_info = [h_0_batch, theano.shared(NP_FLOAT(0.0)), theano.shared(NP_FLOAT(0.0))]
[hidden_states, cost_steps, acc_steps], updates = theano.scan(fn=recurrence,
sequences=sequences,
non_sequences=non_sequences,
outputs_info = outputs_info)
if not out_every_t:
lin_output = T.dot(hidden_states[-1,:,:], out_mat) + out_bias.dimshuffle('x', 0)
costs = compute_cost_t(lin_output, loss_function, y)
else:
cost = cost_steps.mean()
accuracy = acc_steps.mean()
costs = [cost, accuracy]
return inputs, parameters, costs
Author: Nehoroshiy, Project: urnn, Lines: 57, Source: irnn.py
Example 12: weighted_binary_cross_entropy_4
def weighted_binary_cross_entropy_4(pred, target, class_normalization):
# Mix of 0 and 2
# From theano
DIM = pred.shape[1]
BATCH_SIZE = pred.shape[0]
N_on_per_batch = (T.transpose(T.tile(target.sum(axis=1), (DIM, 1))) + 1)
N_off_per_batch = (T.transpose(T.tile((1-target).sum(axis=1), (DIM, 1))) + 1)
class_norm_tile = T.tile(class_normalization, (BATCH_SIZE, 1))
return -(class_norm_tile * target * T.log(pred) / N_on_per_batch + (1.0 - target) * T.log(1.0 - pred) / N_off_per_batch)
Author: aciditeam, Project: acidano, Lines: 9, Source: cost.py
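Here T.tile expands the per-class weight vector to batch size. A side note, not from the original project: Theano's broadcasting can usually express the same product without materializing the tiled tensor, via dimshuffle:

import numpy as np
import theano
import theano.tensor as T

w = T.vector('w')       # per-class normalization, shape (DIM,)
target = T.matrix('t')  # shape (BATCH_SIZE, DIM)
tiled = T.tile(w, (target.shape[0], 1)) * target  # explicit tiling, as in the example
bcast = w.dimshuffle('x', 0) * target             # equivalent via broadcasting
f = theano.function([w, target], [tiled, bcast])
a, b = f(np.array([1., 2.], dtype=theano.config.floatX),
         np.ones((3, 2), dtype=theano.config.floatX))
print(np.allclose(a, b))  # True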
Example 13: get_input_vectors
def get_input_vectors(shape, phases, scaling, offset):
x = T.repeat(offset[0] + T.arange(shape[0]) / scaling, shape[1] * phases).reshape(
(shape[0], shape[1], phases)) * T.pow(2, T.arange(phases))
y = T.repeat(T.tile(offset[1] + T.arange(shape[1]) / scaling, shape[0]).reshape(
(shape[0], shape[1], 1)), phases, axis=2) * T.pow(2, T.arange(phases))
z = T.tile(offset[2] + 10 * T.arange(phases), shape[0] * shape[1]).reshape((shape[0], shape[1], phases, 1))
x = x.reshape((shape[0], shape[1], phases, 1))
y = y.reshape((shape[0], shape[1], phases, 1))
return T.concatenate([x, y, z], axis=3).reshape((shape[0] * shape[1] * phases, 3)).astype('float32')
Author: pinae, Project: simplexnoise, Lines: 9, Source: theano-simplex-matrix.py
Example 14: initial_states
def initial_states(self, batch_size, *args, **kwargs):
states_dict = self.fst.expand({self.fst.fst.start: 0.0})
states = tensor.as_tensor_variable(
self.transition.pad(states_dict.keys(), NOT_STATE))
states = tensor.tile(states[None, :], (batch_size, 1))
weights = tensor.as_tensor_variable(
self.transition.pad(states_dict.values(), 0))
weights = tensor.tile(weights[None, :], (batch_size, 1))
add = self.probability_computer(states, weights)
return states, weights, add
Author: DingKe, Project: attention-lvcsr, Lines: 10, Source: language_models.py
Example 15: __init__
def __init__(self, rng, input, num_filters, input_shape):
self.K = num_filters
self.N = input_shape[2] * input_shape[3]
self.D = input_shape[1]
self.B = input_shape[0]
self.input = input
filter_shape = (self.K, self.D, 1, 1)
fan_in = numpy.prod(filter_shape[1:])
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]))
W_bound = numpy.sqrt(6. / (fan_in + fan_out))
self.W = theano.shared(
numpy.asarray(
rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
self.b = theano.shared(value=b_values, borrow=True)
c_bound = numpy.sqrt(1. / (self.K * self.D))
self.c = theano.shared(
numpy.asarray(
rng.uniform(low=-c_bound, high=c_bound, size=(self.K, self.D)),
dtype=theano.config.floatX
),
borrow=True
)
conved = conv2d(input, self.W,
input_shape=input_shape,
filter_shape=filter_shape)
conved = conved + self.b.dimshuffle('x', 0, 'x', 'x')
conved = conved.reshape((self.B, self.K, self.N))
a = self.softmax3d(conved)
x = input.reshape((self.B, self.D, self.N))
v = theano.shared(numpy.zeros((self.B, self.K, self.D), dtype=theano.config.floatX))
for k in range(self.K):
ar = T.tile(a[:,k], (1,self.D)).reshape((self.B, self.D, self.N))
cr = T.tile(self.c[k].reshape((1,self.D,1)), (self.B, 1, self.N))
vr = (ar*(x+cr)).sum(2)
g = T.sqrt((vr**2).sum(1)) # add eps?
v = T.set_subtensor(v[:,k,:], vr/T.tile(g.reshape((self.B, 1)), (1, self.D)))
# v = v/T.sqrt((v**2).sum()) # whole normalize
self.output = v
self.params = [self.W, self.b, self.c]
Author: forwchen, Project: vlad, Lines: 54, Source: layers.py
Example 16: _loopoverallball
def _loopoverallball(self, ballid, batchid):
    ox = self.middle[batchid][ballid * 2].reshape((1, 1))
    print("ox:", ox.ndim)
    x = T.tile(ox, (self.height, self.width))
    oy = self.middle[batchid][ballid * 2 + 1].reshape((1, 1))
    y = T.tile(oy, (self.height, self.width))
    # integer coordinate grids: w holds the column index, h the row index of each pixel
    w = T.tile(T.arange(0, self.width), (self.height,)).reshape((self.height, self.width))
    h = T.tile(T.arange(0, self.height).reshape((self.height, 1)), (1, self.width))
    cof = (T.pow(x - w, 2) + T.pow(y - h, 2)) * (-1.0 / self.sigma)
    print(T.exp(cof).ndim)
    return T.exp(cof)
Author: caomw, Project: CNNHandPoseEstimationTotal, Lines: 11, Source: customtest.py
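The w and h tensors above are integer coordinate grids (column and row index of every pixel) built with tile; numpy shows the same construction:

import numpy as np

height, width = 3, 4
w = np.tile(np.arange(width), (height,)).reshape((height, width))  # each row is [0 1 2 3]
h = np.tile(np.arange(height).reshape((height, 1)), (1, width))    # each column is [0 1 2]
print(w)
print(h)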
Example 17: get_output_for
def get_output_for(self, input, get_details=False, **kwargs):
input = input.dimshuffle(1, 0, 2)
def step(x_t, M_tm1, h_tm1, state_tm1, ww_tm1, wr_tm1, *params):
# Update the memory (using w_tm1 of the writing heads & M_tm1)
M_t = self.write_heads.write(h_tm1, ww_tm1, M_tm1)
# Get the read vector (using w_tm1 of the reading heads & M_t)
r_t = self.read_heads.read(wr_tm1, M_t)
# Apply the controller (using x_t, r_t & the requirements for the controller)
h_t, state_t = self.controller.step(x_t, r_t, h_tm1, state_tm1)
# Update the weights (using h_t, M_t & w_tm1)
ww_t = self.write_heads.get_weights(h_t, ww_tm1, M_t)
wr_t = self.read_heads.get_weights(h_t, wr_tm1, M_t)
return [M_t, h_t, state_t, ww_t, wr_t]
memory_init = T.tile(self.memory.memory_init, (input.shape[1], 1, 1))
memory_init = T.unbroadcast(memory_init, 0)
write_weights_init = T.tile(self.write_heads.weights_init, (input.shape[1], 1, 1))
write_weights_init = T.unbroadcast(write_weights_init, 0)
read_weights_init = T.tile(self.read_heads.weights_init, (input.shape[1], 1, 1))
read_weights_init = T.unbroadcast(read_weights_init, 0)
non_seqs = self.controller.get_params() + self.memory.get_params() + \
self.write_heads.get_params() + self.read_heads.get_params()
hids, _ = theano.scan(
fn=step,
sequences=input,
outputs_info=[memory_init] + self.controller.outputs_info(input.shape[1]) + \
[write_weights_init, read_weights_init],
non_sequences=non_seqs,
strict=True)
# dimshuffle back to (n_batch, n_time_steps, n_features)
if get_details:
hid_out = [
hids[0].dimshuffle(1, 0, 2, 3),
hids[1].dimshuffle(1, 0, 2),
hids[2].dimshuffle(1, 0, 2),
hids[3].dimshuffle(1, 0, 2, 3),
hids[4].dimshuffle(1, 0, 2, 3)]
else:
if self.only_return_final:
hid_out = hids[1][-1]
else:
hid_out = hids[1].dimshuffle(1, 0, 2)
return hid_out
Author: cequencer, Project: ntm-lasagne, Lines: 54, Source: layers.py
Example 18: log_f_hat
def log_f_hat(self):
v_W = 1.0 / (1.0 / self.N * (1.0 / self.v_W - 1.0 / self.v_prior))
m_W = 1.0 / self.N * self.m_W / self.v_W * v_W
v_b = 1.0 / (1.0 / self.N * (1.0 / self.v_b - 1.0 / self.v_prior))
m_b = 1.0 / self.N * self.m_b / self.v_b * v_b
log_f_hat_W = T.sum(-0.5 * T.tile(1.0 / v_W, [ self.n_samples, 1, 1 ]) * self.W**2 + \
T.tile(m_W / v_W, [ self.n_samples, 1, 1 ]) * self.W, axis = [ 1, 2 ], keepdims = True)[ :, :, 0 ]
log_f_hat_b = T.sum(-0.5 * T.tile(1.0 / v_b, [ self.n_samples, 1, 1 ]) * self.b**2 + \
T.tile(m_b / v_b, [ self.n_samples, 1, 1 ]) * self.b, axis = [ 1, 2 ], keepdims = True)[ :, :, 0 ]
return log_f_hat_W + log_f_hat_b
Author: Riashat, Project: Active-Learning-Bayesian-Convolutional-Neural-Networks, Lines: 13, Source: network_layer.py
Example 19: getKMeansLoss
def getKMeansLoss(self, latent_space_expression, soft_assignments, t_cluster_centers, num_clusters, latent_space_dim, num_samples, soft_loss=False):
# Kmeans loss = weighted sum of latent space representation of inputs from the cluster centers
z = latent_space_expression.reshape((num_samples, 1, latent_space_dim))
z = T.tile(z, (1, num_clusters, 1))
u = t_cluster_centers.reshape((1, num_clusters, latent_space_dim))
u = T.tile(u, (num_samples, 1, 1))
distances = (z - u).norm(2, axis=2).reshape((num_samples, num_clusters))
if soft_loss:
weighted_distances = distances * soft_assignments
loss = weighted_distances.sum(axis=1).mean()
else:
loss = distances.min(axis=1).mean()
return loss
Author: XJTUeducation, Project: Clustering-with-Deep-learning, Lines: 13, Source: network.py
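The two tiles lift z and u onto a common (samples, clusters, dim) grid so the norm can be taken along the last axis. The same computation in numpy, with made-up sizes:

import numpy as np

num_samples, num_clusters, dim = 5, 3, 2
z = np.random.randn(num_samples, dim)   # latent representations
u = np.random.randn(num_clusters, dim)  # cluster centers
z3 = np.tile(z.reshape(num_samples, 1, dim), (1, num_clusters, 1))
u3 = np.tile(u.reshape(1, num_clusters, dim), (num_samples, 1, 1))
dists = np.linalg.norm(z3 - u3, axis=2)
print(dists.shape)  # (5, 3): distance from each sample to each center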
Example 20: update_sample_weights
def update_sample_weights(self):
# We update the mean and variances of q
self.v_W = self.v_prior * self.logistic(self.log_var_param_W)
self.m_W = self.mean_param_W
self.v_b = self.v_prior * self.logistic(self.log_var_param_b)
self.m_b = self.mean_param_b
# We update the random samples for the network weights
self.W = self.randomness_W * T.tile(T.sqrt(self.v_W), [ self.n_samples, 1, 1 ]) + T.tile(self.m_W, [ self.n_samples, 1, 1 ])
self.b = self.randomness_b * T.tile(T.sqrt(self.v_b), [ self.n_samples, 1, 1 ]) + T.tile(self.m_b, [ self.n_samples, 1, 1 ])
Author: Riashat, Project: Active-Learning-Bayesian-Convolutional-Neural-Networks, Lines: 14, Source: network_layer.py
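This is the reparameterization trick: fixed standard-normal noise is scaled by the posterior standard deviation and shifted by the posterior mean, both tiled across the sample axis. A numpy sketch with made-up shapes:

import numpy as np

n_samples, rows, cols = 4, 2, 3
m = np.zeros((rows, cols))                    # stand-in for m_W
v = np.full((rows, cols), 0.25)               # stand-in for v_W
eps = np.random.randn(n_samples, rows, cols)  # stand-in for randomness_W
W = eps * np.tile(np.sqrt(v), (n_samples, 1, 1)) + np.tile(m, (n_samples, 1, 1))
print(W.shape)  # (4, 2, 3)
print(W.std())  # roughly sqrt(0.25) = 0.5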
Note: the theano.tensor.tile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code remains with the original authors. For distribution and use, consult the corresponding project's license. Do not reproduce without permission.