This article collects typical usage examples of Python's theano.tensor.switch function. If you have been wondering what exactly switch does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following 20 code examples of the switch function are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
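Before diving into the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what T.switch does: it evaluates a condition elementwise and picks values from its second or third argument, broadcasting all three.

import theano
import theano.tensor as T

x = T.vector('x')
# keep x where it is positive, otherwise return 0 -- a ReLU built from switch
y = T.switch(x > 0, x, 0.0)
f = theano.function([x], y)
print(f([-1.0, 2.0, -0.5, 3.0]))  # [ 0.  2.  0.  3.]

Unlike a Python if, both branches are always evaluated symbolically, which is why switch appears so often in the masking and numerical-guard patterns below.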
Example 1: mindist
def mindist(translate, min_so_far, ro, rd):
    # ro: 3
    # translate: nbatch * 3
    # min_so_far: nbatch * width * height
    # rd: width * height * 3
    ro = ro + translate
    # d_o = T.dot(rd, ro) # 640, 480
    # d_o = dotty(rd, ro, axis=1)
    d_o = T.tensordot(rd, ro, axes=[2, 1])
    o_o = T.sum(ro**2, axis=1)
    b = 2 * d_o
    c = o_o - 0.001  # FIXME, remove this squaring
    inner = b**2 - 4 * c  # 640 480
    does_not_intersect = inner < 0.0
    minus_b = -b
    # sqrt_inner = T.sqrt(T.maximum(0.0001, inner))
    eps = 1e-9
    background_dist = 10.0
    sqrt_inner = T.sqrt(T.maximum(eps, inner))
    root1 = (minus_b - sqrt_inner) / 2.0
    root2 = (minus_b + sqrt_inner) / 2.0
    depth = T.switch(does_not_intersect, background_dist,
                     T.switch(root1 > 0, root1,
                              T.switch(root2 > 0, root2, background_dist)))
    return T.min([min_so_far, depth], axis=0)
Developer ID: zenna, Project: ig, Lines of code: 25, Source file: render.py
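The nested T.switch calls above pick the nearest positive ray/sphere intersection and fall back to a background depth. The same three-way selection pattern in isolation (a sketch with made-up inputs, not part of the ig project):

import theano
import theano.tensor as T

root1, root2 = T.vectors('root1', 'root2')
background_dist = 10.0
# prefer the nearer root if it lies in front of the camera, then the
# farther one, and finally the background
depth = T.switch(root1 > 0, root1,
                 T.switch(root2 > 0, root2, background_dist))
pick = theano.function([root1, root2], depth)
print(pick([1.5, -1.0, -1.0], [2.0, 3.0, -2.0]))  # [ 1.5  3.  10. ]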
Example 2: s_logprior
def s_logprior(self, s_params, strength=10.0):
    # -- I don't know what distribution this would be
    #    but I think it makes a nice shape
    s_alpha, s_cond_x, s_cond_y = self.unpack(s_params)
    n_alpha_min = self._alpha_from_l(self._lenscales_min)
    n_alpha_max = self._alpha_from_l(self._lenscales_max)
    # return strength * (alpha - alpha_min) ** 2
    log0 = -10000
    width = n_alpha_max - n_alpha_min
    # alpha_mean = 0.5 * (alpha_max + alpha_min)
    energy = strength * 0.5 * (s_alpha - n_alpha_max) ** 2 / width ** 2
    lenscale_logprior = TT.switch(s_alpha < n_alpha_min,
                                  log0,
                                  TT.switch(s_alpha < n_alpha_max,
                                            -energy,
                                            log0)).sum()
    if self._conditional:
        diff_x = s_cond_x
        diff_y = s_cond_y - 1
        rval = (lenscale_logprior
                + TT.dot(diff_x, diff_x)
                + TT.dot(diff_y, diff_y))
    else:
        rval = lenscale_logprior
    assert rval.ndim == 0
    return rval
Developer ID: gopal-m, Project: hyperopt-gpsmbo, Lines of code: 26, Source file: prodkernels.py
Example 3: backward_V_step
def backward_V_step(rewards, is_alive, next_Vpred, time_i,
                    next_Vref,
                    *args):
    propagated_Vref = T.switch(is_alive,
                               rewards + gamma_or_gammas * next_Vref,  # assumes optimal next action
                               0.)
    if n_steps is None:
        this_Vref = propagated_Vref
    else:
        Vref_at_tmax = T.switch(is_alive,
                                rewards + gamma_or_gammas * next_Vpred,
                                0.)
        this_Vref = T.switch(T.eq(time_i % n_steps, 0),  # if Tmax
                             Vref_at_tmax,      # use special case values
                             propagated_Vref)   # else use generic ones
    return this_Vref
Developer ID: louiekang, Project: AgentNet, Lines of code: 30, Source file: a2c_n_step.py
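Here T.switch acts as a mask over batch entries: finished episodes (is_alive == 0) contribute a zero target instead of a bootstrapped value. A standalone sketch of the idiom (variable names are illustrative, not AgentNet's API):

import theano
import theano.tensor as T

rewards, is_alive, next_v = T.vectors('rewards', 'is_alive', 'next_v')
gamma = 0.99
# dead steps get target 0 instead of reward + gamma * V(s')
target = T.switch(is_alive, rewards + gamma * next_v, 0.)
f = theano.function([rewards, is_alive, next_v], target)
print(f([1.0, 1.0], [1.0, 0.0], [5.0, 5.0]))  # [ 5.95  0.  ]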
Example 4: T_subspacel1_slow_shrinkage_conv
def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp, kshp, featshp, stride=(1, 1), small_value=.001):
    featshp = (imshp[0], kshp[0], featshp[2], featshp[3])  # num images, features, szy, szx
    features = T.reshape(T.transpose(a), featshp, ndim=4)
    amp = T.sqrt(features[:, ::2, :, :]**2 + features[:, 1::2, :, :]**2 + small_value)
    # damp = amp[:, 1:] - amp[:, :-1]
    # compose slow shrinkage with subspace l1 shrinkage
    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[1:, :, :, :] - amp[:-1, :, :, :]
    d2 = d1[1:, :, :, :] - d1[:-1, :, :, :]
    div = T.set_subtensor(div[1:-1, :, :, :], -d2)
    div = T.set_subtensor(div[0, :, :, :], -d1[0, :, :, :])
    div = T.set_subtensor(div[-1, :, :, :], d1[-1, :, :, :])
    slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
    slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :, :]
    slow_shrinkage_prox_b = slow_amp_value * features[:, 1::2, :, :]
    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a ** 2 + slow_shrinkage_prox_b ** 2)
    # amp_shrinkage = 1. - (lam_slow * lam_sparse / L) * amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
    subspacel1_prox = T.zeros_like(features)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :, :], amp_value * slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, 1::2, :, :], amp_value * slow_shrinkage_prox_b)
    reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox, (featshp[0], featshp[1] * featshp[2] * featshp[3]), ndim=2))
    return reshape_subspacel1_prox
Developer ID: baylabs, Project: hdl, Lines of code: 32, Source file: theano_methods.py
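Both T.switch calls above take the positive part of a shrinkage factor; T.switch(T.gt(x, 0), x, 0) is equivalent to T.maximum(x, 0.). A quick check of that equivalence (my own sketch, not from hdl):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
via_switch = T.switch(T.gt(x, 0.), x, 0.)
via_maximum = T.maximum(x, 0.)
f = theano.function([x], [via_switch, via_maximum])
a, b = f([-1.0, 0.5, 2.0])
print(np.allclose(a, b))  # True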
Example 5: create_cost_fun
def create_cost_fun(self):
    # create a cost function that
    # takes each prediction at every timestep
    # and guesses next timestep's value:
    what_to_predict = self.input_mat[:, 1:]
    # because some sentences are shorter, we
    # place masks where the sentences end:
    # (for_how_long is zero indexed, e.g. an example going from `[2,3)`
    # has this value set to 0; here we subtract 1):
    for_how_long = self.for_how_long - 1
    # all sentences start at T=0:
    starting_when = T.zeros_like(self.for_how_long)
    self.lstm_cost = masked_loss(self.lstm_predictions,
                                 what_to_predict,
                                 for_how_long,
                                 starting_when).sum()
    zero_entropy = T.zeros_like(self.entropy)
    real_entropy = T.switch(self.mask_matrix, self.entropy, zero_entropy)
    zero_key_entropy = T.zeros_like(self.key_entropy)
    real_key_entropy = T.switch(self.mask_matrix, self.key_entropy, zero_key_entropy)
    self.final_cost = (masked_loss(self.final_predictions,
                                   what_to_predict,
                                   for_how_long,
                                   starting_when).sum()
                       + self.entropy_reg * real_entropy.sum()
                       + self.key_entropy_reg * real_key_entropy.sum())
Developer ID: darongliu, Project: Lstm_Turing_LM, Lines of code: 28, Source file: lm_v4.py
Example 6: out_shape
def out_shape(imgshape, ds, ignore_border=False):
    """Return the shape of the output from this op, for input of given shape and flags.

    :param imgshape: the shape of a tensor of images. The last two elements are interpreted
        as the number of rows, and the number of cols.
    :type imgshape: tuple, list, or similar of integer or scalar Theano variable.

    :param ds: downsample factor over rows and columns
    :type ds: list or tuple of two ints

    :param ignore_border: if ds doesn't divide imgshape, do we include an extra row/col of
        partial downsampling (False) or ignore it (True).
    :type ignore_border: bool

    :rtype: list
    :returns: the shape of the output from this op, for input of given shape. This will
        have the same length as imgshape, but with the last two elements reduced as per the
        downsampling & ignore_border flags.
    """
    if len(imgshape) < 2:
        raise TypeError("imgshape must have at least two elements (rows, cols)")
    r, c = imgshape[-2:]
    rval = list(imgshape[:-2]) + [r // ds[0], c // ds[1]]
    if not ignore_border:
        if isinstance(r, theano.Variable):
            rval[-2] = tensor.switch(r % ds[0], rval[-2] + 1, rval[-2])
        elif r % ds[0]:
            rval[-2] += 1
        if isinstance(c, theano.Variable):
            rval[-1] = tensor.switch(c % ds[1], rval[-1] + 1, rval[-1])
        elif c % ds[1]:
            rval[-1] += 1
    return rval
Developer ID: igul222, Project: Theano, Lines of code: 35, Source file: downsample.py
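In out_shape, tensor.switch implements a symbolic ceiling division: the output gains one extra row or column whenever the pool size does not divide the image size. The idiom in isolation (a sketch, assuming integer inputs):

import theano
import theano.tensor as tensor

r = tensor.iscalar('rows')
ds = 3
# ceil(r / ds): add a partial row whenever r % ds is nonzero
out_rows = tensor.switch(r % ds, r // ds + 1, r // ds)
f = theano.function([r], out_rows)
print(f(10), f(9))  # 4 3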
Example 7: mcmc
def mcmc(ll, *frvs):
    full_observations = dict(observations)
    full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, frvs)]))
    loglik = -full_log_likelihood(full_observations)
    proposals = free_RVs_prop
    H = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals]) / 2. + loglik
    # -- this should be an inner loop
    g = []
    g.append(tensor.grad(loglik, frvs))
    proposals = [(p - epsilon * gg[0] / 2.) for p, gg in zip(proposals, g)]
    rvsp = [(rvs + epsilon * rvp) for rvs, rvp in zip(frvs, proposals)]
    full_observations = dict(observations)
    full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, rvsp)]))
    new_loglik = -full_log_likelihood(full_observations)
    gnew = []
    gnew.append(tensor.grad(new_loglik, rvsp))
    proposals = [(p - epsilon * gn[0] / 2.) for p, gn in zip(proposals, gnew)]
    # --
    Hnew = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals]) / 2. + new_loglik
    dH = Hnew - H
    accept = tensor.or_(dH < 0., U < tensor.exp(-dH))
    return [tensor.switch(accept, -new_loglik, ll)] + \
        [tensor.switch(accept, p, f) for p, f in zip(rvsp, frvs)], \
        {}, theano.scan_module.until(accept)
Developer ID: helson73, Project: MonteTheano, Lines of code: 34, Source file: sample.py
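The returned tensor.switch expressions implement the Metropolis accept/reject step: when accept is true the proposed values replace the current ones, otherwise the old state is kept. Reduced to its core (a sketch, not MonteTheano's API):

import theano
import theano.tensor as T

accept = T.scalar('accept')  # 1.0 to accept the proposal, 0.0 to reject
proposal, current = T.vectors('proposal', 'current')
new_state = T.switch(accept, proposal, current)
step = theano.function([accept, proposal, current], new_state)
print(step(1.0, [0.3, 0.7], [0.0, 0.0]))  # [ 0.3  0.7]
print(step(0.0, [0.3, 0.7], [0.0, 0.0]))  # [ 0.  0.]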
Example 8: _activation
def _activation(self, Y, L, M, W):
    """Returns the activation for a given input.

    Derived from the generative model formulation of hierarchical
    Poisson mixtures, the formula for the activation in the network
    reads as follows:
    I_c =
        \sum_d \log(W_{cd}) y_d + \log(M_{lc})        for labeled data
        \sum_d \log(W_{cd}) y_d + \log(\sum_k M_{kc}) for unlabeled data
    s_c = softmax(I_c)
    """
    # first: complete inference to find label
    # Input integration:
    I = T.tensordot(Y, T.log(W), axes=[1, 1])
    # recurrent term:
    vM = M[L]
    L_index = T.eq(L, -1).nonzero()
    vM = T.set_subtensor(vM[L_index], T.sum(M, axis=0))
    # numeric trick to prevent overflow in the exp-function
    max_exponent = 86. - T.ceil(T.log(I.shape[1].astype('float32')))
    scale = T.switch(
        T.gt(T.max(I, axis=1, keepdims=True), max_exponent),
        T.max(I, axis=1, keepdims=True) - max_exponent,
        0.)
    # numeric approximation to prevent underflow in the exp-function:
    # map too low values of I to a fixed minimum value
    min_exponent = -87. + T.ceil(T.log(I.shape[1].astype('float32')))
    I = T.switch(
        T.lt(I - scale, min_exponent),
        scale + min_exponent,
        I)
    # activation: recurrent softmax with overflow protection
    s = vM * T.exp(I - scale) / T.sum(vM * T.exp(I - scale), axis=1, keepdims=True)
    return s
Developer ID: smajida, Project: NeSi, Lines of code: 34, Source file: poisson_theano_scan.py
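The two T.switch calls above clamp the exponents fed into T.exp so that the softmax neither overflows nor underflows in float32. The overflow half of the trick on its own (a sketch using the same 86 threshold as above):

import theano
import theano.tensor as T

I = T.matrix('I')
max_exponent = 86.
row_max = T.max(I, axis=1, keepdims=True)
# shift a row down only when its largest entry would overflow exp()
scale = T.switch(T.gt(row_max, max_exponent), row_max - max_exponent, 0.)
safe_exp = T.exp(I - scale)
f = theano.function([I], safe_exp)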
Example 9: mixture_model
def mixture_model(random_seed=1234):
    """Sample mixture model to use in benchmarks"""
    np.random.seed(1234)
    size = 1000
    w_true = np.array([0.35, 0.4, 0.25])
    mu_true = np.array([0., 2., 5.])
    sigma = np.array([0.5, 0.5, 1.])
    component = np.random.choice(mu_true.size, size=size, p=w_true)
    x = np.random.normal(mu_true[component], sigma[component], size=size)

    with pm.Model() as model:
        w = pm.Dirichlet('w', a=np.ones_like(w_true))
        mu = pm.Normal('mu', mu=0., sd=10., shape=w_true.shape)
        enforce_order = pm.Potential('enforce_order', tt.switch(mu[0] - mu[1] <= 0, 0., -np.inf) +
                                                      tt.switch(mu[1] - mu[2] <= 0, 0., -np.inf))
        tau = pm.Gamma('tau', alpha=1., beta=1., shape=w_true.shape)
        pm.NormalMixture('x_obs', w=w, mu=mu, tau=tau, observed=x)

    # Initialization can be poorly specified, this is a hack to make it work
    start = {
        'mu': mu_true.copy(),
        'tau_log__': np.log(1. / sigma**2),
        'w_stickbreaking__': np.array([-0.03, 0.44])
    }
    return model, start
Developer ID: alexander-belikov, Project: pymc3, Lines of code: 25, Source file: benchmarks.py
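The two tt.switch terms inside pm.Potential add 0 to the model's log-probability when the means are ordered and -inf when they are not, so the sampler never accepts a permuted labeling of the mixture components. The building block by itself (a sketch outside of any PyMC3 model):

import numpy as np
import theano
import theano.tensor as tt

mu = tt.vector('mu')
# 0 when mu[0] <= mu[1], -inf otherwise
penalty = tt.switch(mu[0] - mu[1] <= 0, 0., -np.inf)
f = theano.function([mu], penalty)
print(f([0., 2.]), f([2., 0.]))  # 0.0 -inf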
Example 10: adamgc
def adamgc(cost, params, lr=0.0002, b1=0.1, b2=0.001, e=1e-8, max_magnitude=5.0, infDecay=0.1):
    updates = []
    grads = T.grad(cost, params)
    norm = norm_gs(params, grads)
    sqrtnorm = T.sqrt(norm)
    not_finite = T.or_(T.isnan(sqrtnorm), T.isinf(sqrtnorm))
    adj_norm_gs = T.switch(T.ge(sqrtnorm, max_magnitude), max_magnitude / sqrtnorm, 1.)

    i = shared(floatX(0.))
    i_t = i + 1.
    fix1 = 1. - (1. - b1)**i_t
    fix2 = 1. - (1. - b2)**i_t
    lr_t = lr * (T.sqrt(fix2) / fix1)

    for p, g in zip(params, grads):
        g = T.switch(not_finite, infDecay * p, g * adj_norm_gs)
        m = shared(p.get_value() * 0.)
        v = shared(p.get_value() * 0.)
        m_t = (b1 * g) + ((1. - b1) * m)
        v_t = (b2 * T.sqr(g)) + ((1. - b2) * v)
        g_t = m_t / (T.sqrt(v_t) + e)
        p_t = p - (lr_t * g_t)
        updates.append((m, m_t))
        updates.append((v, v_t))
        updates.append((p, p_t))
    updates.append((i, i_t))
    return updates, norm
Developer ID: Weichern, Project: Theano-Lights, Lines of code: 27, Source file: toolbox.py
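adamgc uses T.switch twice: once to compute a rescaling factor that clips the global gradient norm, and once to replace non-finite gradients. The clipping half in isolation (a sketch; the non-finite fallback here zeroes the gradient instead of using infDecay * p, since no parameter is in scope):

import theano
import theano.tensor as T

g = T.vector('g')
max_magnitude = 5.0
norm = T.sqrt(T.sum(g ** 2))
scale = T.switch(T.ge(norm, max_magnitude), max_magnitude / norm, 1.)
not_finite = T.or_(T.isnan(norm), T.isinf(norm))
clipped = T.switch(not_finite, T.zeros_like(g), g * scale)
f = theano.function([g], clipped)
print(f([6.0, 8.0]))  # [ 3.  4.] -- norm 10 rescaled down to 5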
Example 11: _get_targets
def _get_targets(y, log_y_hat, y_mask, y_hat_mask):
    '''
    Returns the target values according to the CTC cost with respect to y_hat.
    Note that this is part of the gradient with respect to the softmax output
    and not with respect to the input of the original softmax function.
    All computations are done in log scale.
    '''
    num_classes = log_y_hat.shape[2] - 1
    blanked_y, blanked_y_mask = _add_blanks(
        y=y,
        blank_symbol=num_classes,
        y_mask=y_mask)

    log_alpha, log_beta = _log_forward_backward(blanked_y,
                                                log_y_hat, blanked_y_mask,
                                                y_hat_mask, num_classes)
    # explicitly not using a mask to prevent inf - inf
    y_prob = _class_batch_to_labeling_batch(blanked_y, log_y_hat,
                                            y_hat_mask=None)
    marginals = log_alpha + log_beta - y_prob
    max_marg = marginals.max(2)
    max_marg = T.switch(T.le(max_marg, -np.inf), 0, max_marg)
    log_Z = T.log(T.exp(marginals - max_marg[:, :, None]).sum(2))
    log_Z = log_Z + max_marg
    log_Z = T.switch(T.le(log_Z, -np.inf), 0, log_Z)
    targets = _labeling_batch_to_class_batch(blanked_y,
                                             T.exp(marginals -
                                                   log_Z[:, :, None]),
                                             num_classes + 1)
    return targets
Developer ID: trungnt13, Project: dnntoolkit, Lines of code: 30, Source file: ctc_cost.py
Example 12: __init__
def __init__(self, rng, f='ReLU', g=lambda x: x, params=None):
    if f == 'ReLU':
        if hasattr(T.nnet, 'relu'):
            self.f = T.nnet.relu
        else:
            self.f = lambda x: T.switch(x < 0, 0, x)
        self.g = lambda x: x
    elif f == 'PReLU':
        # Avoids dying ReLU units
        if hasattr(T.nnet, 'relu'):
            self.f = lambda x: T.nnet.relu(x, alpha=0.01)
        else:
            # the original fallback referenced an undefined `a`; 0.01 matches
            # the slope used in the branch above
            self.f = lambda x: T.switch(x <= 0, 0.01 * x, x)
        self.g = lambda x: x
    elif f == 'tanh':
        self.f = T.tanh
        self.g = T.arctanh
    elif f == 'sigmoid':
        self.f = T.nnet.sigmoid
        self.g = lambda x: x
    elif f == 'softmax':
        self.f = T.nnet.softmax
        self.g = lambda x: x
    elif f == 'softplus':
        self.f = T.nnet.softplus
        self.g = lambda x: x
    elif f == 'identity':
        self.f = lambda x: x
        self.g = lambda x: x
    else:
        self.f = f
        self.g = g
    self.params = [] if params is None else params
Developer ID: Brimborough, Project: deep-motion-analysis, Lines of code: 34, Source file: ActivationLayer.py
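The T.switch fallbacks reproduce ReLU and leaky ReLU elementwise for Theano versions whose T.nnet lacks relu (hence the hasattr checks). A quick sketch comparing the two expressions:

import theano
import theano.tensor as T

x = T.vector('x')
relu = T.switch(x < 0, 0, x)
leaky = T.switch(x <= 0, 0.01 * x, x)
f = theano.function([x], [relu, leaky])
print(f([-2.0, 3.0]))  # [array([ 0.,  3.]), array([-0.02,  3.  ])]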
Example 13: dlogp
def dlogp(inputs, gradients):
    g_logp, = gradients
    cov, delta = inputs

    g_logp.tag.test_value = floatX(1.)
    n, k = delta.shape

    chol_cov = cholesky(cov)
    diag = tt.nlinalg.diag(chol_cov)
    ok = tt.all(diag > 0)

    chol_cov = tt.switch(ok, chol_cov, tt.fill(chol_cov, 1))
    delta_trans = solve_lower(chol_cov, delta.T).T

    inner = n * tt.eye(k) - tt.dot(delta_trans.T, delta_trans)
    g_cov = solve_upper(chol_cov.T, inner)
    g_cov = solve_upper(chol_cov.T, g_cov.T)

    tau_delta = solve_upper(chol_cov.T, delta_trans.T)
    g_delta = tau_delta.T

    g_cov = tt.switch(ok, g_cov, -np.nan)
    g_delta = tt.switch(ok, g_delta, -np.nan)

    return [-0.5 * g_cov * g_logp, -g_delta * g_logp]
Developer ID: alexander-belikov, Project: pymc3, Lines of code: 25, Source file: dist_math.py
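Here tt.switch plays a double role: it first replaces a failed Cholesky factor with a dummy matrix of ones so the downstream solves stay well-defined, and then overwrites the final gradients with nan when the covariance was not positive definite. The nan-propagation guard on its own (a sketch, not the pymc3 code):

import numpy as np
import theano
import theano.tensor as tt

diag, g = tt.vectors('diag', 'g')
ok = tt.all(diag > 0)
# poison the result with nan when the factorization failed
g_safe = tt.switch(ok, g, np.nan)
f = theano.function([diag, g], g_safe)
print(f([1.0, 2.0], [0.5, 0.5]))   # [ 0.5  0.5]
print(f([1.0, -2.0], [0.5, 0.5]))  # [ nan  nan]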
Example 14: theano_digitize
def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in the input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
Developer ID: eglxiang, Project: xnn, Lines of code: 26, Source file: utils.py
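A usage sketch, assuming theano_digitize as defined above is in scope, checked against numpy.digitize (note the Theano version returns floats because binned starts from zeros_like(x)):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
bins = np.array([0.0, 1.0, 2.0])
f = theano.function([x], theano_digitize(x, bins))
print(f([-0.5, 0.5, 1.5, 2.5]))                  # [ 0.  1.  2.  3.]
print(np.digitize([-0.5, 0.5, 1.5, 2.5], bins))  # [0 1 2 3]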
Example 15: theano_sentence_prediction
def theano_sentence_prediction(self, Sentence, Chars, WordLengths):
    input_lstm_res_f = self.input_lstm_forward_layer.function(Sentence, Chars, WordLengths)
    input_lstm_res_b = self.input_lstm_backward_layer.function(Sentence, Chars, WordLengths)
    input_combined = T.concatenate((input_lstm_res_f, input_lstm_res_b), axis=1)

    # Make pairwise features. This is really just "tensor product with
    # concatenation instead of multiplication". Is there a command for that?
    full_matrix, _ = theano.scan(fn=self.__pairwise_features,
                                 outputs_info=None,
                                 sequences=input_combined,
                                 non_sequences=[input_combined, Sentence.shape[0]])

    # define srng unconditionally so both dropout branches below can use it
    srng = RandomStreams(seed=12345)
    if len(self.lstm_layers) > 0 and self.lstm_layers[0].training:
        full_matrix = T.switch(srng.binomial(size=(Sentence.shape[0], Sentence.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
    else:
        full_matrix = 0.5 * full_matrix

    full_matrix = self.transition_layer.function(full_matrix)

    for layer in self.lstm_layers:
        if layer.training:
            print("hah-train")
            full_matrix = T.switch(srng.binomial(size=(Sentence.shape[0], Sentence.shape[0]+1, self.hidden_dimension*4), p=0.5), full_matrix, 0)
        else:
            print("heh-notrain")
            full_matrix = 0.5 * full_matrix
        full_matrix = layer.function(full_matrix)

    final_matrix = self.output_convolution.function(full_matrix)
    return T.nnet.softmax(final_matrix)
Developer ID: MichSchli, Project: Speciale, Lines of code: 34, Source file: fourway_lstm_both.py
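The T.switch(srng.binomial(...), full_matrix, 0) lines implement Bernoulli dropout: a random 0/1 mask chooses between the activation and zero, while at test time the activations are scaled by the keep probability instead. The pattern in a minimal form (a sketch, not this project's layer API):

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=12345)
x = T.matrix('x')
# keep each entry with probability 0.5, zero it otherwise
mask = srng.binomial(size=x.shape, p=0.5)
dropped = T.switch(mask, x, 0)
f = theano.function([x], dropped)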
Example 16: compute_updates
def compute_updates(self, training_cost, params):
    updates = []
    grads = T.grad(training_cost, params)
    grads = OrderedDict(zip(params, grads))

    # Clip stuff
    c = numpy.float32(self.cutoff)
    clip_grads = []

    norm_gs = T.sqrt(sum(T.sum(g ** 2) for p, g in grads.items()))
    normalization = T.switch(T.ge(norm_gs, c), c / norm_gs, np.float32(1.))
    notfinite = T.or_(T.isnan(norm_gs), T.isinf(norm_gs))

    for p, g in grads.items():
        clip_grads.append((p, T.switch(notfinite, numpy.float32(.1) * p, g * normalization)))

    grads = OrderedDict(clip_grads)

    if self.updater == 'adagrad':
        updates = Adagrad(grads, self.lr)
    elif self.updater == 'sgd':
        raise Exception("Sgd not implemented!")
    elif self.updater == 'adadelta':
        updates = Adadelta(grads)
    elif self.updater == 'rmsprop':
        updates = RMSProp(grads, self.lr)
    elif self.updater == 'adam':
        updates = Adam(grads)
    else:
        raise Exception("Updater not understood!")

    return updates
Developer ID: npow, Project: hed-dlg, Lines of code: 32, Source file: dialog_encdec.py
Example 17: convert_method
def convert_method(self, method_string):
    if method_string == 'sigmoid':
        return Tensor.nnet.sigmoid
    elif method_string == 'tanh':
        return Tensor.tanh
    elif method_string == 'scaled_tanh':
        return lambda x: 1.7159 * Tensor.tanh(0.66 * x)
    elif method_string == 'soft_sigmoid':
        return soft_sigmoid
    elif method_string == 'relu':
        return lambda x: x * (x > 0)
    elif method_string == 'relu2':
        # clips to [-1, 1]; note the division makes this NaN at x == 0
        return lambda x: Tensor.switch(Tensor.lt(x, -1), -1, x) * Tensor.switch(Tensor.gt(x, 1), 1, x) / x
    elif method_string == 'leakyrelu':
        return lambda x: x * (x > 0) + 0.01 * x * (x < 0)
    elif method_string == 'shiftedrelu':
        return lambda x: x * (x > -1)
    elif method_string == 'hard_sigmoid':
        return Tensor.nnet.hard_sigmoid
    elif method_string == 'none':
        return lambda x: x
    else:
        raise Exception('method unknown')
Developer ID: aviveise, Project: double_encoder, Lines of code: 34, Source file: configuration.py
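The 'relu2' entry clips activations to [-1, 1], but multiplying the two switches and dividing by x yields NaN at x == 0. A nested switch (or Tensor.clip) gives the same clipping without the division (my own sketch):

import theano
import theano.tensor as T

x = T.vector('x')
clipped = T.switch(T.lt(x, -1), -1, T.switch(T.gt(x, 1), 1, x))
f = theano.function([x], clipped)
print(f([-2.0, 0.0, 0.5, 3.0]))  # [-1.  0.  0.5  1.]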
Example 18: tnormal_icdf
def tnormal_icdf(size, avg, std, lbound, ubound, theano_rng, dtype):
    """
    Alternative Method:
    sample = -Phi_inv(Phi(-lbound)*(1-u) + Phi(-ubound)*u)
    """

    def Phi(x):
        erfarg = (x - avg) / (std * SQRT2)
        rval = 0.5 * (1. + T.erf(erfarg))
        return rval.astype(dtype)

    def Phi_inv(y, eps=3e-8):
        """ eps was calibrated for cublas.erfinv using float32 """
        temp = 2. * y - 1.
        erfinv_input = T.clip(temp, -1 + eps, 1 - eps)
        rval = avg + std * SQRT2 * T.erfinv(erfinv_input)
        return rval.astype(dtype)

    # center lower and upper bounds based on mean
    u = theano_rng.uniform(size=size, dtype=dtype)

    # Inverse CDF method. When the method becomes numerically unstable, we simply
    # return the bounds based on whether avg < lbound, or ubound < avg.
    cdf_range = Phi(ubound) - Phi(lbound)
    sample = T.switch(
        T.or_(
            T.lt(cdf_range, 3e-8),
            T.gt(cdf_range, 1 - 3e-8)),
        T.switch(
            T.lt(avg, lbound),
            lbound,
            ubound),
        Phi_inv(Phi(lbound) + u * cdf_range))
    return sample
Developer ID: gdesjardins, Project: hossrbm, Lines of code: 35, Source file: truncated.py
Example 19: castray
def castray(ro, rd, shape_params, nprims, width, height):
    tmin = 1.0
    tmax = 20.0
    precis = 0.002
    m = -1.0
    # There is a sequence of distances d1, d2, ..., dn, and their running
    # sums d1, d1+d2, d1+d2+d3, ... What we actually want in the output is,
    # for each ray, the distance to the surface. So we want something like
    # 0, 20, 25, 27, 28, 28, 28, 28, 28
    max_num_steps = 25

    # distcolors = map(ro + rd * 0, width, height) # FIXME, reshape instead of mul by 0
    distcolors = mapedit(ro + rd * 0, shape_params, nprims, width, height)
    dists = distcolors
    steps = T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
    accum_dists = T.reshape(dists, (width, height, 1))

    for i in range(max_num_steps - 1):
        # distcolors = map(ro + rd * accum_dists, width, height) # FIXME, reshape instead of mul by 0
        distcolors = mapedit(ro + rd * accum_dists, shape_params, nprims, width, height)  # FIXME, reshape instead of mul by 0
        dists = distcolors
        steps = steps + T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
        accum_dists = accum_dists + T.reshape(dists, (width, height, 1))

    last_depth = T.reshape(accum_dists, (width, height))
    depthmap = T.switch(last_depth < tmax, last_depth / tmax, T.zeros_like(last_depth))
    color = 1.0 - steps / float(max_num_steps)
    # Distance marched along ray and delta between last two steps
    return depthmap
Developer ID: zenna, Project: Arrows.jl, Lines of code: 31, Source file: iq.py
Example 20: cd_updates
def cd_updates(self):
    """
    Return a dictionary of shared variable updates that implements contrastive divergence
    learning by stochastic gradient descent with an annealed learning rate.
    """
    ups = {}

    if self.persistent_chains:
        grads = self.contrastive_grads()
        ups.update(dict(self.sampler.updates()))
    else:
        cd1_sampler, final_p, cd1_updates = self.rbm.CD1_sampler(self.visible_batch,
                                                                 self.batchsize)
        self._last_cd1_sampler = cd1_sampler  # hacked in here for the unit test
        # ignore the cd1_sampler
        grads = self.contrastive_grads(neg_v=final_p)
        ups.update(dict(cd1_updates))

    # contrastive divergence updates
    # TODO: sgd_updates is a particular optimization algo (others are possible)
    #       parametrize so that algo is plugin
    #       the normalization normVF might be sgd-specific though...
    # TODO: when sgd has an annealing schedule, this should
    #       go through that mechanism.
    lr = TT.clip(
        self.learn_rate * TT.cast(self.lr_anneal_start / (self.iter + 1), floatX),
        0.0,              # min
        self.learn_rate)  # max
    ups.update(dict(sgd_updates(
        self.rbm.params(),
        grads,
        stepsizes=[a * lr for a in self.learn_rate_multipliers])))
    ups[self.iter] = self.iter + 1

    # add trainer updates (replace CD update of U)
    ups[self.rbm.U], ups[self.normVF] = self.normalize_U(ups[self.rbm.U])

    # l1_updates:
    if (self.l1_penalty_start > 0) and (self.l1_penalty != 0.0):
        ups[self.effective_l1_penalty] = TT.switch(
            self.iter >= self.l1_penalty_start,
            self.l1_penalty,
            0.0)

    if getattr(self, 'p_lr', None):
        ups[self.p_lr] = TT.switch(self.iter > self.p_training_start,
                                   self.p_training_lr,
                                   0)
        new_P = ups[self.rbm.P] * self.p_mask
        no_pos_P = TT.switch(new_P < 0, new_P, 0)
        ups[self.rbm.P] = -no_pos_P / no_pos_P.sum(axis=0)  # normalize so that columns sum to 1

    return ups
Developer ID: GavinHwang, Project: DeepLearningTutorials, Lines of code: 60, Source file: mcrbm.py
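Both TT.switch expressions implement an iteration-gated schedule: a penalty or learning rate stays at zero until the shared iteration counter crosses a start threshold. The schedule on its own (a sketch with made-up constants):

import theano
import theano.tensor as TT

t = TT.iscalar('t')
l1_penalty, l1_penalty_start = 1e-4, 1000
# the penalty switches on only once training reaches the start iteration
effective_l1 = TT.switch(t >= l1_penalty_start, l1_penalty, 0.0)
f = theano.function([t], effective_l1)
print(f(10), f(2000))  # 0.0 0.0001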
Note: the theano.tensor.switch examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright of each fragment remains with its original author, and redistribution or reuse should follow the corresponding project's license. Do not repost without permission.