This article collects typical usage examples of the theano.tensor.lt function in Python. If you have been wondering what exactly the lt function does, how to call it, and what real code that uses it looks like, the curated samples below should help.
Twenty code examples of the lt function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
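Before the collected examples, a minimal self-contained sketch (our own, not taken from any of the projects below) may help: T.lt(a, b) builds a symbolic elementwise a < b comparison whose result is an int8 tensor of 0s and 1s. The excerpts that follow assume their projects' own imports, typically `import numpy` and `import theano.tensor as T`.

# Minimal T.lt demo (ours, not drawn from the examples below).
import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
f = theano.function([x, y], T.lt(x, y))
a = np.array([1., 2., 3.], dtype=theano.config.floatX)
b = np.array([3., 2., 1.], dtype=theano.config.floatX)
print(f(a, b))  # => [1 0 0]

# Because the result is a tensor, elementwise selection uses T.switch
# rather than a Python `if`:
g = theano.function([x], T.switch(T.lt(x, 0.), 0., x))  # clamp negatives to 0
print(g(np.array([-1., 0.5], dtype=theano.config.floatX)))  # => [0.  0.5]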
Example 1: irprop_minus_updates
def irprop_minus_updates(params, grads):
    # IRPROP- parameters
    updates = []
    deltas = 0.1 * numpy.ones(len(params))
    last_params = params

    positiveStep = 1.2
    negativeStep = 0.5
    maxStep = 50.
    minStep = math.exp(-6)

    for param, gparam, delta, last_gparam in zip(params, grads, deltas, last_params):
        # calculate change
        change = T.sgn(gparam * last_gparam)
        # NOTE: T.gt/T.lt return symbolic expressions, so these Python-level
        # `if` tests do not branch elementwise on the data (compare the fully
        # symbolic T.switch formulation in Example 12).
        if T.gt(change, 0):
            delta = T.minimum(delta * positiveStep, maxStep)
            if T.lt(delta, minStep):
                delta = minStep
        elif T.lt(change, 0):
            delta = T.maximum(delta * negativeStep, minStep)
            if T.gt(delta, maxStep):  # original read params['maxStep'], but params is a list
                delta = maxStep
            last_gparam = 0

        # update the weights
        updates.append((param, param - T.sgn(gparam) * delta))
        # store old change
        last_gparam = gparam

    return updates
Developer: andersjo, Project: vector-semantics, Lines: 34, Source: rprop.py
Example 2: _backward_negative_z
def _backward_negative_z(inputs, weights, normed_relevances, bias=None):
    inputs_plus = inputs * T.gt(inputs, 0)
    weights_plus = weights * T.gt(weights, 0)
    inputs_minus = inputs * T.lt(inputs, 0)
    weights_minus = weights * T.lt(weights, 0)
    # Compute weights+ * inputs- and weights- * inputs+
    negative_part_a = conv2d(
        normed_relevances, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_a *= inputs_minus
    negative_part_b = conv2d(
        normed_relevances, weights_minus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_b *= inputs_plus

    together = negative_part_a + negative_part_b
    if bias is not None:
        bias_negative = bias * T.lt(bias, 0)
        bias_relevance = bias_negative.dimshuffle("x", 0, "x", "x") * normed_relevances
        # Divide bias by weight size before convolving back
        # mean across channel, 0, 1 dims (hope this is correct?)
        fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
        bias_rel_in = conv2d(
            fraction_bias, T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
        )
        together += bias_rel_in
    return together
Developer: robintibor, Project: braindecode, Lines: 27, Source: heatmap.py
Example 3: build_update
def build_update(self, alpha=0.01, beta=0.0):
    W = self.W
    lambda_mult = self.lambda_mult
    y = self.y
    C = self.C
    lower_bound = theano.shared(np.float32(0.0))

    updates = build_gradDescent_step(W, lambda_mult, alpha, beta)
    updatelambda_mult = updates[1]  # <==> \lambda_i'(t+1)
    updatelambda_mult = updatelambda_mult - T.dot(y, updatelambda_mult) / T.dot(y, y) * y  # <==> \lambda_i''(t+1)

    # use theano.tensor.switch because we need an elementwise comparison
    # if \lambda_i''(t+1) > C, set it to C
    updatelambda_mult = T.switch(T.lt(C, updatelambda_mult), C, updatelambda_mult)
    updatelambda_mult = T.switch(T.lt(updatelambda_mult, lower_bound), lower_bound, updatelambda_mult)
    updatelambda_mult = sandbox.cuda.basic_ops.gpu_from_host(updatelambda_mult)

    updatefunction = theano.function(inputs=[],
                                     outputs=W,
                                     updates=[(lambda_mult, updatelambda_mult)])

    self._update_lambda_mult_graph = updatelambda_mult
    self.update_function = updatefunction
    return updatelambda_mult, updatefunction
Developer: ernestyalumni, Project: MLgrabbag, Lines: 27, Source: SVM.py
Example 4: theano_digitize
def theano_digitize(x, bins):
    """
    Equivalent to numpy digitize.

    Parameters
    ----------
    x : Theano tensor or array_like
        The array or matrix to be digitized
    bins : array_like
        The bins with which x should be digitized

    Returns
    -------
    A Theano tensor
        The indices of the bins to which each value in input array belongs.
    """
    binned = T.zeros_like(x) + len(bins)
    for i in range(len(bins)):
        bin = bins[i]
        if i == 0:
            binned = T.switch(T.lt(x, bin), i, binned)
        else:
            ineq = T.and_(T.ge(x, bins[i - 1]), T.lt(x, bin))
            binned = T.switch(ineq, i, binned)
    binned = T.switch(T.isnan(x), len(bins), binned)
    return binned
Developer: eglxiang, Project: xnn, Lines: 26, Source: utils.py
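A hypothetical usage sketch for theano_digitize (ours, not part of the xnn project), checked against numpy.digitize; note the Theano version additionally maps NaN to len(bins):

# Assumed usage of theano_digitize as defined above.
import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
bins = np.array([0.0, 1.0, 2.0])
digitize = theano.function([x], theano_digitize(x, bins))
data = np.array([-0.5, 0.3, 1.7, 2.5], dtype=theano.config.floatX)
print(digitize(data))           # => [0 1 2 3]
print(np.digitize(data, bins))  # => [0 1 2 3], same binning for non-NaN inputs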
Example 5: __init__
def __init__(self, random_state=None, low=0.0, high=1.0):
    super(Uniform, self).__init__(low=low, high=high,
                                  random_state=random_state,
                                  optimizer=None)

    # pdf
    self.pdf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        0.,
        1. / (self.high - self.low)).ravel()
    self.make_(self.pdf_, "pdf")

    # -log pdf
    self.nnlf_ = T.switch(
        T.or_(T.lt(self.X, self.low), T.ge(self.X, self.high)),
        np.inf,
        T.log(self.high - self.low)).ravel()
    self.make_(self.nnlf_, "nnlf")

    # cdf
    self.cdf_ = T.switch(
        T.lt(self.X, self.low),
        0.,
        T.switch(
            T.lt(self.X, self.high),
            (self.X - self.low) / (self.high - self.low),
            1.)).ravel()
    self.make_(self.cdf_, "cdf")

    # ppf
    self.ppf_ = self.p * (self.high - self.low) + self.low
    self.make_(self.ppf_, "ppf", args=[self.p])
Developer: ibab, Project: carl, Lines: 32, Source: uniform.py
Example 6: gradients
def gradients(cost, parameters, lr=0.001):
    updates = []
    c = 0
    for param in parameters:
        update = param - lr * theano.grad(cost, param)
        if c == 1 or c == 3:
            # update = t.minimum(t.abs_(update), np.pi) * (update / abs(update))
            #
            # update = t.maximum(update, 0)
            # update = t.minimum(update, np.pi)
            update = ifelse(t.lt(update, 0), np.pi * 2 - 0.001, update)
            update = ifelse(t.gt(update, np.pi * 2), 0.001, update)
        if c == 2:
            update = ifelse(t.lt(update, 2), float(20), update)
        elif c == 5 or c == 6:
            update = t.maximum(update, -5)
            update = t.minimum(update, 5)
        updates.append((param, update))
        c += 1
    return updates
Developer: dlacombejr, Project: sparse_filtering, Lines: 33, Source: gabor_fit.py
Example 7: tnormal_icdf
def tnormal_icdf(size, avg, std, lbound, ubound, theano_rng, dtype):
    """
    Alternative Method:
    sample = -Phi_inv(Phi(-lbound)*(1-u) + Phi(-ubound)*u)
    """

    def Phi(x):
        erfarg = (x - avg) / (std * SQRT2)
        rval = 0.5 * (1. + T.erf(erfarg))
        return rval.astype(dtype)

    def Phi_inv(y, eps=3e-8):
        """ eps was calibrated for cublas.erfinv using float32 """
        temp = 2. * y - 1.
        erfinv_input = T.clip(temp, -1 + eps, 1 - eps)
        rval = avg + std * SQRT2 * T.erfinv(erfinv_input)
        return rval.astype(dtype)

    # center lower and upper bounds based on mean
    u = theano_rng.uniform(size=size, dtype=dtype)

    # Inverse CDF method. When method becomes numerically unstable, we simply
    # return the bounds based on whether avg < lbound, or ubound < avg.
    cdf_range = Phi(ubound) - Phi(lbound)
    sample = T.switch(
        T.or_(
            T.lt(cdf_range, 3e-8),
            T.gt(cdf_range, 1 - 3e-8)),
        T.switch(
            T.lt(avg, lbound),
            lbound,
            ubound),
        Phi_inv(Phi(lbound) + u * cdf_range))
    return sample
Developer: gdesjardins, Project: hossrbm, Lines: 35, Source: truncated.py
Example 8: generate_subpop_input
def generate_subpop_input(r_E, r_I, n_pairs):
    c = T.scalar("c", dtype='float32')
    h = T.matrix("h", dtype='float32')

    W_EE = T.tensor3("W_EE", dtype='float32')
    W_EI = T.tensor3("W_EI", dtype='float32')
    W_IE = T.tensor3("W_IE", dtype='float32')
    W_II = T.tensor3("W_II", dtype='float32')

    r_e = T.matrix("r_e", dtype='float32')
    r_i = T.matrix("r_i", dtype='float32')

    I_E = T.matrix('I_E', dtype='float32')
    I_I = T.matrix('I_I', dtype='float32')
    I_thresh_E = T.matrix('I_thresh_E', dtype='float32')
    I_thresh_I = T.matrix('I_thresh_I', dtype='float32')

    # Compile functions:
    I_E = c*h + T.sum(T.sum(W_EE*r_e, 1), 1).reshape((n_pairs, n_pairs)).T - T.sum(T.sum(W_EI*r_i, 1), 1).reshape((n_pairs, n_pairs)).T
    I_I = c*h + T.sum(T.sum(W_IE*r_e, 1), 1).reshape((n_pairs, n_pairs)).T - T.sum(T.sum(W_II*r_i, 1), 1).reshape((n_pairs, n_pairs)).T

    I_thresh_E = T.switch(T.lt(I_E, 0), 0, I_E)
    I_thresh_I = T.switch(T.lt(I_I, 0), 0, I_I)

    inputs = theano.function(inputs=[c, h, W_EE, W_EI, W_IE, W_II],
                             outputs=[I_thresh_E, I_thresh_I],
                             givens={r_e: r_E, r_i: r_I},
                             allow_input_downcast=True)
    return inputs
Developer: benselby, Project: v1_modelling, Lines: 30, Source: ssn_subpop_tf.py
Example 9: __init__
def __init__(self, x, lower, upper, *args, **kwargs):
    super(Uniform, self).__init__(*args, **kwargs)
    self._logp = T.log(T.switch(T.gt(x, upper), 0, T.switch(T.lt(x, lower), 0, 1 / (upper - lower))))
    # NOTE: the original snippet referenced undefined names `up` and `low`
    # here; they are assumed to mean `upper` and `lower`.
    self._cdf = T.switch(T.gt(x, upper), 1, T.switch(T.lt(x, lower), 0, (x - lower) / (upper - lower)))
    self._add_expr('x', x)
    self._add_expr('lower', lower)
    self._add_expr('upper', upper)
Developer: giangzuzana, Project: python-mle, Lines: 7, Source: __init__.py
Example 10: _recursive_step
def _recursive_step(self, i, regs, tokens, seqs, back_routes, back_lens):
    seq = seqs[i]
    # Encoding
    left, right, target = seq[0], seq[1], seq[2]
    left_rep = ifelse(T.lt(left, 0), tokens[-left], regs[left])
    right_rep = ifelse(T.lt(right, 0), tokens[-right], regs[right])
    rep = self._encode_computation(left_rep, right_rep)
    if self.deep:
        inter_rep = rep
        rep = self._deep_encode(inter_rep)
    else:
        inter_rep = T.constant(0)
    new_regs = T.set_subtensor(regs[target], rep)
    back_len = back_lens[i]
    back_reps, lefts, rights = self._unfold(back_routes[i], new_regs, back_len)
    gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2, distance, rep_gradient = self._unfold_gradients(back_reps, lefts, rights, back_routes[i],
                                                                                        tokens, back_len)
    return ([rep, inter_rep, left_rep, right_rep, new_regs, rep_gradient, distance],
            self.decode_optimizer.setup([self.W_d1, self.W_d2, self.B_d1, self.B_d2],
                                        [gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2], method=self.optimization, beta=self.beta))
Developer: zomux, Project: nlpy, Lines: 28, Source: rae.py
Example 11: interval_reduction
def interval_reduction(a, b, c, d, tol):
    fc = f(c)
    fd = f(d)

    a, b, c, d = ifelse(T.lt(fc, fd), [a, d, d - golden_ratio * (d - a), c], [c, b, d, c + golden_ratio * (b - c)])
    stoprule = theano.scan_module.until(T.lt(T.abs_(c - d), tol))

    return [a, b, c, d], stoprule
Developer: itdxer, Project: neupy, Lines: 8, Source: golden_search.py
Example 12: rprop
def rprop(param, learning_rate, gparam, mask, updates, current_cost, previous_cost,
          eta_plus=1.2, eta_minus=0.5, max_delta=50, min_delta=10e-6):
    previous_grad = sharedX(numpy.ones(param.shape.eval()), borrow=True)
    delta = sharedX(learning_rate * numpy.ones(param.shape.eval()), borrow=True)
    previous_inc = sharedX(numpy.zeros(param.shape.eval()), borrow=True)
    zero = T.zeros_like(param)
    one = T.ones_like(param)
    change = previous_grad * gparam

    new_delta = T.clip(
        T.switch(
            T.eq(gparam, 0.),
            delta,
            T.switch(
                T.gt(change, 0.),
                delta * eta_plus,
                T.switch(
                    T.lt(change, 0.),
                    delta * eta_minus,
                    delta
                )
            )
        ),
        min_delta,
        max_delta
    )
    new_previous_grad = T.switch(
        T.eq(mask * gparam, 0.),
        previous_grad,
        T.switch(
            T.gt(change, 0.),
            gparam,
            T.switch(
                T.lt(change, 0.),
                zero,
                gparam
            )
        )
    )
    inc = T.switch(
        T.eq(mask * gparam, 0.),
        zero,
        T.switch(
            T.gt(change, 0.),
            - T.sgn(gparam) * new_delta,
            T.switch(
                T.lt(change, 0.),
                zero,
                - T.sgn(gparam) * new_delta
            )
        )
    )

    updates.append((previous_grad, new_previous_grad))
    updates.append((delta, new_delta))
    updates.append((previous_inc, inc))

    return param + inc * mask
Developer: nitbix, Project: ensemble-testing, Lines: 57, Source: mlp-old.py
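The snippet above relies on a sharedX helper that is not shown; a common definition (an assumption on our part, following the pylearn2-style convention) is:

import numpy
import theano

def sharedX(value, borrow=False):
    # Assumed helper, not shown in the original mlp-old.py: create a Theano
    # shared variable cast to config.floatX.
    return theano.shared(numpy.asarray(value, dtype=theano.config.floatX),
                         borrow=borrow)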
Example 13: get_output_for
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic or self.rate == 0:
        return input
    else:
        drop = self._srng.uniform(input.shape)
        z = T.lt(drop, 0.5 * self.rate)
        o = T.lt(T.abs_(drop - 0.75 * self.rate), 0.25 * self.rate)
        input = T.set_subtensor(input[z.nonzero()], 0.)
        input = T.set_subtensor(input[o.nonzero()], 1.)
        return input
Developer: sbos, Project: np-baselines, Lines: 10, Source: autoencoder.py
Example 14: berhu
def berhu(predictions, targets, s=0.2, l=0.5, m=1.2):
    # Compute mask
    mask = T.gt(targets, l) * T.lt(targets, m)
    # Compute n of valid pixels
    n_valid = T.sum(mask)
    # Redundant mult here
    r = (predictions - targets) * mask
    c = s * T.max(T.abs_(r))
    a_r = T.abs_(r)
    b = T.switch(T.lt(a_r, c), a_r, ((r**2) + (c**2)) / (2 * c))
    return T.sum(b) / n_valid
Developer: sebastian-schlecht, Project: im2vol, Lines: 12, Source: losses.py
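A hedged smoke test (ours, not from the im2vol project): only pixels whose target lies in the (l, m) interval contribute, and the penalty switches from L1 to a scaled L2 once |r| exceeds c = s * max|r|:

# Assumed usage of the berhu loss defined above.
import numpy as np
import theano
import theano.tensor as T

predictions = T.matrix('predictions')
targets = T.matrix('targets')
loss_fn = theano.function([predictions, targets], berhu(predictions, targets))

p = np.array([[0.6, 0.9], [1.0, 2.0]], dtype=theano.config.floatX)
t = np.array([[0.7, 1.0], [1.1, 2.0]], dtype=theano.config.floatX)
print(loss_fn(p, t))  # mean reverse-Huber penalty over the three valid pixels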
Example 15: _step
def _step(
    i,
    pkm1, pkm2, qkm1, qkm2,
    k1, k2, k3, k4, k5, k6, k7, k8, r
):
    xk = -(x * k1 * k2) / (k3 * k4)
    pk = pkm1 + pkm2 * xk
    qk = qkm1 + qkm2 * xk
    pkm2 = pkm1
    pkm1 = pk
    qkm2 = qkm1
    qkm1 = qk

    xk = (x * k5 * k6) / (k7 * k8)
    pk = pkm1 + pkm2 * xk
    qk = qkm1 + qkm2 * xk
    pkm2 = pkm1
    pkm1 = pk
    qkm2 = qkm1
    qkm1 = qk

    old_r = r
    r = tt.switch(tt.eq(qk, zero), r, pk / qk)

    k1 += one
    k2 += k26update
    k3 += two
    k4 += two
    k5 += one
    k6 -= k26update
    k7 += two
    k8 += two

    big_cond = tt.gt(tt.abs_(qk) + tt.abs_(pk), BIG)
    biginv_cond = tt.or_(
        tt.lt(tt.abs_(qk), BIGINV),
        tt.lt(tt.abs_(pk), BIGINV)
    )

    pkm2 = tt.switch(big_cond, pkm2 * BIGINV, pkm2)
    pkm1 = tt.switch(big_cond, pkm1 * BIGINV, pkm1)
    qkm2 = tt.switch(big_cond, qkm2 * BIGINV, qkm2)
    qkm1 = tt.switch(big_cond, qkm1 * BIGINV, qkm1)

    pkm2 = tt.switch(biginv_cond, pkm2 * BIG, pkm2)
    pkm1 = tt.switch(biginv_cond, pkm1 * BIG, pkm1)
    qkm2 = tt.switch(biginv_cond, qkm2 * BIG, qkm2)
    qkm1 = tt.switch(biginv_cond, qkm1 * BIG, qkm1)

    return ((pkm1, pkm2, qkm1, qkm2,
             k1, k2, k3, k4, k5, k6, k7, k8, r),
            until(tt.abs_(old_r - r) < (THRESH * tt.abs_(r))))
Developer: alexander-belikov, Project: pymc3, Lines: 52, Source: dist_math.py
Example 16: rebuild
def rebuild(self):
    for i, (inputs, f) in enumerate(self.wiring):
        if not inputs:
            continue
        lin_comb = T.dot(T.concatenate([self._vlayers[j] for j in inputs], axis=1), self._vweights[i])
        add_biases = lin_comb + self._vbiases[i]
        self._vlayers[i] = f(add_biases)

    self._output = T.concatenate([self._vlayers[j] for j in self.output_layers], axis=1)
    self._targets = [T.matrix() for j in self.output_layers]
    crossentropy = sum([(T.nnet.categorical_crossentropy(self._vlayers[j], self._targets[i])
                         if self.wiring[j][1] == SOFTMAX_FUN
                         else ((self._vlayers[j] - self._targets[i]) ** 2 / (1 + self._targets[i].max()) ** 2).sum())
                        for i, j in enumerate(self.output_layers)])
    self._cost = (crossentropy.sum() +
                  self.L2REG / (self.layers[i]) * sum((weight ** 2).sum() for weight in self._vweights if weight is not None) +  # L2 regularization
                  0.01 * self.L2REG / math.sqrt(self.layers[i]) * sum((bias ** 2).sum() for j, bias in enumerate(self._vbiases) if bias is not None and self.wiring[j][1] != LINEAR_FUN))  # L2 regularization
    self._costnoreg = crossentropy.sum()
    self._derivatives = [None] * len(self.layers)
    self._updates = []
    MAX_DERIV = 1000
    for i, (inputs, f) in enumerate(self.wiring):
        if not inputs:
            continue
        deriv1 = T.grad(self._cost, self._vweights[i])
        deriv1p = T.switch(T.lt(deriv1, MAX_DERIV), deriv1, MAX_DERIV)
        deriv1pp = T.switch(T.gt(deriv1p, -MAX_DERIV), deriv1p, -MAX_DERIV)
        # deriv1ppp = T.switch(T.isnan(deriv1pp), 0, deriv1pp)
        deriv2 = T.grad(self._cost, self._vbiases[i])
        deriv2p = T.switch(T.lt(deriv2, MAX_DERIV), deriv2, MAX_DERIV)
        deriv2pp = T.switch(T.gt(deriv2p, -MAX_DERIV), deriv2p, -MAX_DERIV)
        # deriv2ppp = T.switch(T.isnan(deriv2pp), 0, deriv2pp)
        self._derivatives[i] = (deriv1pp, deriv2pp)
        self._updates.append((self._vweights[i], self._vweights[i] - self.learning_rate * self._derivatives[i][0]))
        self._updates.append((self._vbiases[i], self._vbiases[i] - self.learning_rate * self._derivatives[i][1]))

    self._prediction = theano.function(inputs=[self._vlayers[i] for i in self.input_layers],
                                       outputs=self._output)
    self._train = theano.function(inputs=self._targets + [self._vlayers[i] for i in self.input_layers],
                                  outputs=self._cost,
                                  updates=self._updates, allow_input_downcast=True)
    # mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True))  # debug NaN
    self._costfun = theano.function(inputs=self._targets + [self._vlayers[i] for i in self.input_layers],
                                    outputs=self._costnoreg, allow_input_downcast=True)
Developer: fding, Project: evilpoker, Lines: 52, Source: neuralnet.py
Example 17: _bpts_step
def _bpts_step(self, i, gradient_reg, seqs, reps, inter_reps, left_subreps, right_subreps, rep_gradients):
    # BPTS
    seq = seqs[i]
    left, right, target = seq[0], seq[1], seq[2]
    left_is_token = T.lt(left, 0)
    right_is_token = T.lt(right, 0)
    bpts_gradient = gradient_reg[target]
    rep_gradient = rep_gradients[i] + bpts_gradient
    if self.deep:
        # Implementation note:
        # As the gradient of the deep encoding func wrt W_ee includes the input
        # representation, letting T.grad find that input representation directly
        # would get stuck in an infinite loop. So we must use SRG in this case.
        _fake_input_rep, = make_float_vectors("_fake_input_rep")
        deep_rep = self._deep_encode(_fake_input_rep)
        node_map = {deep_rep: reps[i], _fake_input_rep: inter_reps[i]}
        g_wee = SRG(T.grad(T.sum(deep_rep), self.W_ee), node_map) * rep_gradient
        g_bee = SRG(T.grad(T.sum(deep_rep), self.B_ee), node_map) * rep_gradient
        g_inter_rep = SRG(T.grad(T.sum(deep_rep), _fake_input_rep), node_map) * rep_gradient
        inter_rep = inter_reps[i]
    else:
        g_wee = T.constant(0)
        g_bee = T.constant(0)
        g_inter_rep = rep_gradient
        inter_rep = reps[i]

    # Accelerate computation by using saved internal values.
    # Due to a limitation of SRG, known_grads can not be used here.
    _fake_left_rep, _fake_right_rep = make_float_vectors("_fake_left_rep", "_fake_right_rep")
    rep_node = self._encode_computation(_fake_left_rep, _fake_right_rep)
    if self.deep:
        rep_node = self._deep_encode(rep_node)
    node_map = {_fake_left_rep: left_subreps[i], _fake_right_rep: right_subreps[i], rep_node: inter_rep}
    g_we1 = SRG(T.grad(T.sum(rep_node), self.W_e1), node_map) * g_inter_rep
    g_we2 = SRG(T.grad(T.sum(rep_node), self.W_e2), node_map) * g_inter_rep
    g_be = SRG(T.grad(T.sum(rep_node), self.B_e), node_map) * g_inter_rep
    g_left_p = SRG(T.grad(T.sum(rep_node), _fake_left_rep), node_map) * g_inter_rep
    g_right_p = SRG(T.grad(T.sum(rep_node), _fake_right_rep), node_map) * g_inter_rep
    gradient_reg = ifelse(left_is_token, gradient_reg, T.set_subtensor(gradient_reg[left], g_left_p))
    gradient_reg = ifelse(right_is_token, gradient_reg, T.set_subtensor(gradient_reg[right], g_right_p))
    return g_we1, g_we2, g_be, g_wee, g_bee, gradient_reg
Developer: zomux, Project: nlpy, Lines: 52, Source: rae.py
Example 18: _forward_negative_z
def _forward_negative_z(inputs, weights, bias=None):
    inputs_plus = inputs * T.gt(inputs, 0)
    weights_plus = weights * T.gt(weights, 0)
    inputs_minus = inputs * T.lt(inputs, 0)
    weights_minus = weights * T.lt(weights, 0)
    negative_part_a = conv2d(inputs_plus, weights_minus)
    negative_part_b = conv2d(inputs_minus, weights_plus)
    together = negative_part_a + negative_part_b
    if bias is not None:
        bias_negative = bias * T.lt(bias, 0)
        together += bias_negative.dimshuffle("x", 0, "x", "x")
    return together
Developer: robintibor, Project: braindecode, Lines: 13, Source: heatmap.py
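A hypothetical smoke test (ours, not from braindecode), assuming the usual bc01 layout — inputs as (batch, channels, rows, cols), weights as (filters, channels, rows, cols) — and that conv2d is theano.tensor.nnet.conv2d:

# Assumed usage of _forward_negative_z as defined above.
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d

inputs = T.tensor4('inputs')
weights = T.tensor4('weights')
neg_z = theano.function([inputs, weights], _forward_negative_z(inputs, weights))

rng = np.random.RandomState(0)
i4 = rng.randn(1, 1, 5, 5).astype(theano.config.floatX)
w4 = rng.randn(2, 1, 3, 3).astype(theano.config.floatX)
out = neg_z(i4, w4)
print(out.shape)         # => (1, 2, 3, 3) for a valid convolution
print((out <= 0).all())  # True: both conv products are sign-constrained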
Example 19: relevance_conv_a_b_sign_switch
def relevance_conv_a_b_sign_switch(inputs, weights, out_relevances, a, b, bias=None):
    assert a is not None
    assert b is not None
    assert a - b == 1
    # For each input, determine what
    outputs = conv2d(inputs, weights)
    if bias is not None:
        outputs += bias.dimshuffle("x", 0, "x", "x")
        # do not use bias further, only to determine direction of outputs
        bias = None
    # stabilize
    # prevent division by 0 and division by small numbers
    eps = 1e-4
    outputs += T.sgn(outputs) * eps
    outputs += T.eq(outputs, 0) * eps

    positive_forward = _forward_positive_z(inputs, weights, bias)
    negative_forward = _forward_negative_z(inputs, weights, bias)
    rel_for_positive_outputs = out_relevances * T.gt(outputs, 0)
    rel_for_negative_outputs = out_relevances * T.lt(outputs, 0)

    positive_norm_with_trend = positive_forward * T.gt(outputs, 0)
    negative_norm_with_trend = negative_forward * T.lt(outputs, 0)
    # minus to make overall norm positive
    norm_with_trend = positive_norm_with_trend - negative_norm_with_trend
    # stabilize also
    norm_with_trend += T.eq(norm_with_trend, 0) * eps
    in_positive_with_trend = _backward_positive_z(inputs, weights, rel_for_positive_outputs / norm_with_trend, bias)
    in_negative_with_trend = _backward_negative_z(inputs, weights, rel_for_negative_outputs / norm_with_trend, bias)
    # Minus in_negative since in_with_trend should not switch signs
    in_with_trend = in_positive_with_trend - in_negative_with_trend

    positive_norm_against_trend = positive_forward * T.lt(outputs, 0)
    negative_norm_against_trend = negative_forward * T.gt(outputs, 0)
    # minus to make overall norm positive
    norm_against_trend = positive_norm_against_trend - negative_norm_against_trend
    # stabilize also
    norm_against_trend += T.eq(norm_against_trend, 0) * eps
    in_positive_against_trend = _backward_positive_z(
        inputs, weights, rel_for_negative_outputs / norm_against_trend, bias
    )
    in_negative_against_trend = _backward_negative_z(
        inputs, weights, rel_for_positive_outputs / norm_against_trend, bias
    )
    # Minus in_negative since switching signs is done below
    in_against_trend = in_positive_against_trend - in_negative_against_trend

    in_relevances = a * in_with_trend - b * in_against_trend
    return in_relevances
Developer: robintibor, Project: braindecode, Lines: 51, Source: heatmap.py
Example 20: cubicBSpline
def cubicBSpline(self, L):
    b = T.zeros_like(L)

    idx4 = T.ge(L, 0) * T.lt(L, 1)
    idx3 = T.ge(L, 1) * T.lt(L, 2)
    idx2 = T.ge(L, 2) * T.lt(L, 3)
    idx1 = T.ge(L, 3) * T.le(L, 4)

    b = T.switch(T.eq(idx4, 1), T.pow(L, 3) / 6, b)
    b = T.switch(T.eq(idx3, 1), (-3*T.pow(L-1, 3) + 3*T.pow(L-1, 2) + 3*(L-1) + 1) / 6, b)
    b = T.switch(T.eq(idx2, 1), (3*T.pow(L-2, 3) - 6*T.pow(L-2, 2) + 4) / 6, b)
    b = T.switch(T.eq(idx1, 1), (-T.pow(L-3, 3) + 3*T.pow(L-3, 2) - 3*(L-3) + 1) / 6, b)

    return b.T  # b is K x K' and thus, as we multiply from the right with
Developer: jonathanmasci, Project: ShapeNet, Lines: 14, Source: layers_lscnn.py
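Since the method never reads self, the same piecewise polynomial can be exercised standalone; a hedged sketch (ours, not from ShapeNet):

# Standalone check of the piecewise cubic B-spline formula above.
import numpy as np
import theano
import theano.tensor as T

L = T.matrix('L')
spline = theano.function([L], cubicBSpline(None, L))  # `self` is unused here

grid = np.linspace(0., 4., 9).astype(theano.config.floatX).reshape(1, -1)
print(spline(grid))
# The four cubic pieces join continuously, e.g. both the idx3 and idx2
# polynomials evaluate to 4/6 at L = 2.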
Note: The theano.tensor.lt function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and redistribution and use should follow each project's license. Please do not repost without permission.