This article collects typical usage examples of the Python function theano.ifelse.ifelse. If you are wondering what the Python ifelse function does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following presents 20 code examples of the ifelse function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
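Before the examples, here is a minimal self-contained sketch of the ifelse API (our own illustration, not taken from the projects below). Unlike T.switch, which evaluates both branches elementwise, ifelse takes a scalar condition and lazily evaluates only the branch that is selected:

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

a, b = T.scalars('a', 'b')
x, y = T.matrices('x', 'y')

# Only the selected branch is computed (lazy evaluation under the vm/cvm linker).
z = ifelse(T.lt(a, b), T.mean(x), T.max(y))
f = theano.function([a, b, x, y], z)

ones = np.ones((2, 2), dtype=theano.config.floatX)
zeros = np.zeros((2, 2), dtype=theano.config.floatX)
print(f(1., 2., ones, zeros))  # condition true  -> mean(x) = 1.0
print(f(2., 1., ones, zeros))  # condition false -> max(y)  = 0.0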
Example 1: build
def build(self, output, tparams=None, BNparams=None):
    if self.BN_mode:
        self.BN_eps = npt(self.BN_eps)
        if not hasattr(self, 'BN_mean'):
            self.BN_mean = T.mean(output)
        if not hasattr(self, 'BN_std'):
            m2 = (1 + 1 / (T.prod(output.shape) - 1)).astype(floatX)
            self.BN_std = T.sqrt(m2 * T.var(output) + self.BN_eps)
        if self.BN_mode == 2:
            # per-channel batch norm: batch statistics at training time,
            # stored statistics at test time
            t_mean = T.mean(output, axis=[0, 2, 3], keepdims=True)
            t_var = T.var(output, axis=[0, 2, 3], keepdims=True)
            BN_mean = BNparams[p_(self.prefix, 'mean')].dimshuffle(
                'x', 0, 'x', 'x')
            BN_std = BNparams[p_(self.prefix, 'std')].dimshuffle(
                'x', 0, 'x', 'x')
            output = ifelse(
                self.training,
                (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                (output - BN_mean) / BN_std)
            output *= tparams[p_(self.prefix, 'BN_scale')].dimshuffle(
                'x', 0, 'x', 'x')
            output += tparams[p_(self.prefix, 'BN_shift')].dimshuffle(
                'x', 0, 'x', 'x')
        elif self.BN_mode == 1:
            # scalar batch norm over the whole tensor
            t_mean = T.mean(output)
            t_var = T.var(output)
            output = ifelse(
                self.training,
                (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                ((output - BNparams[p_(self.prefix, 'mean')])
                 / BNparams[p_(self.prefix, 'std')]))
            output *= tparams[p_(self.prefix, 'BN_scale')]
            output += tparams[p_(self.prefix, 'BN_shift')]
    self.output = self.activation(output)
Author: wufangjie, Project: dnn, Lines: 34, Source: layers.py
Example 2: get_sensi_speci
def get_sensi_speci(y_hat, y):
    # collapse the five output classes into two: {0, 1} -> negative, {2, 3, 4} -> positive
    y_hat = T.stacklists([y_hat[:, 0] + y_hat[:, 1],
                          y_hat[:, 2] + y_hat[:, 3] + y_hat[:, 4]]).T
    y_hat = T.argmax(y_hat, axis=1)  # predicted class per sample
    # encode (prediction, label) pairs: 0 = TN, 1 = FN, 10 = FP, 11 = TP
    tag = 10 * y_hat + y
    tneg = T.cast((T.shape(tag[(T.eq(tag, 0.)).nonzero()]))[0], config.floatX)
    fneg = T.cast((T.shape(tag[(T.eq(tag, 1.)).nonzero()]))[0], config.floatX)
    fpos = T.cast((T.shape(tag[(T.eq(tag, 10.)).nonzero()]))[0], config.floatX)
    tpos = T.cast((T.shape(tag[(T.eq(tag, 11.)).nonzero()]))[0], config.floatX)
    # guard against division by zero; note that a plain Python `if` on a
    # symbolic tensor does not branch at run time, hence ifelse
    speci = ifelse(T.eq(tneg + fpos, 0), np.float64(float('inf')), tneg / (tneg + fpos))
    sensi = ifelse(T.eq(tpos + fneg, 0), np.float64(float('inf')), tpos / (tpos + fneg))
    return [sensi, speci]
Author: jackal092927, Project: pylearn2_med, Lines: 35, Source: test0.py
Example 3: __init__
def __init__(self, factor=numpy.sqrt(2), decay=1.0, min_factor=None, padding=False, **kwargs):
    super(ConvFMPLayer, self).__init__(**kwargs)
    if min_factor is None:
        min_factor = factor
    factor = T.maximum(factor * (decay ** self.network.epoch), numpy.float32(min_factor))
    sizes_raw = self.source.output_sizes

    # handle size problems
    if not padding:
        padding = T.min(self.source.output_sizes / factor) <= 0
    padding = theano.printing.Print(global_fn=maybe_print_pad_warning)(padding)
    fixed_sizes = T.maximum(sizes_raw, T.cast(T.as_tensor(
        [factor + self.filter_height - 1, factor + self.filter_width - 1]), 'float32'))
    sizes = ifelse(padding, fixed_sizes, sizes_raw)
    X_size = T.cast(T.max(sizes, axis=0), "int32")

    def pad_fn(x_t, s):
        x = T.alloc(numpy.cast["float32"](0), X_size[0], X_size[1], self.X.shape[3])
        x = T.set_subtensor(x[:s[0], :s[1]], x_t[:s[0], :s[1]])
        return x

    fixed_X, _ = theano.scan(pad_fn, [self.X.dimshuffle(2, 0, 1, 3), T.cast(sizes_raw, "int32")])
    fixed_X = fixed_X.dimshuffle(1, 2, 0, 3)
    self.X = ifelse(padding, T.unbroadcast(fixed_X, 3), self.X)

    conv_out = CuDNNConvHWBCOpValidInstance(self.X, self.W, self.b)
    conv_out_sizes = self.conv_output_size_from_input_size(sizes)
    self.output, self.output_sizes = fmp(conv_out, conv_out_sizes, T.cast(factor, 'float32'))
Author: rwth-i6, Project: returnn, Lines: 29, Source: NetworkTwoDLayer.py
Example 4: _forward
def _forward(self):
    eps = self.eps
    param_size = (1, 1, self.n_output, 1, 1)
    self.gamma = self.declare(param_size)
    self.beta = self.declare(param_size)

    mean = self.inpt.mean(axis=[0, 1, 3, 4], keepdims=False)
    std = self.inpt.std(axis=[0, 1, 3, 4], keepdims=False)

    self._setup_running_metrics(self.n_output)
    self.running_mean.default_update = ifelse(
        self.training,
        (1.0 - self.alpha) * self.running_mean + self.alpha * mean,
        self.running_mean
    )
    self.running_std.default_update = ifelse(
        self.training,
        (1.0 - self.alpha) * self.running_std + self.alpha * std,
        self.running_std
    )

    # This will be optimized away, but ensures the running mean and the
    # running std get updated.
    # Reference: https://gist.github.com/f0k/f1a6bd3c8585c400c190#file-batch_norm-py-L86
    mean += 0 * self.running_mean
    std += 0 * self.running_std

    use_mean = ifelse(self.training, mean, self.running_mean)
    use_std = ifelse(self.training, std, self.running_std)
    use_mean = use_mean.dimshuffle('x', 'x', 0, 'x', 'x')
    use_std = use_std.dimshuffle('x', 'x', 0, 'x', 'x')
    norm_inpt = (self.inpt - use_mean) / (use_std + eps)
    self.output = self.gamma * norm_inpt + self.beta
Author: jhzhou1111, Project: CNNbasedMedicalSegmentation, Lines: 34, Source: basic.py
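The `0 * self.running_mean` trick above is worth isolating. Here is a minimal sketch of the mechanism (our own illustration, not from the project): a shared variable's default_update is applied by any compiled function whose graph contains that variable, so referencing it, even with zero weight, is enough to trigger the update.

import numpy as np
import theano
import theano.tensor as T

acc = theano.shared(np.float32(0.), name='acc')
x = T.fscalar('x')
acc.default_update = acc + x  # applied whenever acc appears in a compiled graph

y = x + 0 * acc  # the 0 * acc term pulls acc (and its default_update) into the graph
f = theano.function([x], y)
print(f(3.0))           # -> 3.0
print(acc.get_value())  # -> 3.0, the default_update ran as a side effect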
Example 5: AdaMaxAvg2
def AdaMaxAvg2(ws, objective, alpha=.01, beta1=.1, beta2=.001, beta3=0.01, n_accum=1):
    if n_accum == 1:
        return AdaMaxAvg(ws, objective, alpha, beta1, beta2, beta3)
    print('AdaMax_Avg2', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2,
          'beta3:', beta3, 'n_accum:', n_accum)

    gs = G.ndict.T_grad(objective.sum(), ws, disconnected_inputs='raise')
    new = OrderedDict()

    from theano.ifelse import ifelse
    it = G.sharedf(0.)
    new[it] = it + 1
    # accumulate gradients over n_accum steps; reset at the start of each
    # accumulation window, apply the update at its end
    reset = T.eq(T.mod(it, n_accum), 0)
    update = T.eq(T.mod(it, n_accum), n_accum - 1)

    ws_avg = []
    for j in range(len(ws)):
        w_avg = {}
        for i in ws[j]:
            _w = ws[j][i]
            _g = gs[j][i]
            # _g = T.switch(T.isnan(_g), T.zeros_like(_g), _g)  # remove NaNs
            mom1 = G.sharedf(_w.get_value() * 0.)
            _max = G.sharedf(_w.get_value() * 0.)
            w_avg[i] = G.sharedf(_w.get_value())
            g_sum = G.sharedf(_w.get_value() * 0.)
            new[g_sum] = ifelse(reset, _g, g_sum + _g)
            new[mom1] = ifelse(update, (1 - beta1) * mom1 + beta1 * new[g_sum], mom1)
            new[_max] = ifelse(update, T.maximum((1 - beta2) * _max, abs(new[g_sum]) + 1e-8), _max)
            new[_w] = ifelse(update, _w + alpha * new[mom1] / new[_max], _w)
            new[w_avg[i]] = ifelse(update, beta3 * new[_w] + (1. - beta3) * w_avg[i], w_avg[i])
        ws_avg += [w_avg]
    return new, ws_avg
Author: gburt, Project: iaf, Lines: 34, Source: optim.py
Example 6: call
def call(self, vals, mask=None):
    block_out = vals[0]
    prev_out = vals[1]
    test_out = self.zi * block_out
    return ifelse(self.test, test_out, ifelse(self.zi, block_out, prev_out))
Author: caboj, Project: deep_learning_depth, Lines: 7, Source: resnet.py
Example 7: gate_layer
def gate_layer(tparams, X_word, X_char, options, prefix, pretrain_mode, activ='lambda x: x', **kwargs):
    """
    Compute the forward pass for a gate layer.

    Parameters
    ----------
    tparams : OrderedDict of theano shared variables, {parameter name: value}
    X_word : theano 3d tensor, word input, dimensions: (num of time steps, batch size, dim of vector)
    X_char : theano 3d tensor, char input, dimensions: (num of time steps, batch size, dim of vector)
    options : dictionary, {hyperparameter: value}
    prefix : string, layer name
    pretrain_mode : theano shared scalar, 0. = word only, 1. = char only, 2. = word & char
    activ : string, activation function: 'linear', 'tanh', or 'rectifier'

    Returns
    -------
    X : theano 3d tensor, final vector, dimensions: (num of time steps, batch size, dim of vector)
    """
    # compute gating values, Eq.(3)
    G = tensor.nnet.sigmoid(tensor.dot(X_word, tparams[p_name(prefix, 'v')]) + tparams[p_name(prefix, 'b')][0])
    # nested ifelse dispatches on the pretraining mode at run time
    X = ifelse(tensor.le(pretrain_mode, numpy.float32(1.)),
               ifelse(tensor.eq(pretrain_mode, numpy.float32(0.)), X_word, X_char),
               G[:, :, None] * X_char + (1. - G)[:, :, None] * X_word)
    return eval(activ)(X)
Author: nyu-dl, Project: gated_word_char_rlm, Lines: 25, Source: layers.py
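Because pretrain_mode enters the graph as a shared scalar rather than a Python value, the branch is chosen at call time and the mode can be changed without recompiling. A hypothetical usage sketch (the setup below is our own, not from the project):

import numpy
import theano

# pretrain_mode as used above: a shared scalar switched between training phases
pretrain_mode = theano.shared(numpy.float32(0.), name='pretrain_mode')  # word only

# ... build and compile the graph with gate_layer(..., pretrain_mode=pretrain_mode) ...

pretrain_mode.set_value(numpy.float32(1.))  # switch to char only
pretrain_mode.set_value(numpy.float32(2.))  # switch to gated word & char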
Example 8: more_complex_test
def more_complex_test():
    notimpl = NotImplementedOp()
    ifelseifelseif = IfElseIfElseIf()

    x1 = T.scalar('x1')
    x2 = T.scalar('x2')
    c1 = T.scalar('c1')
    c2 = T.scalar('c2')
    t1 = ifelse(c1, x1, notimpl(x2))
    t1.name = 't1'
    t2 = t1 * 10
    t2.name = 't2'
    t3 = ifelse(c2, t2, x1 + t1)
    t3.name = 't3'
    t4 = ifelseifelseif(T.eq(x1, x2), x1, T.eq(x1, 5), x2, c2, t3, t3 + 0.5)
    t4.name = 't4'

    f = function([c1, c2, x1, x2], t4, mode=Mode(linker='vm',
                                                 optimizer='fast_run'))
    if theano.config.vm.lazy is False:
        # without lazy evaluation the NotImplementedOp branch is computed
        try:
            f(1, 0, numpy.array(10, dtype=x1.dtype), 0)
            assert False
        except NotImplementedOp.E:
            pass
    else:
        print(f(1, 0, numpy.array(10, dtype=x1.dtype), 0))
        assert f(1, 0, numpy.array(10, dtype=x1.dtype), 0) == 20.5
        print('... passed')
Author: Ambier, Project: Theano, Lines: 29, Source: test_lazy.py
Example 9: get_aggregator
def get_aggregator(self):
    initialized = shared_like(0.)
    numerator_acc = shared_like(self.numerator)
    denominator_acc = shared_like(self.denominator)
    # add to the accumulators only once they have been initialized
    conditional_update_num = ifelse(initialized,
                                    self.numerator + numerator_acc,
                                    self.numerator)
    conditional_update_den = ifelse(initialized,
                                    self.denominator + denominator_acc,
                                    self.denominator)

    initialization_updates = [(numerator_acc,
                               tensor.zeros_like(numerator_acc)),
                              (denominator_acc,
                               tensor.zeros_like(denominator_acc)),
                              (initialized, 0.)]
    accumulation_updates = [(numerator_acc,
                             conditional_update_num),
                            (denominator_acc,
                             conditional_update_den),
                            (initialized, 1.)]
    aggregator = Aggregator(aggregation_scheme=self,
                            initialization_updates=initialization_updates,
                            accumulation_updates=accumulation_updates,
                            readout_variable=(numerator_acc /
                                              denominator_acc))
    return aggregator
Author: Fdenpc, Project: blocks, Lines: 28, Source: aggregation.py
Example 10: build_model
def build_model(self):
    print('\n... building the model with unroll=%d, backroll=%d'
          % (self.source.unroll, self.source.backroll))
    x = T.imatrix('x')
    y = T.imatrix('y')
    reset = T.scalar('reset')
    hiddens = [h['init'] for h in self.hiddens.values()]
    outputs_info = [None] * 3 + hiddens
    [losses, probs, errors, hids], updates = \
        theano.scan(self.step, sequences=[x, y], outputs_info=outputs_info)
    loss = losses.sum()
    error = errors.sum() / T.cast((T.neq(y, 255).sum()), floatX)

    # carry the hidden state across batches unless a reset is requested
    hidden_updates_train = []
    hidden_updates_test = []
    for h in self.hiddens.values():
        h_train = ifelse(T.eq(reset, 0),
                         hids[-1 - self.source.backroll, :], T.ones_like(h['init']))
        h_test = ifelse(T.eq(reset, 0),
                        hids[-1, :], T.ones_like(h['init']))
        hidden_updates_train.append((h['init'], h_train))
        hidden_updates_test.append((h['init'], h_test))
    updates = self.source.get_updates(loss, self.sgd_params)
    updates += hidden_updates_train
    rets = [loss, probs[-1, :], error]
    mode = theano.Mode(linker='cvm')
    train_model = theano.function([x, y, reset, self.lr], rets,
                                  updates=updates, mode=mode)
    test_model = theano.function([x, y, reset], rets,
                                 updates=hidden_updates_test, mode=mode)
    return train_model, test_model
Author: ivanhe, Project: rnn, Lines: 30, Source: model.py
Example 11: norm_col
def norm_col(w, h):
    """Normalize the column vector w (Theano function).

    Apply the inverse normalization to h such that w.h does not change.

    Parameters
    ----------
    w : Theano vector
        vector to be normalized
    h : Theano vector
        vector to be scaled by the inverse normalization

    Returns
    -------
    w : Theano vector with the same shape as w
        normalized vector (w/norm)
    h : Theano vector with the same shape as h
        h*norm
    """
    norm = w.norm(2, 0)
    eps = 1e-12
    size_norm = (T.ones_like(w)).norm(2, 0)
    # fall back to an epsilon-regularized rescaling when the norm is ~0
    w = ifelse(T.gt(norm, eps),
               w / norm,
               (w + eps) / (eps * size_norm).astype(theano.config.floatX))
    h = ifelse(T.gt(norm, eps),
               h * norm,
               (h * eps * size_norm).astype(theano.config.floatX))
    return w, h
Author: rserizel, Project: groupNMF, Lines: 28, Source: base.py
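A hedged usage sketch (assuming the norm_col definition above, plus the usual imports): compile it into a function and check that the rescaling of h compensates the normalization of w.

import numpy as np
import theano
import theano.tensor as T

w = T.vector('w')
h = T.vector('h')
w_n, h_n = norm_col(w, h)
f = theano.function([w, h], [w_n, h_n])

w_val = np.asarray([3., 4.], dtype=theano.config.floatX)
h_val = np.asarray([2., 2.], dtype=theano.config.floatX)
w_out, h_out = f(w_val, h_val)
print(w_out)  # [0.6, 0.8] -- unit L2 norm
print(h_out)  # [10., 10.] -- scaled by the norm (5.0), so w.h is unchanged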
Example 12: get_aggregator
def get_aggregator(self):
    initialized = shared_like(0.)
    numerator_acc = shared_like(self.numerator)
    denominator_acc = shared_like(self.denominator)

    # Dummy default expression to use as the previously-aggregated
    # value, that has the same shape as the new result
    numerator_zeros = tensor.as_tensor(self.numerator).zeros_like()
    denominator_zeros = tensor.as_tensor(self.denominator).zeros_like()
    conditional_update_num = self.numerator + ifelse(initialized,
                                                     numerator_acc,
                                                     numerator_zeros)
    conditional_update_den = self.denominator + ifelse(initialized,
                                                       denominator_acc,
                                                       denominator_zeros)

    initialization_updates = [(numerator_acc,
                               tensor.zeros_like(numerator_acc)),
                              (denominator_acc,
                               tensor.zeros_like(denominator_acc)),
                              (initialized, 0.)]
    accumulation_updates = [(numerator_acc,
                             conditional_update_num),
                            (denominator_acc,
                             conditional_update_den),
                            (initialized, 1.)]
    aggregator = Aggregator(aggregation_scheme=self,
                            initialization_updates=initialization_updates,
                            accumulation_updates=accumulation_updates,
                            readout_variable=(numerator_acc /
                                              denominator_acc))
    return aggregator
Author: AdityoSanjaya, Project: blocks, Lines: 33, Source: aggregation.py
Example 13: test_merge_ifs_true_false
def test_merge_ifs_true_false(self):
    raise SkipTest("Optimization temporarily disabled")
    x1 = tensor.scalar('x1')
    x2 = tensor.scalar('x2')
    y1 = tensor.scalar('y1')
    y2 = tensor.scalar('y2')
    w1 = tensor.scalar('w1')
    w2 = tensor.scalar('w2')
    c = tensor.iscalar('c')
    out = ifelse(c,
                 ifelse(c, x1, x2) + ifelse(c, y1, y2) + w1,
                 ifelse(c, x1, x2) + ifelse(c, y1, y2) + w2)
    f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                        allow_input_downcast=True)
    # after the merge optimization a single IfElse node should remain
    assert len([x for x in f.maker.env.toposort()
                if isinstance(x.op, IfElse)]) == 1

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                          vx1 + vy1 + vw1)
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                          vx2 + vy2 + vw2)
Author: glorotxa, Project: Theano, Lines: 29, Source: test_ifelse.py
Example 14: _recursive_step
def _recursive_step(self, i, regs, tokens, seqs, back_routes, back_lens):
    seq = seqs[i]
    # Encoding: negative indices refer to input tokens, non-negative
    # indices refer to register slots
    left, right, target = seq[0], seq[1], seq[2]
    left_rep = ifelse(T.lt(left, 0), tokens[-left], regs[left])
    right_rep = ifelse(T.lt(right, 0), tokens[-right], regs[right])
    rep = self._encode_computation(left_rep, right_rep)
    if self.deep:
        inter_rep = rep
        rep = self._deep_encode(inter_rep)
    else:
        inter_rep = T.constant(0)
    new_regs = T.set_subtensor(regs[target], rep)
    back_len = back_lens[i]
    back_reps, lefts, rights = self._unfold(back_routes[i], new_regs, back_len)
    gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2, distance, rep_gradient = self._unfold_gradients(
        back_reps, lefts, rights, back_routes[i], tokens, back_len)
    return ([rep, inter_rep, left_rep, right_rep, new_regs, rep_gradient, distance],
            self.decode_optimizer.setup([self.W_d1, self.W_d2, self.B_d1, self.B_d2],
                                        [gf_W_d1, gf_W_d2, gf_B_d1, gf_B_d2],
                                        method=self.optimization, beta=self.beta))
Author: zomux, Project: nlpy, Lines: 28, Source: rae.py
Example 15: decay
def decay(self):
    # reset the batch counter and decay the learning rate once
    # decay_batch batches have been seen
    updates = []
    new_batch = ifelse(T.gt(self.batch, self.decay_batch), sharedX(0), self.batch + 1)
    new_lr = ifelse(T.gt(self.batch, self.decay_batch), self.lr * self.lr_decay_factor, self.lr)
    updates.append((self.batch, new_batch))
    updates.append((self.lr, new_lr))
    return updates
Author: ColaWithIce, Project: Mozi, Lines: 7, Source: learning_method.py
Example 16: gradients
def gradients(cost, parameters, lr=0.001):
    updates = []
    c = 0
    for param in parameters:
        update = param - lr * theano.grad(cost, param)
        if c == 1 or c == 3:
            # wrap the angle parameters around [0, 2*pi] instead of clipping
            update = ifelse(t.lt(update, 0), np.pi * 2 - 0.001, update)
            update = ifelse(t.gt(update, np.pi * 2), 0.001, update)
        if c == 2:
            update = ifelse(t.lt(update, 2), float(20), update)
        elif c == 5 or c == 6:
            update = t.maximum(update, -5)
            update = t.minimum(update, 5)
        updates.append((param, update))
        c += 1
    return updates
Author: dlacombejr, Project: sparse_filtering, Lines: 33, Source: gabor_fit.py
Example 17: test_pushout1
def test_pushout1(self):
    raise SkipTest("Optimization temporarily disabled")
    x1 = tensor.scalar('x1')
    x2 = tensor.scalar('x2')
    y1 = tensor.scalar('y1')
    y2 = tensor.scalar('y2')
    w1 = tensor.scalar('w1')
    w2 = tensor.scalar('w2')
    c = tensor.iscalar('c')
    x, y = ifelse(c, (x1, y1), (x2, y2), name='f1')
    z = ifelse(c, w1, w2, name='f2')
    out = x * z * y

    f = theano.function([x1, x2, y1, y2, w1, w2, c], out,
                        allow_input_downcast=True)
    assert isinstance(f.maker.env.toposort()[-1].op, IfElse)

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx1 = rng.uniform()
    vx2 = rng.uniform()
    vy1 = rng.uniform()
    vy2 = rng.uniform()
    vw1 = rng.uniform()
    vw2 = rng.uniform()
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 1),
                          vx1 * vy1 * vw1)
    assert numpy.allclose(f(vx1, vx2, vy1, vy2, vw1, vw2, 0),
                          vx2 * vy2 * vw2)
Author: glorotxa, Project: Theano, Lines: 28, Source: test_ifelse.py
Example 18: beta_div
def beta_div(X, W, H, beta):
    """Compute the beta divergence D(X|WH).

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar

    Returns
    -------
    div : Theano scalar
        beta divergence D(X|WH)
    """
    # special-case the Euclidean (beta=2), Itakura-Saito (beta=0) and
    # KL (beta=1) divergences; fall through to the generic formula otherwise
    div = ifelse(
        T.eq(beta, 2),
        T.sum(1. / 2 * T.power(X - T.dot(H, W), 2)),
        ifelse(
            T.eq(beta, 0),
            T.sum(X / T.dot(H, W) - T.log(X / T.dot(H, W)) - 1),
            ifelse(
                T.eq(beta, 1),
                T.sum(T.mul(X, (T.log(X) - T.log(T.dot(H, W)))) + T.dot(H, W) - X),
                T.sum(1. / (beta * (beta - 1.)) * (T.power(X, beta) +
                      (beta - 1.) * T.power(T.dot(H, W), beta) -
                      beta * T.power(T.mul(X, T.dot(H, W)), (beta - 1)))))))
    return div
Author: rserizel, Project: beta_nmf, Lines: 31, Source: costs.py
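Since beta is a symbolic scalar, one compiled function covers every case; the nested ifelse dispatches at call time. A hedged usage sketch (assuming the beta_div definition above; shapes are chosen so that T.dot(H, W) matches X):

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')  # (n, m)
W = T.matrix('W')  # (k, m)
H = T.matrix('H')  # (n, k)
beta = T.scalar('beta')
div = theano.function([X, W, H, beta], beta_div(X, W, H, beta))

rng = np.random.RandomState(0)
x = rng.rand(5, 4).astype(theano.config.floatX) + 0.1  # strictly positive data
w = rng.rand(3, 4).astype(theano.config.floatX) + 0.1
h = rng.rand(5, 3).astype(theano.config.floatX) + 0.1
for b in (0., 1., 2., 0.5):  # IS, KL, Euclidean, and a generic beta
    print(b, div(x, w, h, b))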
Example 19: momentum_normscaled
def momentum_normscaled(loss, all_params, lr, mom, batch_size, max_norm=np.inf,
                        weight_decay=0.0, verbose=False):
    updates = []
    # all_grads = [theano.grad(loss, param) for param in all_params]
    all_grads = theano.grad(gradient_clipper(loss), all_params)
    grad_lst = [T.sum((grad / float(batch_size)) ** 2) for grad in all_grads]
    grad_norm = T.sqrt(T.sum(grad_lst))
    if verbose:
        grad_norm = theano.printing.Print('MOMENTUM GRAD NORM1:')(grad_norm)
    # rescale all gradients if their joint norm exceeds max_norm
    all_grads = ifelse(T.gt(grad_norm, max_norm),
                       [grads * (max_norm / grad_norm) for grads in all_grads],
                       all_grads)
    if verbose:
        # recompute and print the norm after rescaling (debugging only)
        grad_lst = [T.sum((grad / float(batch_size)) ** 2) for grad in all_grads]
        grad_norm = T.sqrt(T.sum(grad_lst))
        grad_norm = theano.printing.Print('MOMENTUM GRAD NORM2:')(grad_norm)
        all_grads = ifelse(T.gt(grad_norm, np.inf),
                           [grads * (max_norm / grad_norm) for grads in all_grads],
                           all_grads)
    for param_i, grad_i in zip(all_params, all_grads):
        mparam_i = theano.shared(np.zeros(param_i.get_value().shape,
                                          dtype=theano.config.floatX))
        v = mom * mparam_i - lr * (weight_decay * param_i + grad_i)
        updates.append((mparam_i, v))
        updates.append((param_i, param_i + v))
    return updates
Author: benathi, Project: nntools, Lines: 31, Source: LSTMTrainingFunctions.py
Example 20: build_model
def build_model(shared_params, options, other_params):
    """
    Build the complete neural network model and return the symbolic variables.
    """
    # symbolic variables
    x = tensor.matrix(name="x", dtype=floatX)
    y1 = tensor.iscalar(name="y1")
    y2 = tensor.iscalar(name="y2")

    # lstm cell
    (ht, ct) = lstm_cell(x, shared_params, options, other_params)  # gets the ht, ct

    # softmax 1, i.e. frame type prediction
    activation = tensor.dot(shared_params['softmax1_W'], ht).transpose() + shared_params['softmax1_b']
    frame_pred = tensor.nnet.softmax(activation)  # .transpose()

    # softmax 2, i.e. gesture class prediction (omitted here)

    # predicted probability for frame type
    f_pred_prob = theano.function([x], frame_pred, name="f_pred_prob")
    # predicted frame type
    f_pred = theano.function([x], frame_pred.argmax(), name="f_pred")

    # cost: weight the log-loss per frame type via a chain of nested ifelse
    cost = ifelse(tensor.eq(y1, 1),
                  -tensor.log(frame_pred[0, 0] + options['log_offset'])
                  * other_params['begin_cost_factor'],
                  ifelse(tensor.eq(y1, 2),
                         -tensor.log(frame_pred[0, 1] + options['log_offset'])
                         * other_params['end_cost_factor'],
                         ifelse(tensor.eq(y1, 3),
                                -tensor.log(frame_pred[0, 2] + options['log_offset']),
                                tensor.abs_(tensor.log(y1)))), name='ifelse_cost')

    # function for output of the current lstm cell and softmax prediction
    f_model_cell_output = theano.function([x], (ht, ct, frame_pred), name="f_model_cell_output")
    # return the model symbolic variables and theano functions
    return x, y1, y2, f_pred_prob, f_pred, cost, f_model_cell_output
Author: inblueswithu, Project: Theano_Trail, Lines: 35, Source: lstm_model_3b.py
Note: the theano.ifelse.ifelse examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not reproduce without permission.