This article collects typical usage examples of the Python function theano.tensor.add. If you have been wondering what exactly add does, how to call it, or where to find working examples, the hand-picked code samples below should help.
A total of 20 code examples of the add function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
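Before diving into the examples, here is a minimal, self-contained sketch of what tensor.add does (the variable names are our own, not from any example below): it is the elementwise addition behind the + operator, it accepts two or more tensor arguments, and it broadcasts the way NumPy does.

import numpy
import theano
import theano.tensor as T

# A minimal sketch; x, y and b are illustrative names.
x = T.matrix('x')
y = T.matrix('y')
b = T.vector('b')                      # broadcasts across rows, NumPy-style
s = T.add(x, y, b)                     # elementwise sum of all three arguments
f = theano.function([x, y, b], s)

ones = numpy.ones((2, 3), dtype=theano.config.floatX)
bias = numpy.arange(3, dtype=theano.config.floatX)
print(f(ones, ones, bias))             # [[2. 3. 4.] [2. 3. 4.]]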
Example 1: mcmc
def mcmc(ll, *frvs):
    full_observations = dict(observations)
    full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, frvs)]))
    loglik = -full_log_likelihood(full_observations)   # energy of the current state

    proposals = free_RVs_prop
    H = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals]) / 2. + loglik

    # -- this should be an inner loop
    g = []
    g.append(tensor.grad(loglik, frvs))
    proposals = [(p - epsilon * gg[0] / 2.) for p, gg in zip(proposals, g)]
    rvsp = [(rvs + epsilon * rvp) for rvs, rvp in zip(frvs, proposals)]

    full_observations = dict(observations)
    full_observations.update(dict([(rv, s) for rv, s in zip(free_RVs, rvsp)]))
    new_loglik = -full_log_likelihood(full_observations)

    gnew = []
    gnew.append(tensor.grad(new_loglik, rvsp))
    proposals = [(p - epsilon * gn[0] / 2.) for p, gn in zip(proposals, gnew)]
    # --

    Hnew = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in proposals]) / 2. + new_loglik
    dH = Hnew - H
    accept = tensor.or_(dH < 0., U < tensor.exp(-dH))

    return [tensor.switch(accept, -new_loglik, ll)] + \
           [tensor.switch(accept, p, f) for p, f in zip(rvsp, frvs)], \
           {}, theano.scan_module.until(accept)
Author: helson73, Project: MonteTheano, Lines: 34, Source: sample.py
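Note the tensor.add(*[...]) idiom used twice above: unpacking a Python list of symbolic terms sums them all in a single node, here assembling the kinetic-energy term sum(p^2)/2 of the Hamiltonian. A hedged standalone sketch (the names are ours, not MonteTheano's):

from theano import tensor

# Hypothetical momentum tensors standing in for the proposals above.
momenta = [tensor.vector('p%d' % i) for i in range(3)]
kinetic = tensor.add(*[tensor.sum(tensor.sqr(p)) for p in momenta]) / 2.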
Example 2: t_forward_step
def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ifco, b_ifco,
                   ln_b1, ln_s1, ln_b2, ln_s2, ln_b3, ln_s3, t_n_out):
    cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)
    pre_w_out_sig = T.dot(pre_out_sig, w_ifco)
    pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)
    preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)

    inner_act = self.activation   # e.g. T.tanh or T.nnet.hard_sigmoid
    gate_act = self.sigmoid()     # e.g. T.nnet.sigmoid or T.nnet.hard_sigmoid

    # Input Gate
    ig_t1 = gate_act(preact[:, 0:t_n_out])
    # Forget Gate
    fg_t1 = gate_act(preact[:, 1 * t_n_out:2 * t_n_out])
    # Cell State
    cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(preact[:, 2 * t_n_out:3 * t_n_out])))

    mask = T.addbroadcast(mask, 1)
    cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
    cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)

    # Output Gate
    og_t1 = gate_act(preact[:, 3 * t_n_out:4 * t_n_out])
    # Output LSTM
    out_sig = T.mul(og_t1, inner_act(cs_t1_ln))
    out_sig = mask * out_sig + (1. - mask) * pre_out_sig
    return [out_sig, cs_t1]
Author: dzungcamlang, Project: recnet, Lines: 34, Source: ln_reccurent_layer.py
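The repeated slicing preact[:, k * t_n_out:(k + 1) * t_n_out] works because the input-gate, forget-gate, cell, and output-gate pre-activations are packed side by side in one (batch, 4 * n) matrix, so the single T.add above produces all four gate pre-activations at once. A toy sketch of that layout (names and sizes are made up):

import numpy

n = 2                                        # hypothetical units per gate
preact = numpy.arange(8.).reshape(1, 4 * n)  # one batch row, four gates side by side
ig = preact[:, 0 * n:1 * n]                  # input gate slice
fg = preact[:, 1 * n:2 * n]                  # forget gate slice
cs = preact[:, 2 * n:3 * n]                  # cell candidate slice
og = preact[:, 3 * n:4 * n]                  # output gate slice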
Example 3: __init__
def __init__(self, rng, input3, initial_hidden, n_in, n_hidden):
    self.input3 = input3
    self.initial_hidden = initial_hidden

    matrix1 = numpy.asarray(rng.uniform(
        low=-numpy.sqrt(6. / (n_in + n_hidden)),
        high=numpy.sqrt(6. / (n_in + n_hidden)),
        size=(n_in, n_hidden)), dtype='float32')
    self.W1 = theano.shared(value=matrix1, name='W1')

    matrix2 = numpy.asarray(rng.uniform(
        low=-numpy.sqrt(6. / (n_hidden + n_hidden)),
        high=numpy.sqrt(6. / (n_hidden + n_hidden)),
        size=(n_hidden, n_hidden)), dtype='float32')
    self.W2 = theano.shared(value=matrix2, name='W2')

    b_values = numpy.zeros((n_hidden,), dtype='float32')
    self.b = theano.shared(value=b_values, name='b')

    #self.initial_hidden = theano.shared(numpy.zeros(n_hidden,), dtype='float32', name='initial_hidden')
    self.output = T.tanh(T.add(T.add(T.dot(self.input3, self.W1), T.dot(self.initial_hidden, self.W2)), self.b))
    self.params = [self.W2, self.b, self.W1]
Author: andersonhaynes, Project: nnlm, Lines: 27, Source: mlprnn.py
Example 4: output
def output(self, train):
    X = self.get_input(train)  # shape: (nb_samples, time (padded with zeros at the end), input_dim)
    # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over the main dimension
    X = X.dimshuffle((1, 0, 2))

    xf = self.activation(T.dot(X, self.W_if) + self.b_if)
    xb = self.activation(T.dot(X, self.W_ib) + self.b_ib)
    b_o = self.b_o
    b_on = T.repeat(T.repeat(b_o.reshape((1, self.output_dim)), X.shape[0], axis=0).reshape((1, X.shape[0], self.output_dim)), X.shape[1], axis=0)

    # Iterate forward over the first dimension of the x array (=time).
    outputs_f, updates_f = theano.scan(
        self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
        sequences=xf,  # tensors to iterate over, inputs to _step
        # initialization of the output. Input to _step with default tap=-1.
        outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
        non_sequences=[self.W_ff, self.b_f],  # static inputs to _step
        truncate_gradient=self.truncate_gradient
    )
    # Iterate backward over the first dimension of the x array (=time).
    outputs_b, updates_b = theano.scan(
        self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
        sequences=xb,  # tensors to iterate over, inputs to _step
        # initialization of the output. Input to _step with default tap=-1.
        outputs_info=alloc_zeros_matrix(X.shape[1], self.output_dim),
        non_sequences=[self.W_bb, self.b_b],  # static inputs to _step
        truncate_gradient=self.truncate_gradient,
        go_backwards=True  # Iterate backwards through time
    )

    #return outputs_f.dimshuffle((1, 0, 2))
    if self.return_sequences:
        return T.add(T.tensordot(T.add(outputs_f.dimshuffle((1, 0, 2)), outputs_b[::-1].dimshuffle((1, 0, 2))), self.W_o, [[2], [0]]), b_on)
    return T.concatenate((outputs_f[-1], outputs_b[0]))
Author: CVML, Project: CRCN, Lines: 32, Source: recurrent.py
Example 5: output
def output(self, train):
    X = self.get_input(train)
    X = X.dimshuffle((1, 0, 2))

    if self.is_entity:
        Entity = X[-1:].dimshuffle(1, 0, 2)
        X = X[:-1]

    b_y = self.b_y
    b_yn = T.repeat(T.repeat(b_y.reshape((1, self.output_dim)), X.shape[0], axis=0).reshape((1, X.shape[0], self.output_dim)), X.shape[1], axis=0)

    xif = T.dot(X, self.W_if) + self.b_if
    xib = T.dot(X, self.W_ib) + self.b_ib
    xff = T.dot(X, self.W_ff) + self.b_ff
    xfb = T.dot(X, self.W_fb) + self.b_fb
    xcf = T.dot(X, self.W_cf) + self.b_cf
    xcb = T.dot(X, self.W_cb) + self.b_cb
    xof = T.dot(X, self.W_of) + self.b_of
    xob = T.dot(X, self.W_ob) + self.b_ob

    [outputs_f, memories_f], updates_f = theano.scan(
        self._step,
        sequences=[xif, xff, xof, xcf],
        outputs_info=[
            alloc_zeros_matrix(X.shape[1], self.output_dim),
            alloc_zeros_matrix(X.shape[1], self.output_dim)
        ],
        non_sequences=[self.U_if, self.U_ff, self.U_of, self.U_cf],
        truncate_gradient=self.truncate_gradient
    )
    [outputs_b, memories_b], updates_b = theano.scan(
        self._step,
        sequences=[xib, xfb, xob, xcb],
        outputs_info=[
            alloc_zeros_matrix(X.shape[1], self.output_dim),
            alloc_zeros_matrix(X.shape[1], self.output_dim)
        ],
        non_sequences=[self.U_ib, self.U_fb, self.U_ob, self.U_cb],
        truncate_gradient=self.truncate_gradient
    )

    if self.return_sequences:
        y = T.add(T.add(
                T.tensordot(outputs_f.dimshuffle((1, 0, 2)), self.W_yf, [[2], [0]]),
                T.tensordot(outputs_b[::-1].dimshuffle((1, 0, 2)), self.W_yb, [[2], [0]])),
            b_yn)
        # y = T.add(T.tensordot(
        #     T.add(outputs_f.dimshuffle((1, 0, 2)),
        #           outputs_b[::-1].dimshuffle((1, 0, 2))),
        #     self.W_y, [[2], [0]]), b_yn)
        if self.is_entity:
            return T.concatenate([y, Entity], axis=1)
        else:
            return y
    return T.concatenate((outputs_f[-1], outputs_b[0]))
Author: whyjay, Project: CRCN, Lines: 58, Source: recurrent.py
Example 6: f1_score
def f1_score(self, y):
    n_total = y.shape[0]
    n_relevant_documents_predicted = T.sum(T.eq(T.ones(self.y_pred.shape), self.y_pred))
    two_vector = T.add(T.ones(self.y_pred.shape), T.ones(self.y_pred.shape))
    n_relevant_predicted_correctly = T.sum(T.eq(T.add(self.y_pred, y), two_vector))
    precision = T.true_div(n_relevant_predicted_correctly, n_relevant_documents_predicted)
    recall = T.true_div(n_relevant_predicted_correctly, n_total)
    f1_score = T.mul(2.0, T.true_div(T.mul(precision, recall), T.add(precision, recall)))
    return [f1_score, precision, recall]
Author: ericrincon, Project: Deep-Learning-NLP, Lines: 9, Source: LogisticRegression.py
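As a sanity check on the arithmetic: with binary labels, T.add(self.y_pred, y) equals 2 exactly where prediction and target are both 1, so the comparison against two_vector counts true positives. A quick NumPy check with made-up labels:

import numpy

y_pred = numpy.array([1, 0, 1, 1])              # hypothetical predictions
y_true = numpy.array([1, 0, 0, 1])              # hypothetical targets
tp = float(numpy.sum((y_pred + y_true) == 2))   # true positives: both equal 1 -> 2
precision = tp / numpy.sum(y_pred == 1)         # 2/3
recall = tp / len(y_true)                       # note: the snippet divides by all samples -> 2/4
f1 = 2 * precision * recall / (precision + recall)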
Example 7: __call__
def __call__(self, M, *inputs):
    summands = [Xi.dot(Wiz) for (Xi, Wiz) in zip(inputs, self.Wizs)] + [M.dot(self.Wmz), self.bz]
    z = TT.nnet.sigmoid(TT.add(*summands))
    summands = [Xi.dot(Wir) for (Xi, Wir) in zip(inputs, self.Wirs)] + [M.dot(self.Wmr), self.br]
    r = TT.nnet.sigmoid(TT.add(*summands))
    summands = [Xi.dot(Wim) for (Xi, Wim) in zip(inputs, self.Wims)] + [(r * M).dot(self.Wmm), self.bm]
    Mtarg = TT.tanh(TT.add(*summands))  # pylint: disable=E1111
    Mnew = (1 - z) * M + z * Mtarg
    return Mnew
Author: SFPD, Project: rlreloaded, Lines: 12, Source: rnn.py
Example 8: scan_y
def scan_y(cur_step):
    # Compute pairwise affinities
    sum_y = tensor.sum(tensor.square(y_arg), 1)
    num = 1 / (1 + tensor.add(tensor.add(-2 * tensor.dot(y_arg, y_arg.T), sum_y).T, sum_y))
    num = tensor.set_subtensor(num[range(n), range(n)], 0)
    Q = num / tensor.sum(num)
    Q = tensor.maximum(Q, 1e-12)
    PQ = p_arg - Q

    def inner(pq_i, num_i, y_arg_i):
        return tensor.sum(tensor.tile(pq_i * num_i, (no_dims, 1)).T * (y_arg_i - y_arg), 0)

    dy_arg, _ = theano.scan(inner,
                            outputs_info=None,
                            sequences=[PQ, num, y_arg])
    dy_arg = tensor.cast(dy_arg, FLOATX)

    momentum = ifelse(tensor.lt(cur_step, 20),
                      initial_momentum_f,
                      final_momentum_f)

    # Adapt per-element gains: grow where gradient and velocity disagree,
    # decay where they agree, and clip from below at min_gain_f.
    indexsa = tensor.neq((dy_arg > 0), (iy_arg > 0)).nonzero()
    indexsb = tensor.eq((dy_arg > 0), (iy_arg > 0)).nonzero()
    resulta = tensor.set_subtensor(gains_arg[indexsa], gains_arg[indexsa] + 0.2)
    resultb = tensor.set_subtensor(resulta[indexsb], resulta[indexsb] * 0.8)
    indexs_min = (resultb < min_gain_f).nonzero()
    new_gains_arg = tensor.set_subtensor(resultb[indexs_min], min_gain_f)

    # Gradient update (last step in the simple version of SNE), then re-center
    new_iy_arg = momentum * iy_arg - eta * (new_gains_arg * dy_arg)
    new_y_arg = y_arg + new_iy_arg
    new_y_arg = new_y_arg - tensor.tile(tensor.mean(new_y_arg, 0), (n, 1))

    # # Compute current value of the cost function every 10 steps:
    # if (cur_step + 1) % 10 == 0:
    #     C = tensor.sum(p_arg * tensor.log(p_arg / Q))

    # Stop lying about the P-values after step 100 (end of early exaggeration)
    new_p_arg = ifelse(tensor.eq(cur_step, 100),
                       p_arg / 4,
                       p_arg)
    return [(y_arg, new_y_arg), (iy_arg, new_iy_arg), (gains_arg, new_gains_arg), (p_arg, new_p_arg)]
Author: jichen3000, Project: codes, Lines: 52, Source: theano_tsne_using_updates_and_gpu.py
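The nested tensor.add on the num line is the standard squared-distance expansion ||y_i - y_j||^2 = ||y_i||^2 + ||y_j||^2 - 2 * y_i . y_j, which feeds the Student-t affinities 1 / (1 + ||y_i - y_j||^2). A quick NumPy check of that identity on toy data:

import numpy

y = numpy.random.randn(5, 2)                       # hypothetical low-dim embedding
sum_y = numpy.sum(numpy.square(y), 1)
d2 = (-2 * numpy.dot(y, y.T) + sum_y).T + sum_y    # same nested-add trick as above
ref = numpy.square(y[:, None, :] - y[None, :, :]).sum(-1)
assert numpy.allclose(d2, ref)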
Example 9: logp
def logp(self, value):
    if self.constant:
        x = tt.add(*[self.rho[i + 1] * value[self.p - (i + 1):-(i + 1)] for i in range(self.p)])
        eps = value[self.p:] - self.rho[0] - x
    else:
        if self.p == 1:
            x = self.rho * value[:-1]
        else:
            x = tt.add(*[self.rho[i] * value[self.p - (i + 1):-(i + 1)] for i in range(self.p)])
        eps = value[self.p:] - x

    innov_like = Normal.dist(mu=0.0, tau=self.tau).logp(eps)
    init_like = self.init.logp(value[:self.p])
    return tt.sum(innov_like) + tt.sum(init_like)
Author: aloctavodia, Project: pymc3, Lines: 15, Source: timeseries.py
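The tt.add(*[...]) comprehension assembles the AR(p) conditional mean sum_i rho_i * x_{t-i} from shifted slices of value; eps then holds the innovations that get scored under the Normal likelihood. A small NumPy illustration for p = 2 (the coefficients are made up):

import numpy

rho = numpy.array([0.5, -0.3])                     # hypothetical AR(2) coefficients
value = numpy.random.randn(10)
p = 2
x = sum(rho[i] * value[p - (i + 1):-(i + 1)] for i in range(p))
eps = value[p:] - x                                # one innovation per t >= p
assert eps.shape == (len(value) - p,)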
Example 10: logp
def logp(self, z):
    factors = ([tt.sum(var.logpt) for var in self.model.basic_RVs] +
               [tt.sum(var) for var in self.model.potentials])
    p = self.approx.to_flat_input(tt.add(*factors))
    p = theano.clone(p, {self.input: z})
    return p
Author: taku-y, Project: pymc3, Lines: 7, Source: opvi.py
Example 11: _mean_h_given_v
def _mean_h_given_v(self, v):
    alpha = self.usable_alpha()
    return tensor.add(
        self.b,
        -0.5 * ldot(v * v, self.Lambda) if self.Lambda else 0,
        self.mu * ldot(v, self.W),
        0.5 * tensor.sqr(ldot(v, self.W)) / alpha)
Author: jaberg, Project: ssrbm, Lines: 7, Source: rbm.py
Example 12: __call__
def __call__(self, X):
    XY = X.dot(X.T)
    x2 = tt.sum(X ** 2, axis=1).dimshuffle(0, 'x')
    X2e = tt.repeat(x2, X.shape[0], axis=1)
    H = X2e + X2e.T - 2. * XY

    V = tt.sort(H.flatten())
    length = V.shape[0]
    # median distance
    m = tt.switch(tt.eq((length % 2), 0),
                  # if even vector
                  tt.mean(V[((length // 2) - 1):((length // 2) + 1)]),
                  # if odd vector
                  V[length // 2])
    h = .5 * m / tt.log(floatX(H.shape[0]) + floatX(1))

    # RBF
    Kxy = tt.exp(-H / h / 2.0)

    # Derivative
    dxkxy = -tt.dot(Kxy, X)
    sumkxy = tt.sum(Kxy, axis=1).dimshuffle(0, 'x')
    dxkxy = tt.add(dxkxy, tt.mul(X, sumkxy)) / h

    return Kxy, dxkxy
Author: aasensio, Project: pymc3, Lines: 26, Source: test_functions.py
Example 13: __init__
def __init__(self, **kwargs):
    super(ResNet, self).__init__(**kwargs)
    assert self.status[1] == 2, "Only accept 2 sources!"
    assert self.status[0], "Only accept cnn layers!"

    x = self.sources[0]
    f_x = self.sources[1]
    time = x.output.shape[0]
    batch = x.output.shape[1]

    # residual connection: elementwise sum of the two source feature maps
    self.input = T.add(x.Output, f_x.Output)
    self.Output = T.nnet.relu(self.input)

    if self.attrs['batch_norm']:
        self.Output = self.batch_norm(
            h=self.Output.reshape(
                (self.Output.shape[0],
                 self.Output.shape[1] * self.Output.shape[2] * self.Output.shape[3])
            ),
            dim=self.attrs['n_out'],
            force_sample=self.force_sample
        ).reshape(self.Output.shape)

    output2 = self.Output.dimshuffle(0, 2, 3, 1)  # (time*batch, out-row, out-col, nb feature maps)
    self.output = output2.reshape((time, batch, output2.shape[1] * output2.shape[2] * output2.shape[3]))  # (time, batch, out-dim)
Author: rwth-i6, Project: returnn, Lines: 27, Source: NetworkCNNLayer.py
Example 14: sum_logdets
def sum_logdets(self):
    # walk up the chain of flows, collecting each log-determinant
    dets = [self.logdet]
    current = self
    while not current.isroot:
        current = current.parent
        dets.append(current.logdet)
    return tt.add(*dets)
Author: aasensio, Project: pymc3, Lines: 7, Source: flows.py
Example 15: hmc_updates
def hmc_updates(positions, stepsize, avg_acceptance_rate, final_pos, accept,
                target_acceptance_rate, stepsize_inc, stepsize_dec,
                stepsize_min, stepsize_max, avg_acceptance_slowness):
    # broadcast `accept` scalar to a tensor with the same dimensions as final_pos.
    accept_matrix = accept.dimshuffle(0, *(('x',) * (final_pos.ndim - 1)))
    # if accept is True, update to `final_pos`, else stay put
    new_positions = TT.switch(accept_matrix, final_pos, positions)

    ## STEPSIZE UPDATES ##
    # If the acceptance rate is too low, our sampler is too "noisy" and we reduce
    # the stepsize. If it is too high, our sampler is too conservative and we can
    # get away with a larger stepsize (resulting in better mixing).
    _new_stepsize = TT.switch(avg_acceptance_rate > target_acceptance_rate,
                              stepsize * stepsize_inc, stepsize * stepsize_dec)
    # keep the stepsize within [stepsize_min, stepsize_max]
    new_stepsize = TT.clip(_new_stepsize, stepsize_min, stepsize_max)

    # perform exponential moving average of the acceptance rate
    mean_dtype = theano.scalar.upcast(accept.dtype, avg_acceptance_rate.dtype)
    new_acceptance_rate = TT.add(
        avg_acceptance_slowness * avg_acceptance_rate,
        (1.0 - avg_acceptance_slowness) * accept.mean(dtype=mean_dtype))

    return [(positions, new_positions),
            (stepsize, new_stepsize),
            (avg_acceptance_rate, new_acceptance_rate)]
Author: swordli, Project: DeepLearning, Lines: 30, Source: HybridMonteCarlo.py
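The TT.add at the end is an exponential moving average, new_rate = s * old_rate + (1 - s) * batch_mean, with s = avg_acceptance_slowness close to 1 so the running acceptance estimate changes slowly. For example, with s = 0.9, an old rate of 0.8 and a fresh batch acceptance of 0.5, the update gives 0.9 * 0.8 + 0.1 * 0.5 = 0.77.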
Example 16: test_softmax_optimizations_w_bias2
def test_softmax_optimizations_w_bias2(self):
    x = tensor.matrix('x')
    b = tensor.vector('b')
    c = tensor.vector('c')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    env = gof.Env(
        [x, b, c, one_of_n],
        [op(softmax(T.add(x, b, c)), one_of_n)])
    assert env.outputs[0].owner.op == op

    print 'BEFORE'
    for node in env.toposort():
        print node.op
    print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(env)

    print 'AFTER'
    for node in env.toposort():
        print node.op
    print '===='

    assert len(env.toposort()) == 3
    assert str(env.outputs[0].owner.op) == 'OutputGuard'
    assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
Author: lberrada, Project: Theano, Lines: 28, Source: test_nnet.py
Example 17: variational_gradient_estimate
def variational_gradient_estimate(
        vars, model, minibatch_RVs=[], minibatch_tensors=[], total_size=None,
        n_mcsamples=1, random_seed=20090425):
    """Calculate approximate ELBO and its (stochastic) gradient."""

    theano.config.compute_test_value = 'ignore'
    shared = make_shared_replacements(vars, model)

    # Correction sample size
    r = 1 if total_size is None else \
        float(total_size) / minibatch_tensors[0].shape[0]

    other_RVs = set(model.basic_RVs) - set(minibatch_RVs)
    factors = [r * var.logpt for var in minibatch_RVs] + \
              [var.logpt for var in other_RVs] + model.potentials
    logpt = tt.add(*map(tt.sum, factors))

    [logp], inarray = join_nonshared_inputs([logpt], vars, shared)

    uw = dvector('uw')
    uw.tag.test_value = np.concatenate([inarray.tag.test_value,
                                        inarray.tag.test_value])

    elbo = elbo_t(logp, uw, inarray, n_mcsamples=n_mcsamples, random_seed=random_seed)

    # Gradient
    grad = gradient(elbo, [uw])

    return grad, elbo, shared, uw
Author: abojchevski, Project: pymc3, Lines: 30, Source: advi.py
Example 18: _lmul
def _lmul(self, x, T):
    if T:
        if len(self.col_shape()) > 1:
            x2 = x.flatten(2)
        else:
            x2 = x
        n_rows = x2.shape[0]
        offset = 0
        xWlist = []
        assert len(self._col_sizes) == len(self._Wlist)
        for size, W in zip(self._col_sizes, self._Wlist):
            # split the output rows into pieces
            x_s = x2[:, offset:offset + size]
            # multiply each piece by one transform
            xWlist.append(
                W.lmul(
                    x_s.reshape(
                        (n_rows,) + W.col_shape()),
                    T))
            offset += size
        # sum the results
        rval = tensor.add(*xWlist)
    else:
        # multiply the input by each transform
        xWlist = [W.lmul(x, T).flatten(2) for W in self._Wlist]
        # join the results
        rval = tensor.join(1, *xWlist)
    return rval
Author: Alienfeel, Project: pylearn2, Lines: 28, Source: linear.py
Example 19: ctc_loss
def ctc_loss(y_true, y_pred):
    def path_probs(predict, y_sym):
        pred_y = predict[:, y_sym]
        rr = recurrence_relation(y_sym.shape[0])

        def step(p_curr, p_prev, rr):
            return p_curr * T.dot(p_prev, rr)

        probabilities, _ = theano.scan(
            step,
            sequences=[pred_y],
            outputs_info=[T.eye(y_sym.shape[0])[0]],
            non_sequences=[rr]
        )
        return probabilities

    y_sym_a = T.argmax(y_true, axis=-1)
    n = T.cast(T.add(T.mul(2, y_true.shape[0] - T.sum(y_true[:, -1])), 1), 'int16')
    y_sym = T.cast(y_sym_a[:n], 'int16')
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)

    forward_probs = path_probs(y_pred, y_sym)
    backward_probs = path_probs(y_pred[::-1], y_sym[::-1])[::-1, ::-1]
    probs = forward_probs * backward_probs / y_pred[:, y_sym]
    total_probs = T.sum(probs)
    #total_probs = T.sum(forward_probs[-1, -2:])
    return -T.log(total_probs)
Author: Michlong, Project: Keras_extraFunctions, Lines: 28, Source: ctc_loss.py
Example 20: add_merge_MultiBatchBeamGradAddOp
def add_merge_MultiBatchBeamGradAddOp(node):
    if node.op != T.add: return False
    if len(node.inputs) < 2: return False
    grad_op_idx = None
    grad_op_v = None
    grad_op = None
    for i, input in enumerate(node.inputs):
        if input.owner and isinstance(input.owner.op, MultiBatchBeamGradAddOp):
            grad_op = input.owner.op
            if not grad_op.inplace:  # we cannot merge when we operate inplace on it
                grad_op_v = input
                grad_op_idx = i
                break
    if grad_op_idx is None: return False
    sum_inputs = [node.inputs[i] for i in range(len(node.inputs)) if i != grad_op_idx]
    if grad_op.zero_with_shape:
        # Make a new grad_op without zero_with_shape.
        kwargs = {k: getattr(grad_op, k) for k in grad_op.__props__}
        kwargs["zero_with_shape"] = False
        grad_op = grad_op.__class__(**kwargs)
    else:
        old_grad_op_input0 = grad_op_v.owner.inputs[0]
        sum_inputs = [old_grad_op_input0] + sum_inputs
    assert len(sum_inputs) > 0
    if len(sum_inputs) == 1:
        new_grad_op_input0 = sum_inputs[0]
    else:
        new_grad_op_input0 = T.add(*sum_inputs)
    new_grad_op_inputs = [new_grad_op_input0] + grad_op_v.owner.inputs[1:]
    new_v = grad_op(*new_grad_op_inputs)
    return [new_v]
Author: atuxhe, Project: returnn, Lines: 31, Source: MultiBatchBeam.py
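This rewrite works because applying T.add leaves an Apply node whose op compares equal to T.add and whose inputs list every summand, which is exactly what the function above inspects and rebuilds. A minimal sketch of that inspection (the variable names are ours):

import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
z = T.vector('z')
s = T.add(x, y, z)

node = s.owner                   # the Apply node created by T.add
assert node.op == T.add          # the same test the optimizer above performs
print(node.inputs)               # [x, y, z] -- the summands available for rewriting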
Note: the theano.tensor.add examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Before redistributing or reusing any snippet, please consult the license of the corresponding project; do not republish without permission.