This article collects typical usage examples of the Python function theano.tensor.zeros. If you have been wondering what the zeros function does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 13 code examples of the zeros function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
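Before the project examples, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below) of the two typical calling patterns: a shape given as plain Python integers, and a shape derived from another symbolic tensor.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                   # symbolic input; its shape is only known at run time
fixed = T.zeros((2, 3))             # shape given as Python ints, dtype defaults to floatX
like_x = T.zeros((x.shape[0], 5))   # shape taken from a symbolic expression

f = theano.function([x], [fixed, like_x])
a, b = f(np.ones((4, 7), dtype=theano.config.floatX))
print(a.shape)  # (2, 3)
print(b.shape)  # (4, 5)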
Example 1: initial_glimpses
def initial_glimpses(self, batch_size, attended):
    return ([tensor.zeros((batch_size, self.attended_dim))]
            + 2 * [tensor.concatenate([
                tensor.ones((batch_size, 1)),
                tensor.zeros((batch_size, attended.shape[0] - 1))],
                axis=1)]
            + [tensor.zeros((batch_size,), dtype='int64')])
Author: ZhangAustin, Project: attention-lvcsr, Lines: 7, Source: attention.py
Example 2: initial_states
def initial_states(self, batch_size, *args, **kwargs):
    r"""Return initial states for an application call.

    Default implementation assumes that the recurrent application
    method is called `apply`. It fetches the state names
    from `apply.states` and returns a zero matrix for each of them.

    :class:`SimpleRecurrent`, :class:`LSTM` and :class:`GatedRecurrent`
    override this method with trainable initial states initialized
    with zeros.

    Parameters
    ----------
    batch_size : int
        The batch size.
    \*args
        The positional arguments of the application call.
    \*\*kwargs
        The keyword arguments of the application call.

    """
    result = []
    for state in self.apply.states:
        dim = self.get_dim(state)
        if dim == 0:
            result.append(tensor.zeros((batch_size,)))
        else:
            result.append(tensor.zeros((batch_size, dim)))
    return result
Author: SwordYork, Project: blocks, Lines: 29, Source: base.py
Example 3: pad
def pad(inp, padding):
    if all([padval == 0 for padval in pyk.flatten(padding)]):
        return inp

    if inp.ndim == 4:
        # Make a zero tensor of the right shape
        zt = T.zeros(shape=(inp.shape[0], inp.shape[1], inp.shape[2] + sum(padding[0]),
                            inp.shape[3] + sum(padding[1])))
        # Compute assignment slice
        [[ystart, ystop], [xstart, xstop]] = [[padval[0], (-padval[1] if padval[1] != 0 else None)]
                                              for padval in padding]
        # Assign subtensor
        padded = T.set_subtensor(zt[:, :, ystart:ystop, xstart:xstop], inp)
        return padded

    elif inp.ndim == 5:
        # Make a zero tensor of the right shape
        zt = T.zeros(shape=(inp.shape[0], inp.shape[1] + sum(padding[2]), inp.shape[2],
                            inp.shape[3] + sum(padding[0]), inp.shape[4] + sum(padding[1])))
        # Compute assignment slice
        [[ystart, ystop], [xstart, xstop], [zstart, zstop]] = [[padval[0], (-padval[1] if padval[1] != 0 else None)]
                                                               for padval in padding]
        # Assign subtensor
        padded = T.set_subtensor(zt[:, zstart:zstop, :, ystart:ystop, xstart:xstop], inp)
        return padded

    else:
        raise NotImplementedError("Padding is only implemented for 4 and 5 dimensional tensors.")
Author: abailoni, Project: greedy_CNN, Lines: 27, Source: netutils.py
Example 4: spatial_2d_padding
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='th'):
    '''Pad the 2nd and 3rd dimensions of a 4D tensor
    with "padding[0]" and "padding[1]" (resp.) zeros left and right.
    '''
    input_shape = x.shape
    if dim_ordering == 'th':
        output_shape = (input_shape[0],
                        input_shape[1],
                        input_shape[2] + 2 * padding[0],
                        input_shape[3] + 2 * padding[1])
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(None),
                   slice(padding[0], input_shape[2] + padding[0]),
                   slice(padding[1], input_shape[3] + padding[1]))
    elif dim_ordering == 'tf':
        output_shape = (input_shape[0],
                        input_shape[1] + 2 * padding[0],
                        input_shape[2] + 2 * padding[1],
                        input_shape[3])
        output = T.zeros(output_shape)
        indices = (slice(None),
                   slice(padding[0], input_shape[1] + padding[0]),
                   slice(padding[1], input_shape[2] + padding[1]),
                   slice(None))
    else:
        raise Exception('Invalid dim_ordering: ' + dim_ordering)
    return T.set_subtensor(output[indices], x)
Author: sfwlily, Project: keras, Lines: 29, Source: theano_backend.py
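A hedged usage sketch for Example 4 (assuming the spatial_2d_padding function above is in scope): pad a 4D 'th'-ordered tensor and check that only the two spatial dimensions grow by twice the padding.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
y = spatial_2d_padding(x, padding=(1, 2), dim_ordering='th')
f = theano.function([x], y)

img = np.ones((2, 3, 4, 5), dtype=theano.config.floatX)
print(f(img).shape)  # (2, 3, 6, 9): rows padded by 1 per side, columns by 2 per side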
Example 5: __init__
def __init__(self, rng, input, mask, n_in, n_h):
    # Init params
    self.W_i = theano.shared(gauss_weight(rng, n_in, n_h), 'W_i', borrow=True)
    self.W_f = theano.shared(gauss_weight(rng, n_in, n_h), 'W_f', borrow=True)
    self.W_c = theano.shared(gauss_weight(rng, n_in, n_h), 'W_c', borrow=True)
    self.W_o = theano.shared(gauss_weight(rng, n_in, n_h), 'W_o', borrow=True)
    self.U_i = theano.shared(gauss_weight(rng, n_h), 'U_i', borrow=True)
    self.U_f = theano.shared(gauss_weight(rng, n_h), 'U_f', borrow=True)
    self.U_c = theano.shared(gauss_weight(rng, n_h), 'U_c', borrow=True)
    self.U_o = theano.shared(gauss_weight(rng, n_h), 'U_o', borrow=True)
    self.b_i = theano.shared(numpy.zeros((n_h,), dtype=config.floatX),
                             'b_i', borrow=True)
    self.b_f = theano.shared(numpy.zeros((n_h,), dtype=config.floatX),
                             'b_f', borrow=True)
    self.b_c = theano.shared(numpy.zeros((n_h,), dtype=config.floatX),
                             'b_c', borrow=True)
    self.b_o = theano.shared(numpy.zeros((n_h,), dtype=config.floatX),
                             'b_o', borrow=True)

    self.params = [self.W_i, self.W_f, self.W_c, self.W_o,
                   self.U_i, self.U_f, self.U_c, self.U_o,
                   self.b_i, self.b_f, self.b_c, self.b_o]

    outputs_info = [T.zeros((input.shape[1], n_h)),
                    T.zeros((input.shape[1], n_h))]

    rval, updates = theano.scan(self._step,
                                sequences=[mask, input],
                                outputs_info=outputs_info)

    # self.output is in the format (length, batchsize, n_h)
    self.output = rval[0]
Author: 1991lin, Project: data-science-ipython-notebooks, Lines: 35, Source: rnn_precompile.py
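In Example 5, T.zeros((input.shape[1], n_h)) supplies the initial hidden and cell states for theano.scan. A minimal sketch of that pattern outside the LSTM class (my own illustration, not from the project above): a running sum over time steps whose accumulator starts as a zero matrix.

import numpy as np
import theano
import theano.tensor as T

seq = T.tensor3('seq')                        # (time, batch, features)
acc0 = T.zeros((seq.shape[1], seq.shape[2]))  # (batch, features), analogous to h0/c0 above

def step(x_t, acc_tm1):
    return acc_tm1 + x_t

acc, _ = theano.scan(step, sequences=seq, outputs_info=acc0)
f = theano.function([seq], acc[-1])

data = np.ones((4, 2, 3), dtype=theano.config.floatX)
print(f(data))  # every entry is 4.0: the sum over the 4 time steps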
Example 6: lllistool
def lllistool(i, inp, func):
    if func == LSTM:
        NUMS[i+1] *= 4
    sdim = DIMS[i]
    if func == SimpleRecurrent or func == LSTM:
        sdim = DIMS[i] + DIMS[i+1]
    l = Linear(input_dim=DIMS[i], output_dim=DIMS[i+1] * NUMS[i+1],
               weights_init=IsotropicGaussian(std=sdim**(-0.5)),
               biases_init=IsotropicGaussian(std=sdim**(-0.5)),
               name='Lin{}'.format(i))
    l.initialize()
    if func == SimpleRecurrent:
        gong = func(dim=DIMS[i+1], activation=Rectifier(), weights_init=IsotropicGaussian(std=sdim**(-0.5)))
        gong.initialize()
        ret = gong.apply(l.apply(inp))
    elif func == LSTM:
        gong = func(dim=DIMS[i+1], activation=Tanh(), weights_init=IsotropicGaussian(std=sdim**(-0.5)))
        gong.initialize()
        print(inp)
        ret, _ = gong.apply(
            l.apply(inp),
            T.zeros((inp.shape[1], DIMS[i+1])),
            T.zeros((inp.shape[1], DIMS[i+1])),
        )
    elif func == SequenceGenerator:
        gong = func(
            readout=None,
            transition=SimpleRecurrent(dim=100, activation=Rectifier(), weights_init=IsotropicGaussian(std=0.1)))
        ret = None
    elif func is None:
        ret = l.apply(inp)
    else:
        gong = func()
        ret = gong.apply(l.apply(inp))
    return ret
Author: AlphaLambdaMuPi, Project: DLAlpha, Lines: 35, Source: rnn.py
Example 7: initial_glimpses
def initial_glimpses(self, name, batch_size, sequence):
    if name == "glimpses":
        return tensor.zeros((batch_size, self.sequence_dim))
    elif name == "weights":
        return tensor.zeros((batch_size, sequence.shape[0]))
    else:
        raise ValueError("Unknown glimpse name {}".format(name))
Author: jych, Project: blocks, Lines: 7, Source: attention.py
Example 8: plotUpdate
def plotUpdate(self, updates):
    '''
    >>>get update info of each layer
    >>>type updates: dict
    >>>para updates: update dictionary
    '''
    maxdict = T.zeros(shape=(self.deep*2+1,))
    mindict = T.zeros(shape=(self.deep*2+1,))
    meandict = T.zeros(shape=(self.deep*2+1,))

    for i in xrange(self.deep):
        updw = updates[self.layers[i].w] - self.layers[i].w
        maxdict = T.set_subtensor(maxdict[2*i], T.max(updw))
        mindict = T.set_subtensor(mindict[2*i], T.min(updw))
        meandict = T.set_subtensor(meandict[2*i], T.mean(updw))
        updb = updates[self.layers[i].b] - self.layers[i].b
        maxdict = T.set_subtensor(maxdict[2*i+1], T.max(updb))
        mindict = T.set_subtensor(mindict[2*i+1], T.min(updb))
        meandict = T.set_subtensor(meandict[2*i+1], T.mean(updb))

    updw = updates[self.classifier.w] - self.classifier.w
    maxdict = T.set_subtensor(maxdict[self.deep*2], T.max(updw))
    mindict = T.set_subtensor(mindict[self.deep*2], T.min(updw))
    meandict = T.set_subtensor(meandict[self.deep*2], T.mean(updw))
    return [maxdict, mindict, meandict]
Author: wolfhu, Project: RCNNSentence, Lines: 25, Source: dcnnModel.py
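The pattern in Example 8 — allocate a zero vector with T.zeros and fill individual entries with T.set_subtensor — can be shown standalone. The two input vectors below are hypothetical stand-ins for the update-minus-parameter differences used above.

import numpy as np
import theano
import theano.tensor as T

p0, p1 = T.vector('p0'), T.vector('p1')
deltas = [p0, p1]

means = T.zeros((len(deltas),))
for i, d in enumerate(deltas):
    means = T.set_subtensor(means[i], T.mean(d))

f = theano.function([p0, p1], means)
print(f(np.array([1., 3.], dtype=theano.config.floatX),
        np.array([2., 4., 6.], dtype=theano.config.floatX)))  # [2. 4.]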
Example 9: get_output
def get_output(self, train=False):
    X = self.get_input().dimshuffle(1, 0, 2)
    Vx = T.dot(X, self.V)
    x_init = T.zeros((X.shape[1], self.input_dim))
    s_init = T.zeros((X.shape[1], self.output_dim))
    u_init = T.zeros((X.shape[1], self.causes_dim))
    outputs, updates = scan(
        self._step,
        sequences=[X, Vx],
        outputs_info=[x_init, s_init, u_init],
        non_sequences=self.params,
        truncate_gradient=self.truncate_gradient)

    if self.return_mode == 'both':
        return T.concatenate([outputs[1], outputs[2]],
                             axis=-1)
    elif self.return_mode == 'states':
        out = outputs[1]
    elif self.return_mode == 'causes':
        out = outputs[2]
    else:
        raise ValueError("return_mode {0} not valid. Choose "
                         "'both', 'states' or 'causes'".format(
                             self.return_mode))

    if self.return_sequences:
        return out.dimshuffle(1, 0, 2)
    else:
        return out[-1]
Author: jfsantos, Project: seya, Lines: 29, Source: tensor.py
Example 10: __init__
def __init__(self, n_in, n_out, layers, decoder=linear.Linear, itype='int32',
             solver=solvers.RMSprop(0.01)):
    self.data = T.matrix(dtype=itype)
    self.x = self.data[:-1]
    self.y = self.data[1:]
    self.mask = T.matrix(dtype='int32')
    self.weights = []
    k, b = self.x.shape
    y_layer = self.x
    self.y_layers = []
    m = n_in
    for n in layers:
        layer = lstm.LSTM(m, n)
        self.weights.append(layer.weights)
        y0 = T.zeros((b, n))
        c0 = T.zeros((b, n))
        y_layer, _ = layer.scanl(y0, c0, y_layer)
        self.y_layers.append(y_layer)
        m = n
    decode = decoder(m, n_out)
    self.weights.append(decode.weights)
    yh = decode(y_layer)
    self.yh = softmax.softmax(yh)
    self.loss_t = T.sum(crossent.crossent(self.yh, self.y) * self.mask[1:])
    self.correct = T.sum(T.eq(T.argmax(self.yh, axis=2), self.y) * self.mask[1:])
    self.count = T.sum(self.mask[1:])
    self.solver = solver
    # compile theano functions
    self._loss = theano.function([self.data, self.mask], [self.loss_t, self.correct, self.count])
    self._activations = theano.function([self.data], self.y_layers + [self.yh], givens={self.x: self.data})
Author: hhcho, Project: rnn, Lines: 30, Source: charrnn.py
Example 11: function
def function(self, input_tensor):
    init_hs = T.zeros((input_tensor.shape[1], self.output_neurons))
    init_cs = T.zeros((input_tensor.shape[1], self.output_neurons))

    lstm_out_1, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.d_forward, go_forwards=True),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None)
    lstm_out_2, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.d_backward, go_forwards=False),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None)
    lstm_out_3, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.u_forward, go_forwards=True),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None,
                                go_backwards=True)
    lstm_out_4, _ = theano.scan(fn=lambda a, b, c: self.__lstm_wrapper(a, b, c, self.u_backward, go_forwards=False),
                                outputs_info=[init_hs, init_cs],
                                sequences=input_tensor,
                                non_sequences=None,
                                go_backwards=True)

    return T.concatenate((lstm_out_1[0],
                          lstm_out_2[0],
                          lstm_out_3[0][::-1],
                          lstm_out_4[0][::-1]), axis=2)
Author: MichSchli, Project: Speciale, Lines: 31, Source: network_ops.py
Example 12: calc_CER
def calc_CER(self, resultseq, targetseq, resultseq_mask=None, targetseq_mask=None):
    """
    Calculate the character error rate (CER) given ground truth 'targetseq' and CTC decoding output 'resultseq'
    :param resultseq: (T1, B)
    :param resultseq_mask: (T1, B)
    :param targetseq: (T2, B)
    :param targetseq_mask: (T2, B)
    :return: CER scalar
    """
    if resultseq_mask is None:
        resultseq_mask = tensor.ones_like(resultseq)
    if targetseq_mask is None:
        targetseq_mask = tensor.ones_like(targetseq)

    def step(result_seq, target_seq, result_seq_mask, target_seq_mask, TE, TG):
        L1 = tensor.cast(result_seq_mask.sum(), 'int32')
        L2 = tensor.cast(target_seq_mask.sum(), 'int32')
        d = self._editdist(result_seq[0:L1], target_seq[0:L2])
        TE += d
        TG += target_seq_mask.sum()
        return TE, TG

    outputs, updates = theano.scan(fn=step,
                                   sequences=[resultseq.T, targetseq.T, resultseq_mask.T, targetseq_mask.T],
                                   outputs_info=[tensor.zeros(1), tensor.zeros(1)],
                                   name='calc_CER')
    TE, TG = outputs[0][-1], outputs[1][-1]
    CER = TE / TG
    return CER, TE, TG
Author: DingKe, Project: Precise-CTC, Lines: 29, Source: ctc_theano.py
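In Example 12, tensor.zeros(1) provides two length-1 accumulators (total edit distance and total target length) that theano.scan threads through the batch. A minimal sketch of the same accumulator pattern (my own illustration, not from the project above): summing the number of valid steps of masked sequences.

import numpy as np
import theano
import theano.tensor as T

mask = T.matrix('mask')  # (T, B): 1.0 for valid time steps, 0.0 for padding

def step(mask_col, total):
    return total + mask_col.sum()

totals, _ = theano.scan(step,
                        sequences=mask.T,      # iterate over the batch axis
                        outputs_info=T.zeros(1))
f = theano.function([mask], totals[-1])

m = np.array([[1, 1], [1, 0], [1, 0]], dtype=theano.config.floatX)
print(f(m))  # [4.]: 3 valid steps in the first sequence plus 1 in the second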
Example 13: best_path_decode
def best_path_decode(self, scorematrix, scorematrix_mask=None, blank_symbol=None):
    """
    Computes the best path by simply choosing the most likely label at each timestep
    :param scorematrix: (T, C+1, B)
    :param scorematrix_mask: (T, B)
    :param blank_symbol: = C by default
    :return: resultseq (T, B), resultseq_mask (T, B)
    Note: much slower than the pure Python version (typically ~40x slower on HTR tasks)
    """
    bestlabels = tensor.argmax(scorematrix, axis=1)  # (T, B)
    T, Cp, B = scorematrix.shape
    resultseq = tensor.zeros([T, B], dtype=scorematrix.dtype) - 1
    resultseq_mask = tensor.zeros([T, B], dtype=scorematrix.dtype)
    if blank_symbol is None:
        blank_symbol = Cp - 1
    if scorematrix_mask is None:
        scorematrix_mask = tensor.ones([T, B], dtype=scorematrix.dtype)

    def step(labelseq, labelseq_mask, idx, resultseq, resultseq_mask, blank_symbol):
        seqlen = tensor.cast(labelseq_mask.sum(), 'int32')
        labelseq = self._remove_adjdup(labelseq[0:seqlen])
        labelseq = self._remove_value(labelseq, blank_symbol)
        seqlen2 = labelseq.size
        resultseq = tensor.set_subtensor(resultseq[0:seqlen2, idx], labelseq)
        resultseq_mask = tensor.set_subtensor(resultseq_mask[0:seqlen2, idx], tensor.ones_like(labelseq))
        idx += 1
        return idx, resultseq, resultseq_mask

    outputs, updates = theano.scan(fn=step,
                                   sequences=[bestlabels.T, scorematrix_mask.T],
                                   outputs_info=[0, resultseq, resultseq_mask],
                                   non_sequences=[blank_symbol],
                                   name='decode_scan')
    resultseq, resultseq_mask = outputs[1][-1], outputs[2][-1]
    return resultseq, resultseq_mask
Author: DingKe, Project: Precise-CTC, Lines: 34, Source: ctc_theano.py
Note: the theano.tensor.zeros examples in this article were compiled by vimsky (纯净天空) from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets are selected from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not republish without permission.