This article collects typical usage examples of the Python function theano.gradient.grad_undefined. If you have been wondering how exactly grad_undefined is used, or what real code that calls it looks like, the hand-picked examples below should help.
Twenty code examples of grad_undefined are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
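Before the examples, here is a minimal sketch of the calling convention, assuming a hypothetical Op (ScaleByInt is invented for illustration and is not part of Theano). grad_undefined(op, x_pos, x, comment="") returns a variable of NullType that marks input number x_pos of op as having a mathematically undefined gradient; if theano.grad is later asked for the derivative with respect to that input, it raises NullTypeGradError.

import theano
import theano.tensor as T
from theano.gradient import grad_undefined

class ScaleByInt(theano.Op):
    """Hypothetical Op: multiply a tensor by an integer factor."""
    __props__ = ()

    def make_node(self, x, factor):
        x = T.as_tensor_variable(x)
        factor = T.as_tensor_variable(factor)
        return theano.Apply(self, [x, factor], [x.type()])

    def perform(self, node, inputs, output_storage):
        x, factor = inputs
        output_storage[0][0] = x * factor

    def grad(self, inputs, output_grads):
        x, factor = inputs
        gz, = output_grads
        # The derivative w.r.t. x is well defined; the integer factor
        # has no meaningful derivative, so mark it as undefined.
        return [gz * factor,
                grad_undefined(self, 1, factor,
                               "ScaleByInt: factor is integer-valued")]

With such an Op, T.grad(cost, x) works as usual, while T.grad(cost, factor) raises theano.gradient.NullTypeGradError.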
Example 1: grad
def grad(self, inputs, output_grads):
    gradients = CPUCTCGrad()(*inputs)
    return [
        gradients,
        grad_undefined(self, 1, inputs[1]),
        grad_undefined(self, 2, inputs[2]),
        grad_undefined(self, 3, inputs[3]),
    ]
Author: ZhangAustin | Project: ctc | Lines: 8 | Source: ctc.py
Example 2: grad
def grad(self, inputs, output_grads):
    # self.gradients.shape = [seqLen, batchSize, outputSize]
    # output_grads[0].shape = [batchSize]  (one cost per sequence)
    # So, reshape output_grads to [1, batchSize, 1] for broadcasting
    output_grad = output_grads[0].reshape((1, -1, 1))
    return [output_grad * self.gradients,
            grad_undefined(self, 1, inputs[1]),
            grad_undefined(self, 2, inputs[2])]
Author: mcf06 | Project: theano_ctc | Lines: 8 | Source: ctc_base.py
Example 3: grad
def grad(self, inputs, output_grads):
    # Enable gradient computation
    self.computeGradient.set_value(np.asarray([1], dtype=np.int32))
    return [self.gradients,
            grad_undefined(self, 1, inputs[1]),
            grad_undefined(self, 2, inputs[2]),
            grad_undefined(self, 3, inputs[3]),
            grad_undefined(self, 4, inputs[4])]
Author: trungnt13 | Project: theano_ctc | Lines: 9 | Source: cpu_ctc.py
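Note that this grad method has a side effect: it flips the computeGradient shared variable so that, from this point on, the op's forward pass also computes and stores the CTC gradients returned here.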
Example 4: L_op
def L_op(self, inputs, outputs, output_grads):
    assert self.compute_grad and len(outputs) == 2
    gradients = outputs[1]
    assert gradients is not None
    grad_op = output_grads[0]
    total_grad = T.basic.batched_dot(
        grad_op, gradients.dimshuffle(1, 0, 2)).dimshuffle(1, 0, 2)
    return [total_grad,
            grad_undefined(self, 1, inputs[1]),
            grad_undefined(self, 2, inputs[2])]
Author: rezaprimasatya | Project: Theano | Lines: 10 | Source: ctc.py
Example 5: grad
def grad(self, inputs, grads):
    o, W, h, inputIdx, outputIdx = inputs
    go = grads[0]
    Wgrad = gpu_sparse_block_outer(W.zeros_like(), h, go,
                                   inputIdx, outputIdx)
    hgrad = gpu_sparse_block_gemv(h.zeros_like(),
                                  W.dimshuffle((1, 0, 3, 2)),
                                  go, outputIdx, inputIdx)
    return [
        go,
        Wgrad,
        hgrad,
        grad_undefined(self, 3, inputIdx, "grad of inputIdx makes no sense"),
        grad_undefined(self, 4, outputIdx, "grad of outputIdx makes no sense"),
    ]
Author: poolio | Project: Theano | Lines: 13 | Source: blocksparse.py
Example 6: grad
def grad(self, inputs, grads):
    o, W, h, inputIdx, outputIdx = inputs
    go = grads[0]
    outer_fun = SparseBlockOuter(self.inplace)
    gemv_fun = SparseBlockGemv(self.inplace)
    Wgrad = outer_fun(W.zeros_like(), h, go, inputIdx, outputIdx)
    hgrad = gemv_fun(h.zeros_like(), W.dimshuffle((1, 0, 3, 2)),
                     go, outputIdx, inputIdx)
    return [go, Wgrad, hgrad,
            grad_undefined(self, 3, inputIdx,
                           "grad of inputIdx makes no sense"),
            grad_undefined(self, 4, outputIdx,
                           "grad of outputIdx makes no sense")]
Author: 12190143 | Project: Theano | Lines: 15 | Source: blocksparse.py
Example 7: L_op
def L_op(self, inputs, outputs, output_grads):
    # Gradients computed by Op
    assert self.compute_grad and len(outputs) == 2
    gradients = outputs[1]
    assert gradients is not None
    # Gradients of original function, to compose chain rule
    grad_op = output_grads[0]
    grad_shuffle = GpuDimShuffle(
        input_broadcastable=(False, False, False),
        new_order=(1, 0, 2))(gradients)
    grad_bdot = T.basic.batched_dot(grad_op, grad_shuffle)
    grad_shuffle_reverse = GpuDimShuffle(
        input_broadcastable=(False, False, False),
        new_order=(1, 0, 2))(grad_bdot)
    return [grad_shuffle_reverse,
            grad_undefined(self, 1, inputs[1]),
            grad_undefined(self, 2, inputs[2])]
Author: DEVESHTARASIA | Project: Theano | Lines: 16 | Source: ctc.py
Example 8: grad
def grad(self, inp, grads):
    outs = self(*inp)
    grad_op = ROIPoolingGradOp(
        self.pooled_h, self.pooled_w, self.spatial_scale)
    data_grad = grad_op(*(inp + [outs[1], grads[0]]))
    return [data_grad, grad_undefined(self, 1, inp[1])]
Author: hongyuanzhu | Project: theano-roi-pooling | Lines: 7 | Source: roi_pooling.py
Example 9: grad
def grad(self, inputs, grads):
    o, W, h, inputIdx, outputIdx = inputs
    go = grads[0]
    # might revise that interface to not have a huge output
    Wgrad = sparse_block_outer_ss(W.zeros_like(),
                                  h, go, inputIdx, outputIdx)
    hgrad = sparse_block_gemv_ss(h.zeros_like(),
                                 W.dimshuffle((1, 0, 3, 2)),
                                 go,
                                 outputIdx, inputIdx)
    return [go, Wgrad, hgrad,
            grad_undefined(self, 3, inputIdx,
                           "grad of inputIdx makes no sense"),
            grad_undefined(self, 4, outputIdx,
                           "grad of outputIdx makes no sense")]
Author: Jerryzcn | Project: Theano | Lines: 16 | Source: blocksparse.py
Example 10: grad
def grad(self, inp, grads):
    x, neib_shape, neib_step = inp
    gz, = grads
    if self.mode in ['valid', 'ignore_borders']:
        if (neib_shape is neib_step or
                neib_shape == neib_step or
                # For Theano Constants, `==` does not compare the
                # underlying data; the `equals` method does that.
                (hasattr(neib_shape, "equals") and
                 neib_shape.equals(neib_step))):
            return [neibs2images(gz, neib_shape, x.shape, mode=self.mode),
                    grad_undefined(self, 1, neib_shape),
                    grad_undefined(self, 2, neib_step)]
    return [grad_not_implemented(self, 0, x),
            grad_undefined(self, 1, neib_shape),
            grad_undefined(self, 2, neib_step)]
Author: c0g | Project: Theano | Lines: 17 | Source: neighbours.py
Example 11: grad
def grad(self, inputs, output_grads):
    a, axis = inputs
    indices = self.__get_argsort_indices(a, axis)
    inp_grad = output_grads[0][tuple(indices)]
    axis_grad = grad_undefined(
        self, 1, axis,
        "The gradient of sort is not defined "
        "with respect to the integer axis itself")
    return [inp_grad, axis_grad]
Author: Theano | Project: Theano | Lines: 9 | Source: sort.py
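A quick usage sketch of the pattern in Example 11 (my own snippet, not taken from the Theano repository): the gradient flows back through sort by inverse-permuting the output gradient, while the axis argument remains non-differentiable.

import numpy as np
import theano
import theano.tensor as T

x = T.dmatrix('x')
cost = T.sort(x, axis=1).sum()
gx = T.grad(cost, x)  # defined: the output gradient is permuted back
f = theano.function([x], gx)
print(f(np.random.rand(3, 4)))  # all ones, since sort only reorders entries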
Example 12: grad
def grad(self, inputs, grads):
    logger.warning("BernoulliOp.grad(...) called")
    prob = inputs[0]
    noise = inputs[1]
    # import ipdb; ipdb.set_trace()
    # g0 = prob.zeros_like().astype(theano.config.floatX)
    g0 = prob * grads[0]
    g1 = grad_undefined(self, 1, noise)
    return [g0, g1]
Author: jbornschein | Project: bihm | Lines: 11 | Source: distributions.py
Example 13: L_op
def L_op(self, inputs, outputs, out_grads):
    x, k = inputs
    k_grad = grad_undefined(self, 1, k, 'topk: k is not differentiable')
    if not (self.return_indices or self.return_values):
        x_grad = grad_undefined(
            self, 0, x, 'topk: cannot get gradient'
            ' without both indices and values')
    else:
        x_shp = theano.tensor.shape(x)
        z_grad = out_grads[0]
        ndim = x.ndim
        axis = self.axis % ndim
        grad_indices = [
            arange(x_shp[i]).dimshuffle([0] + ['x'] * (ndim - i - 1))
            if i != axis else outputs[-1] for i in range(ndim)]
        x_grad = x.zeros_like(dtype=z_grad.dtype)
        x_grad = set_subtensor(x_grad[tuple(grad_indices)], z_grad)
    return [x_grad, k_grad]
Author: Theano | Project: Theano | Lines: 20 | Source: sort.py
Example 14: grad
def grad(self, inputs, output_gradients):
    V, W, b, d = inputs
    dCdH, = output_gradients
    # make all of these ops support broadcasting of scalar b to vector b
    # and replace the zeros_like in all their grads
    # print dCdH.broadcastable
    # print "dCdH.broadcastable"
    # quit(-1)
    # dCdH = printing.Print("dCdH = ",["shape"])

    # Make sure the broadcasting pattern of the gradient is the same
    # as the initial variable
    dCdV = theano.tensor.nnet.convTransp3D(
        W, T.zeros_like(V[0, 0, 0, 0, :]), d, dCdH, V.shape[1:4])
    dCdV = T.patternbroadcast(dCdV, V.broadcastable)
    WShape = W.shape
    dCdW = theano.tensor.nnet.convGrad3D(V, d, WShape, dCdH)
    dCdW = T.patternbroadcast(dCdW, W.broadcastable)
    dCdb = T.sum(dCdH, axis=(0, 1, 2, 3))
    dCdb = T.patternbroadcast(dCdb, b.broadcastable)
    dCdd = grad_undefined(
        self, 3, inputs[3],
        "The gradient of Conv3D with respect to the convolution"
        " stride is undefined because Conv3D is only defined for"
        " integer strides.")

    if 'name' in dir(dCdH) and dCdH.name is not None:
        dCdH_name = dCdH.name
    else:
        dCdH_name = 'anon_dCdH'

    if 'name' in dir(V) and V.name is not None:
        V_name = V.name
    else:
        V_name = 'anon_V'

    if 'name' in dir(W) and W.name is not None:
        W_name = W.name
    else:
        W_name = 'anon_W'

    if 'name' in dir(b) and b.name is not None:
        b_name = b.name
    else:
        b_name = 'anon_b'

    dCdV.name = 'Conv3D_dCdV(dCdH=' + dCdH_name + ',V=' + V_name + ')'
    dCdW.name = ('Conv3D_dCdW(dCdH=' + dCdH_name + ',V=' + V_name +
                 ',W=' + W_name + ')')
    dCdb.name = ('Conv3D_dCdb(dCdH=' + dCdH_name + ',V=' + V_name +
                 ',W=' + W_name + ',b=' + b_name + ')')

    return [dCdV, dCdW, dCdb, dCdd]
Author: ALISCIFP | Project: Segmentation | Lines: 52 | Source: Conv3D.py
Example 15: grad
def grad(self, inp, grads):
    axis, tensors = inp[0], inp[1:]
    gz, = grads
    rval = [grad_undefined(self, 0, axis)]
    out = ConcatenateGrad()(gz, axis, *tensors)
    if not isinstance(out, list):
        out = [out]
    rval = rval + out
    return rval
Author: pcs-theano | Project: Theano | Lines: 13 | Source: mkl_concatenate.py
Example 16: grad
def grad(self, inputs, output_gradients):
    C, d, WShape, B = inputs
    dLdA, = output_gradients
    z = T.zeros_like(C[0, 0, 0, 0, :])
    dLdC = convTransp3D(dLdA, z, d, B, C.shape[1:4])
    # d actually does affect the outputs, so it's not disconnected
    dLdd = grad_undefined(self, 1, d)
    # The shape of the weights doesn't affect the output elements
    dLdWShape = DisconnectedType()()
    dLdB = conv3D(C, dLdA, T.zeros_like(B[0, 0, 0, 0, :]), d)
    return [dLdC, dLdd, dLdWShape, dLdB]
Author: amanrajdce | Project: Theano | Lines: 13 | Source: ConvGrad3D.py
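Note the distinction this example draws: grad_undefined (a NullType) is used for d, which does affect the output values but, being an integer stride, has no meaningful derivative, whereas DisconnectedType is used for WShape, which only determines the shape of the output and never influences its values.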
Example 17: grad
def grad(self, inp, grads):
    x, neib_shape, neib_step = inp
    gz, = grads

    if self.mode in ['valid', 'ignore_borders']:
        if (neib_shape is neib_step or
                neib_shape == neib_step or
                # For Theano Constants, `==` does not compare the
                # underlying data; the `equals` method does that.
                (hasattr(neib_shape, "equals") and
                 neib_shape.equals(neib_step))):
            return [neibs2images(gz, neib_shape, x.shape, mode=self.mode),
                    grad_undefined(self, 1, neib_shape),
                    grad_undefined(self, 2, neib_step)]

    if self.mode in ['valid']:
        # Iterate over neighborhood positions, summing contributions.
        def pos2map(pidx, pgz, prior_result, neib_shape, neib_step):
            '''
            Helper function that adds the gradient contribution from a
            single neighborhood position i,j.
            pidx = Index of position within neighborhood.
            pgz = Gradient of shape (batch_size*num_channels*neibs)
            prior_result = Shape (batch_size, num_channels, rows, cols)
            neib_shape = Number of rows, cols in a neighborhood.
            neib_step = Step sizes from image2neibs.
            '''
            nrows, ncols = neib_shape
            rstep, cstep = neib_step
            batch_size, num_channels, rows, cols = prior_result.shape
            i = pidx // ncols
            j = pidx - (i * ncols)
            # This position does not touch some img pixels in valid mode.
            result_indices = prior_result[:, :,
                                          i:(rows - nrows + i + 1):rstep,
                                          j:(cols - ncols + j + 1):cstep]
            newshape = (batch_size, num_channels) + \
                ((rows - nrows) // rstep + 1,) + \
                ((cols - ncols) // cstep + 1,)
            return T.inc_subtensor(result_indices, pgz.reshape(newshape))

        indices = T.arange(neib_shape[0] * neib_shape[1])
        pgzs = gz.dimshuffle((1, 0))
        result, _ = theano.scan(fn=pos2map,
                                sequences=[indices, pgzs],
                                outputs_info=T.zeros(x.shape),
                                non_sequences=[neib_shape, neib_step])
        grad_input = result[-1]
        return [grad_input,
                grad_undefined(self, 1, neib_shape),
                grad_undefined(self, 2, neib_step)]

    return [grad_not_implemented(self, 0, x),
            grad_undefined(self, 1, neib_shape),
            grad_undefined(self, 2, neib_step)]
Author: Faruk-Ahmed | Project: Theano | Lines: 54 | Source: neighbours.py
Example 18: grad
def grad(self, inputs, output_gradients):
    W, b, d, H, RShape = inputs
    dCdR, = output_gradients
    dCdH = theano.tensor.nnet.conv3D(
        dCdR, W, T.zeros_like(H[0, 0, 0, 0, :]), d)
    WShape = W.shape
    dCdW = theano.tensor.nnet.convGrad3D(dCdR, d, WShape, H)
    dCdb = T.sum(dCdR, axis=(0, 1, 2, 3))
    # not differentiable, since d affects the output elements
    dCdd = grad_undefined(self, 2, d)
    # disconnected, since RShape just determines the output shape
    dCdRShape = DisconnectedType()()

    if 'name' in dir(dCdR) and dCdR.name is not None:
        dCdR_name = dCdR.name
    else:
        dCdR_name = 'anon_dCdR'

    if 'name' in dir(H) and H.name is not None:
        H_name = H.name
    else:
        H_name = 'anon_H'

    if 'name' in dir(W) and W.name is not None:
        W_name = W.name
    else:
        W_name = 'anon_W'

    if 'name' in dir(b) and b.name is not None:
        b_name = b.name
    else:
        b_name = 'anon_b'

    dCdW.name = ('ConvTransp3D_dCdW.H=' + H_name + ',dCdR=' + dCdR_name +
                 ',W=' + W_name)
    dCdb.name = ('ConvTransp3D_dCdb.H=' + H_name + ',dCdR=' + dCdR_name +
                 ',W=' + W_name + ',b=' + b_name)
    dCdH.name = 'ConvTransp3D_dCdH.H=' + H_name + ',dCdR=' + dCdR_name

    return [dCdW, dCdb, dCdd, dCdH, dCdRShape]
Author: ALISCIFP | Project: Segmentation | Lines: 39 | Source: ConvTransp3D.py
Example 19: grad
def grad(self, inp, grads):
    return [grad_undefined(self, i, inp[i])
            for i in xrange(2)]
Author: DEVESHTARASIA | Project: Theano | Lines: 3 | Source: test_cgpukernelbase.py
Example 20: grad
def grad(self, inp, grads):
    x, indx, = inp
    gz, = grads
    return [GpuAssigner()(x, indx, gz), grad_undefined(self, 1, inp[1])]
Author: hydercps | Project: hred-qs | Lines: 4 | Source: theano_extensions.py
Note: The theano.gradient.grad_undefined examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. For redistribution and use, please consult each project's license; do not republish without permission.