This article collects typical usage examples of the Python function theano.tensor.advanced_inc_subtensor1. If you have been wondering what advanced_inc_subtensor1 does, how it is used, or where to find working examples, the curated code samples below should help.
Six code examples of advanced_inc_subtensor1 are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
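Before the examples, here is a minimal CPU-only sketch of what advanced_inc_subtensor1(x, y, indices) computes: it returns a copy of x in which the rows selected by the integer index vector are incremented by y, i.e. the symbolic counterpart of NumPy's np.add.at. This sketch is our own illustration, not taken from any of the projects below.

import numpy as np
import theano
import theano.tensor as T

x = T.fmatrix('x')
y = T.fmatrix('y')
# Result equals x, except rows 0 and 2 become x[[0, 2]] + y.
expr = T.advanced_inc_subtensor1(x, y, [0, 2])
f = theano.function([x, y], expr)

xval = np.arange(9, dtype='float32').reshape(3, 3)
yval = np.full((2, 3), 10, dtype='float32')
out = f(xval, yval)

# NumPy reference computation: same result as np.add.at on a copy of xval.
ref = xval.copy()
np.add.at(ref, [0, 2], yval)
assert np.allclose(out, ref)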
Example 1: test_incsub_f16
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = np.arange(np.prod(shp), dtype='float16').reshape(shp) + 1
    yval = np.empty((2,) + shp[1:], dtype='float16')
    yval[:] = 2
    x = shared(xval, name='x')
    y = tensor.tensor(dtype='float16',
                      broadcastable=(False,) * len(shp),
                      name='y')

    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    np.add.at(rep, [[0, 2]], yval)
    assert np.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = theano.function([y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, GpuIncSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert np.allclose(rval, rep)
Developer: Thrandis, Project: Theano, Lines: 27, Source: test_subtensor.py
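Example 1 checks two related update patterns against GPU ops. Roughly, the semantics being verified are the following NumPy-only sketch (our own illustration, not code from the Theano test suite):

import numpy as np

x = np.arange(9, dtype='float16').reshape(3, 3) + 1
y = np.full((2, 3), 2, dtype='float16')

# advanced_inc_subtensor1(x, y, [0, 2]): increment arbitrary integer-indexed rows.
adv = x.copy()
np.add.at(adv, [0, 2], y)

# inc_subtensor(x[1:], y): increment a contiguous basic slice.
basic = x.copy()
basic[1:] += y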
Example 2: test_advinc_subtensor1
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    shared = cuda.shared_constructor
    # shared = tensor.shared
    xval = numpy.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype="float32")
    yval = numpy.asarray([[10, 10, 10], [10, 10, 10]], dtype="float32")
    x = shared(xval, name="x")
    y = T.fmatrices("y")
    expr = T.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)
                for node in f.maker.env.toposort()]) == 1
    assert numpy.allclose(f(yval),
                          [[11.0, 12.0, 13.0], [4.0, 5.0, 6.0], [17.0, 18.0, 19.0]])
Developer: olivierverdier, Project: Theano, Lines: 12, Source: test_basic_ops.py
Example 3: test_advinc_subtensor1
def test_advinc_subtensor1():
    """ Test the second case in the opt local_gpu_advanced_incsubtensor1 """
    for shp in [(3, 3), (3, 3, 3)]:
        shared = gpuarray_shared_constructor
        xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1
        yval = numpy.empty((2,) + shp[1:], dtype='float32')
        yval[:] = 10
        x = shared(xval, name='x')
        y = tensor.tensor(dtype='float32',
                          broadcastable=(False,) * len(shp),
                          name='y')
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)

        assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert numpy.allclose(rval, rep)
Developer: 317070, Project: Theano, Lines: 19, Source: test_subtensor.py
Example 4: test_deterministic_flag
def test_deterministic_flag():
    shp = (3, 4)
    for dtype1, dtype2 in [('float32', 'int8')]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.empty((2,) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name='x')
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False,) * len(yval.shape),
                          name='y')
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)

        assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        np.add.at(rep, [[0, 2]], yval)
        assert np.allclose(rval, rep)
Developer: Thrandis, Project: Theano, Lines: 19, Source: test_subtensor.py
Example 5: test_advinc_subtensor1_vector_scalar
def test_advinc_subtensor1_vector_scalar():
    # Test the case where x is a vector and y a scalar
    shp = (3,)
    for dtype1, dtype2 in [('float32', 'int8'), ('float32', 'float64')]:
        shared = gpuarray_shared_constructor
        xval = np.arange(np.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = np.asarray(10, dtype=dtype2)
        x = shared(xval, name='x')
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False,) * len(yval.shape),
                          name='y')
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)

        assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert np.allclose(rval, rep)
Developer: MarcCote, Project: Theano, Lines: 19, Source: test_subtensor.py
Example 6: test_advinc_subtensor1_dtype
def test_advinc_subtensor1_dtype():
    # Test the mixed dtype case
    shp = (3, 4)
    for dtype1, dtype2 in [('float32', 'int8'), ('float32', 'float64')]:
        shared = gpuarray_shared_constructor
        xval = numpy.arange(numpy.prod(shp), dtype=dtype1).reshape(shp) + 1
        yval = numpy.empty((2,) + shp[1:], dtype=dtype2)
        yval[:] = 10
        x = shared(xval, name='x')
        y = tensor.tensor(dtype=yval.dtype,
                          broadcastable=(False,) * len(yval.shape),
                          name='y')
        expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
        f = theano.function([y], expr, mode=mode_with_gpu)

        assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1_dev20)
                    for node in f.maker.fgraph.toposort()]) == 1
        rval = f(yval)
        rep = xval.copy()
        rep[[0, 2]] += yval
        assert numpy.allclose(rval, rep)
Developer: bouthilx, Project: Theano, Lines: 20, Source: test_subtensor.py
Note: The theano.tensor.advanced_inc_subtensor1 examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's license before distributing or reusing the code; do not reproduce without permission.