This article collects typical usage examples of the Python function theano.sandbox.cuda.shared_constructor. If you are wondering what shared_constructor does, how to call it, or what real-world usage looks like, the curated code samples below may help.
The following shows 20 code examples of shared_constructor, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
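Before turning to the examples, here is a minimal sketch of the basic pattern most of them share: constructing a shared variable that lives on the GPU and reading its value back on the host. This assumes a CUDA-enabled Theano installation; the tcn alias follows the import convention used throughout the examples below.

import numpy
import theano.sandbox.cuda as tcn

# shared_constructor expects float32 data; it stores the value on the GPU
# as a CudaNdarray rather than in host memory.
val = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
a = tcn.shared_constructor(val, 'a')

# get_value() copies the data back into a numpy array on the host;
# get_value(borrow=True, return_internal_type=True) returns the
# CudaNdarray itself, as several of the examples below do.
print(a.get_value())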
Example 1: cmp
def cmp(a_shp, b_shp):
    a0 = my_rand(*a_shp)
    a = tcn.shared_constructor(a0, 'a')
    cval = my_rand(a_shp[0], b_shp[1])
    c = tcn.shared_constructor(cval.copy(), 'c')
    b = tcn.fmatrix('b')
    b2 = tcn.fmatrix('b2')
    f = pfunc(
        [b, b2],
        [tensor.dot(a, b2) + c],
        updates=[(a, tensor.dot(a, b) + c)],
        mode=mode_with_gpu)
    assert any([node.op == tcn.blas.gpu_gemm_no_inplace
                for node in f.maker.fgraph.toposort()])
    bval = my_rand(*b_shp)
    bval2 = my_rand(*b_shp)
    rval = f(bval, bval2)
    assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
    assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)
    # Try with a matrix equal to a0, but with strides in both dims
    a.set_value(a0)
    a.set_value(
        a.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
        borrow=True)
    f(bval, bval2)
Author: gyenney, Project: Tools, Lines: 31, Source: test_blas.py
Example 2: test_elemwise_composite_support_code
def test_elemwise_composite_support_code():
    """
    This was generating an error at compile time.
    Commit 3d1690fa346103594356ecaeceeb2c6757b45d2b fixed that.
    """
    X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype="float32"),
                               name='X')
    W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype="float32"),
                               name='W')
    U = T.dot(X, W)
    Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype="float32"),
                               name='Y')
    P = T.exp(-(Y - U) ** 2)
    epsilon = numpy.asarray(0.001, dtype="float32")
    NLL = -T.mean(T.log(P + epsilon))  # SupportCodeError
    G = T.grad(NLL, wrt=[W])
    backup = theano.config.warn.identify_1pexp_bug
    theano.config.warn.identify_1pexp_bug = False
    try:
        f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)
    finally:
        theano.config.warn.identify_1pexp_bug = backup
    f_grad()
    topo = f_grad.maker.env.toposort()
    assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1
    assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1
Author: gexarcha, Project: Theano, Lines: 28, Source: test_basic_ops.py
Example 3: test_opt_gpujoin_joinvectors_elemwise_then_minusone
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    # from a bug in gpu normal sampling
    _a = numpy.asarray([1, 2, 3, 4], dtype='float32')
    _b = numpy.asarray([5, 6, 7, 8], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)
    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)
    c = tensor.join(0, a_prime, b_prime)
    d = c[:-1]
    f = theano.function([], d, mode=mode_with_gpu)
    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)
    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]
    assert numpy.allclose(numpy.asarray(f()), concat)
Author: Abioy, Project: Theano, Lines: 26, Source: test_opt.py
Example 4: test_opt_gpujoin_onlyajoin
def test_opt_gpujoin_onlyajoin():
    # from a bug in normal sampling
    _a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)
    c = tensor.join(1, a, b)
    f = theano.function([], c, mode=mode_with_gpu)
    f()
    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
    # test mixed dtype
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
    b = theano.tensor.constant(_b)
    c = tensor.join(1, a, b)
    f = theano.function([], c, mode=mode_with_gpu)
    f()
    graph_nodes = f.maker.fgraph.toposort()
    assert isinstance(graph_nodes[-1].op, theano.tensor.Join)
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
Author: Abioy, Project: Theano, Lines: 34, Source: test_opt.py
Example 5: test_pool
def test_pool():
    # (batch, channel, x, y)
    shps = [(1, 1, 2, 2),
            ]
    shps = [(channel, x, y, batch) for (batch, channel, x, y) in shps]
    # numpy.random.RandomState(unittest_tools.fetch_seed()).shuffle(shps)
    warnings.warn("TODO: Razvan needs to finish this")
    for shp in shps:
        for ds in range(1, min(4, shp[2] + 1)):
            for start in [0]:
                for stride in range(1, min(shp[2], ds, 4) + 1):
                    # print('test_pool shape=%s, ds=%d, stride=%d start=%d' % (
                    #     str(shp), ds, stride, start))
                    va = my_rand(*shp)
                    tva = va.flatten()
                    # print('va', tva, tva.max(), tva.argmax())
                    vb = my_rand(*shp)
                    tvb = vb.flatten()
                    # print('vb', tvb, tvb.max(), tvb.argmax(),
                    #       tvb[tva.argmax()])
                    a = tcn.shared_constructor(va, 'a')
                    b = tcn.shared_constructor(vb, 'b')
                    op = MaxPool(ds=ds, stride=stride)
                    v = op(a)
                    rval = theano.tensor.Rop(v, a, b)
                    f = theano.function([], rval,
                                        mode=mode_with_gpu)
                    print(f.maker.fgraph.toposort())
                    # assert any([isinstance(node.op, MaxPool)
                    #             for node in f.maker.fgraph.toposort()])
                    out = numpy.asarray(f())
Author: AlexArgus, Project: pylearn2, Lines: 34, Source: test_rop_pool.py
Example 6: test_gpujoin_preserves_broadcasting
def test_gpujoin_preserves_broadcasting():
    _a = numpy.asarray([[1, 2], [3, 4]], dtype="float32")
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype="float32")
    a = tcn.shared_constructor(_a)
    b = tcn.shared_constructor(_b)
    # [0, 0]: the two original dims were non-broadcastable
    # [1, 'x', 0]: new order and broadcastability
    gpu_dimshuffle = GpuDimShuffle([0, 0], [1, "x", 0])
    a_shuffled = gpu_dimshuffle(a)
    b_shuffled = gpu_dimshuffle(b)
    c = gpu_join(0, a_shuffled, b_shuffled)
    assert c.type.broadcastable == (False, True, False)
    f = theano.function([], c, mode=mode_with_gpu)
    res = f()
    a_reshaped = numpy.asarray([[[1, 3]], [[2, 4]]], dtype="float32")
    b_reshaped = numpy.asarray([[[5, 8]], [[6, 9]], [[7, 10]]],
                               dtype="float32")
    concat = numpy.concatenate([a_reshaped, b_reshaped], axis=0)
    assert numpy.all(res == concat)
Author: olivierverdier, Project: Theano, Lines: 27, Source: test_basic_ops.py
Example 7: test_elemwise2
def test_elemwise2():
    """ Several kinds of elemwise expressions with dimension permutations """
    rng = numpy.random.RandomState(int(time.time()))
    shape = (3, 5)
    for pattern in [(0, 1), (1, 0)]:
        a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                                   dtype='float32'),
                                   name=None)
        b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
        f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],
                  mode=mode_with_gpu)
        has_elemwise = False
        for i, node in enumerate(f.maker.env.toposort()):
            has_elemwise = has_elemwise or isinstance(node.op,
                                                      tensor.Elemwise)
        assert not has_elemwise
        # let debugmode catch errors
        f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)
    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()
    f = pfunc([b], [],
              updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *
                        tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))],
              mode=mode_with_gpu)
    has_elemwise = False
    for i, node in enumerate(f.maker.env.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(rng.rand(*shape), dtype='float32'))
Author: gexarcha, Project: Theano, Lines: 29, Source: test_basic_ops.py
Example 8: test_nvidia_driver2
def test_nvidia_driver2():
    """ Test that the gpu device is initialized by theano when
    we manually make a shared variable on the gpu.
    The driver should always be tested during theano initialization
    of the gpu device.
    """
    a = numpy.random.rand(10000).astype("float32")
    cuda.shared_constructor(a)
    assert theano.sandbox.cuda.use.device_number is not None
Author: npinto, Project: Theano, Lines: 10, Source: test_driver.py
Example 9: test_gpujoin_twomatrices_joincolumns
def test_gpujoin_twomatrices_joincolumns():
    _a = numpy.asarray([[1, 2], [3, 4]], dtype="float32")
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype="float32")
    a = tcn.shared_constructor(_a)
    b = tcn.shared_constructor(_b)
    c = gpu_join(1, a, b)
    f = theano.function([], c)
    assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
Author: olivierverdier, Project: Theano, Lines: 11, Source: test_basic_ops.py
Example 10: run_nnet
def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10,
             n_train=100):
    if config.mode == 'DEBUG_MODE':
        n_train = 1
    if use_gpu:
        w = tcn.shared_constructor(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = tcn.shared_constructor(my_zeros(n_hid), 'b')
        v = tcn.shared_constructor(my_zeros((n_hid, n_out)), 'c')
        c = tcn.shared_constructor(my_zeros(n_out), 'c')
    else:
        w = shared(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = shared(my_zeros(n_hid), 'b')
        v = shared(my_zeros((n_hid, n_out)), 'c')
        c = shared(my_zeros(n_out), 'c')
    x = tensor.fmatrix('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')
    hid = tensor.tanh(tensor.dot(x, w) + b)
    out = tensor.tanh(tensor.dot(hid, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)
    if 0:
        print('loss type', loss.type)
    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)
    mode = get_mode(use_gpu)
    # print 'building pfunc ...'
    train = pfunc([x, y, lr], [loss], mode=mode,
                  updates=[(p, p - g) for p, g in izip(params, gparams)])
    if 0:
        for i, n in enumerate(train.maker.fgraph.toposort()):
            print(i, n)
    xval = my_rand(n_batch, n_in)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')
    t0 = time.time()
    rval = []
    for i in xrange(n_train):
        rval.append(train(xval, yval, lr))
    dt = time.time() - t0
    print_mode(mode)
    return numpy.asarray(rval), dt
Author: 12190143, Project: Theano, Lines: 52, Source: test_mlp.py
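run_nnet returns the per-step losses and the elapsed time, so a hypothetical caller could time the GPU and CPU code paths side by side; this sketch assumes run_nnet and its helpers are importable from the test module above.

# Hypothetical comparison of the two code paths in run_nnet.
gpu_out, gpu_dt = run_nnet(use_gpu=True, n_train=10)
cpu_out, cpu_dt = run_nnet(use_gpu=False, n_train=10)
print("GPU: %.3fs  CPU: %.3fs" % (gpu_dt, cpu_dt))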
Example 11: test_memory_lazy
def test_memory_lazy():
    """As test_memory, but with the ifelse op.
    We need to test it as the ifelse op with the [c]vm create op not
    executed in the graph. This messes with the [c]vm gc implementation.
    """
    shapes = (50, 100)
    # more_alloc1 is not the same for both dtypes.
    # When dtype is float32, the computation is done on the gpu.
    # This inserts constants on the gpu during compilation,
    # which raises the number of allocs.
    # When dtype is float64, only the shared is on the gpu and it is
    # transferred to the cpu for computation. So no extra alloc after
    # compilation.
    # more_alloc1 is after the first compilation.
    for dtype, more_alloc1 in [("float32", 1),
                               ("float64", 0)]:
        print(dtype)
        test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype)
        some_vector = tensor.vector('some_vector', dtype=dtype)
        some_matrix = some_vector.reshape(shapes)
        branch_select = tensor.iscalar()
        mem1 = freemem()
        print("Before shared variable", mem1)
        variables = cuda.shared_constructor(np.ones((shapes[1],),
                                                    dtype='float32'))
        derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables))
        derp = ifelse.IfElse(1)(branch_select,
                                derp, some_matrix[:shapes[0]].sum())
        derp += 1
        print("Shared took ",
              np.prod(variables.get_value(
                  borrow=True,
                  return_internal_type=True).shape) * 4 / 1024,
              "kB")
        mem2 = freemem()
        print("Before compilation", mem2)
        mem2_1 = freemem(extra_alloc=more_alloc1)
        obj = theano.function([some_vector, branch_select], derp,
                              mode=mode_with_gpu)
        # theano.printing.debugprint(obj, print_type=True)
        mem3 = freemem()
        print("After function compilation 1", mem3)
        assert mem2_1 == mem3, (mem2_1, mem3)
        for i in range(3):
            obj(test_params, 1)
            print("After function evaluation branch true", freemem())
            assert mem2_1 == freemem(), (mem2_1, freemem())
            obj(test_params, 0)
            print("After function evaluation branch false", freemem())
            assert mem2_1 == freemem(), (mem2_1, freemem())
        del obj
        print("After deleting function 1", freemem())
        assert mem2 == freemem(), (mem2, freemem())
        del derp, variables
        print("After deleting shared variable and ref to it", freemem())
        assert mem1 == freemem(), (mem1, freemem())
Author: ALISCIFP, Project: Segmentation, Lines: 60, Source: test_memory.py
Example 12: test_elemwise1
def test_elemwise1():
    """ Several kinds of elemwise expressions with no broadcasting,
    non power-of-two shape """
    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32') + 0.5, 'a')
    b = tensor.fmatrix()
    # let debugmode catch any mistakes
    print("STARTING FUNCTION 1")
    f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)
    for i, node in enumerate(f.maker.env.toposort()):
        print(i, node)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    print("STARTING FUNCTION 2")
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)
    for i, node in enumerate(f.maker.env.toposort()):
        print(i, node)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
    print("STARTING FUNCTION 3")
    # let debugmode catch any mistakes
    f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],
              mode=mode_with_gpu)
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)
Author: fivejjs, Project: Theano, Lines: 28, Source: test_basic_ops.py
Example 13: cmp
def cmp(a_shp, b_shp):
    a0 = my_rand(*a_shp)
    a = tcn.shared_constructor(a0, 'a')
    b = tensor.fmatrix('b')
    c = tensor.fmatrix('c')
    f = pfunc([b, c], [], updates=[(a, tensor.dot(a, b) + tensor.exp(c))],
              mode=mode_with_gpu)
    assert any([node.op == tcn.blas.gpu_gemm_inplace
                for node in f.maker.env.toposort()])
    bval = my_rand(*b_shp)
    cval = my_rand(a_shp[0], b_shp[1])
    f(bval, cval)
    assert numpy.allclose(numpy.dot(a0, bval) + numpy.exp(cval),
                          a.get_value())
    # Try with a matrix equal to a0, but with strides in both dims
    a.set_value(a0)
    a.set_value(
        a.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
        borrow=True)
    f(bval, cval)
Author: NicolasBouchard, Project: Theano, Lines: 26, Source: test_blas.py
Example 14: test_downsample
def test_downsample():
    shps = [(1, 1, 1, 12),
            (1, 1, 2, 2),
            (1, 1, 1, 1),
            (1, 1, 4, 4),
            (1, 1, 10, 11),
            (1, 2, 2, 2),
            (3, 5, 4, 4),
            (25, 1, 7, 7),
            (1, 1, 12, 12),
            (1, 1, 2, 14),
            (1, 1, 12, 14),
            (1, 1, 14, 14),
            (1, 1, 16, 16),
            (1, 1, 18, 18),
            (1, 1, 24, 24),
            (1, 6, 24, 24),
            (10, 1, 24, 24),
            (10, 6, 24, 24),
            (30, 6, 12, 12),
            (30, 2, 24, 24),
            (30, 6, 24, 24),
            (10, 10, 10, 11),
            (1, 1, 10, 1025),
            (1, 1, 10, 1023),
            (1, 1, 1025, 10),
            (1, 1, 1023, 10),
            ]
    numpy.random.RandomState(unittest_tools.fetch_seed()).shuffle(shps)
    for shp in shps:
        for ds in (2, 2), (3, 2), (1, 1):
            if ds[0] > shp[2]:
                continue
            if ds[1] > shp[3]:
                continue
            # GpuDownsampleFactorMax doesn't like having more than 512
            # columns in the output tensor.
            if float(shp[3]) / ds[1] > 512:
                continue
            for ignore_border in (True, False):
                print("test_downsample", shp, ds, ignore_border)
                ds_op = DownsampleFactorMax(ds, ignore_border=ignore_border)
                a = tcn.shared_constructor(my_rand(*shp), "a")
                f = pfunc([], ds_op(tensor.as_tensor_variable(a)),
                          mode=mode_with_gpu)
                f2 = pfunc([], ds_op(tensor.as_tensor_variable(a)),
                           mode=mode_without_gpu)
                assert any([isinstance(node.op,
                                       tcn.blas.GpuDownsampleFactorMax)
                            for node in f.maker.env.toposort()])
                assert any([isinstance(node.op, DownsampleFactorMax)
                            for node in f2.maker.env.toposort()])
                assert numpy.allclose(f(), f2())
                g = pfunc([],
                          tensor.grad(
                              ds_op(tensor.as_tensor_variable(a)).sum(), a),
                          mode=mode_with_gpu)
                g2 = pfunc([],
                           tensor.grad(
                               ds_op(tensor.as_tensor_variable(a)).sum(), a),
                           mode=mode_without_gpu)
                assert any([isinstance(node.op,
                                       tcn.blas.GpuDownsampleFactorMaxGrad)
                            for node in g.maker.env.toposort()])
                assert any([isinstance(node.op, DownsampleFactorMaxGrad)
                            for node in g2.maker.env.toposort()])
                assert numpy.allclose(g(), g2())
Author: pascanur, Project: Theano, Lines: 60, Source: test_blas.py
Example 15: test_gpuspecifyshape
def test_gpuspecifyshape():
    x = cuda.shared_constructor(numpy.ones(3, dtype='float32'), 'x')
    m = theano.tensor.specify_shape(x + numpy.float32(1), (3,))
    f = theano.function([], updates=[(x, m * numpy.float32(2))],
                        mode=mode_with_gpu)
    l = f.maker.fgraph.toposort()
    assert not numpy.any([isinstance(x.op, cuda.HostFromGpu) for x in l])
Author: Abioy, Project: Theano, Lines: 7, Source: test_opt.py
Example 16: test_gpujoin_twomatrices_badshapes
def test_gpujoin_twomatrices_badshapes():
    _a = numpy.asarray([[1, 2], [3, 4]], dtype="float32")
    _b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype="float32")
    a = tcn.shared_constructor(_a)
    b = tcn.shared_constructor(_b)
    # try to join on dimension 0 where they don't agree (2 != 3)
    c = gpu_join(0, a, b)
    f = theano.function([], c)
    try:
        f()
        assert False
    except ValueError:
        assert True
Author: olivierverdier, Project: Theano, Lines: 16, Source: test_basic_ops.py
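The try/except idiom above simply asserts that calling f() raises a ValueError. A more compact sketch of the same check, assuming assert_raises from numpy.testing (the helper also used in Example 17), would be:

from numpy.testing import assert_raises

# Equivalent expected-failure check: joining along dimension 0 with
# mismatched shapes (2 != 3) must raise a ValueError.
assert_raises(ValueError, f)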
Example 17: test_local_assert_no_cpu_op
def test_local_assert_no_cpu_op():
    numpy.random.seed(1)
    m = numpy.random.uniform(-1, 1, (10, 10)).astype("float32")
    ms = cuda.shared_constructor(m, name="m_shared")
    out = theano.tensor.tanh(ms).dot(ms.T)
    mode_local_assert = mode_with_gpu.including("assert_no_cpu_op")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_0")
    mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_1")
    old = config.assert_no_cpu_op
    old2 = config.on_opt_error
    # If the flag is raise
    try:
        config.assert_no_cpu_op = 'raise'
        config.on_opt_error = 'ignore'
        assert_raises(AssertionError, theano.function,
                      [], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
        config.on_opt_error = old2
    # If the flag is ignore
    try:
        config.assert_no_cpu_op = 'ignore'
        theano.function([], out, mode=mode_local_assert)
    finally:
        config.assert_no_cpu_op = old
Author: Abioy, Project: Theano, Lines: 29, Source: test_opt.py
Example 18: shared
def shared(val):
    # If we don't put shared on the GPU, we won't be able to test
    # the no inplace version as the added transfer will make them inplace.
    try:
        return tcn.shared_constructor(val)
    except TypeError:
        return theano.shared(val)
Author: gyenney, Project: Tools, Lines: 7, Source: test_blas.py
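This helper falls back to a host-side shared variable whenever shared_constructor rejects the value with a TypeError (for instance, data that is not float32). A hypothetical call site illustrating both paths:

import numpy

# float32 data can be placed on the GPU by shared_constructor...
v32 = shared(numpy.zeros((4, 4), dtype='float32'))
# ...while float64 data triggers the TypeError fallback to theano.shared.
v64 = shared(numpy.zeros((4, 4), dtype='float64'))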
Example 19: test_opt_gpujoin_joinvectors_negativeaxes
def test_opt_gpujoin_joinvectors_negativeaxes():
    """
    Test that negative axis concatenation works as expected.
    """
    # Test case for one-dimensional vectors
    rng = numpy.random.RandomState(22)
    x1 = rng.rand(5)
    x2 = rng.rand(10)
    t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
    t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
    t = tensor.concatenate([t1, t2], axis=-1)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-1))
    # Test case for two-dimensional matrices
    x1 = rng.rand(5, 10)
    x2 = rng.rand(10, 10)
    t1 = cuda.shared_constructor(numpy.asarray(x1, "float32"))
    t2 = cuda.shared_constructor(numpy.asarray(x2, "float32"))
    t = tensor.concatenate([t1, t2], axis=-2)
    f = theano.function(inputs=[], outputs=t)
    assert numpy.allclose(f(), numpy.concatenate([x1, x2], axis=-2))
    # Now check that a ValueError is raised when the matrices don't match
    # along the negative concatenation axis
    try:
        t = tensor.concatenate([t1, t2], axis=-1)
        f = theano.function(inputs=[], outputs=t)
        f()
        assert False
    except ValueError:
        assert True
    # Finally check that an IndexError is raised when the negative axis
    # is larger in absolute value than the smallest number of dims
    try:
        t = tensor.concatenate([t1, t2], axis=-3)
        f = theano.function(inputs=[], outputs=t)
        f()
        assert False
    except IndexError:
        assert True
Author: Abioy, Project: Theano, Lines: 47, Source: test_opt.py
Example 20: cmp
def cmp(a_shp, b_shp):
    a = tcn.shared_constructor(my_rand(*a_shp), 'a')
    cval = my_rand(a_shp[0], b_shp[1])
    c = tcn.shared_constructor(cval.copy(), 'c')
    b = tcn.fmatrix('b')
    b2 = tcn.fmatrix('b2')
    f = pfunc([b, b2], [tensor.dot(a, b2) + c],
              updates=[(a, tensor.dot(a, b) + c)], mode=mode_with_gpu)
    a0 = a.get_value() * 1.0
    assert any([node.op == tcn.blas.gpu_gemm_no_inplace
                for node in f.maker.env.toposort()])
    bval = my_rand(*b_shp)
    bval2 = my_rand(*b_shp)
    rval = f(bval, bval2)
    assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
    assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)
Author: HaniAlmousli, Project: Theano, Lines: 18, Source: test_blas.py
Note: The theano.sandbox.cuda.shared_constructor examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not repost without permission.