This article collects typical usage examples of the any function from Python's theano.gof.python25 module. If you have been wondering what any does, how to use it, or what it looks like in real code, the curated examples below should help.
Below, 20 code examples of the any function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code samples.
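For reference, theano.gof.python25 backports builtins that were missing before Python 2.5, and its any matches the semantics of the built-in any. The sketch below illustrates those semantics; it is a minimal reconstruction, not necessarily the module's exact source:

def any(iterable):
    # Return True as soon as any element is truthy, False otherwise.
    for element in iterable:
        if element:
            return True
    return False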
Example 1: test_default_conv
def test_default_conv():
    """Just test that we introduce the right GPU convolution
    version.

    """
    img = theano.tensor.ftensor4()
    fil = theano.tensor.ftensor4()

    c = theano.tensor.nnet.conv2d(img, fil)
    f = theano.function([img, fil], c, mode=theano_mode)
    if cuda.dnn.dnn_available():
        assert any([isinstance(a.op, GpuDnnConv)
                    for a in f.maker.fgraph.apply_nodes])
    else:
        assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
                    for a in f.maker.fgraph.apply_nodes])

    mode = theano_mode.excluding('local_conv_dnn', 'local_conv_gemm')
    f = theano.function([img, fil], c, mode=mode)
    assert any([isinstance(a.op, cuda.blas.GpuConv)
                for a in f.maker.fgraph.apply_nodes])

    mode = theano_mode.excluding('conv_dnn', 'conv_gemm')
    f = theano.function([img, fil], c, mode=mode)
    assert any([isinstance(a.op, cuda.blas.GpuConv)
                for a in f.maker.fgraph.apply_nodes])
Developer ID: 317070, Project: Theano, Lines: 29, Source: test_conv_cuda_ndarray.py
Example 2: test_downsample
def test_downsample():
    shps = [(1, 1, 1, 12),
            (1, 1, 2, 2),
            (1, 1, 1, 1),
            (1, 1, 4, 4),
            (1, 1, 10, 11),
            (1, 2, 2, 2),
            (3, 5, 4, 4),
            (25, 1, 7, 7),
            (1, 1, 12, 12),
            (1, 1, 2, 14),
            (1, 1, 12, 14),
            (1, 1, 14, 14),
            (1, 1, 16, 16),
            (1, 1, 18, 18),
            (1, 1, 24, 24),
            (1, 6, 24, 24),
            (10, 1, 24, 24),
            (10, 6, 24, 24),
            (30, 6, 12, 12),
            (30, 2, 24, 24),
            (30, 6, 24, 24),
            (10, 10, 10, 11),
            (1, 1, 10, 1025),
            (1, 1, 10, 1023),
            (1, 1, 1025, 10),
            (1, 1, 1023, 10),
            ]

    numpy.random.RandomState(unittest_tools.fetch_seed()).shuffle(shps)

    for shp in shps:
        for ds in (2, 2), (3, 2), (1, 1):
            if ds[0] > shp[2]:
                continue
            if ds[1] > shp[3]:
                continue

            # GpuDownsampleFactorMax doesn't like having more than 512 columns
            # in the output tensor.
            if float(shp[3]) / ds[1] > 512:
                continue

            for ignore_border in (True, False):
                print "test_downsample", shp, ds, ignore_border
                ds_op = DownsampleFactorMax(ds, ignore_border=ignore_border)

                a = tcn.shared_constructor(my_rand(*shp), "a")
                f = pfunc([], ds_op(tensor.as_tensor_variable(a)),
                          mode=mode_with_gpu)
                f2 = pfunc([], ds_op(tensor.as_tensor_variable(a)),
                           mode=mode_without_gpu)
                assert any([isinstance(node.op,
                                       tcn.blas.GpuDownsampleFactorMax)
                            for node in f.maker.env.toposort()])
                assert any([isinstance(node.op, DownsampleFactorMax)
                            for node in f2.maker.env.toposort()])
                assert numpy.allclose(f(), f2())

                g = pfunc([],
                          tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(),
                                      a),
                          mode=mode_with_gpu)
                g2 = pfunc([],
                           tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(),
                                       a),
                           mode=mode_without_gpu)
                assert any([isinstance(node.op,
                                       tcn.blas.GpuDownsampleFactorMaxGrad)
                            for node in g.maker.env.toposort()])
                assert any([isinstance(node.op, DownsampleFactorMaxGrad)
                            for node in g2.maker.env.toposort()])
                assert numpy.allclose(g(), g2())
Developer ID: pascanur, Project: Theano, Lines: 60, Source: test_blas.py
Example 3: test_gpu_opt
def test_gpu_opt():
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    # We test the case where we put the op on the gpu when the output
    # is moved to the gpu.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([p, u], m_gpu, allow_input_downcast=True,
                 mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    # Test with a row, it was failing in the past.
    r = tensor.frow()
    m = multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([r, u], m_gpu, allow_input_downcast=True,
                 mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval2 = f(pval, uval)
Developer ID: 317070, Project: Theano, Lines: 35, Source: test_multinomial.py
Example 4: test_neibs_gpu
def test_neibs_gpu():
    if not cuda.cuda_available:
        raise SkipTest('Optional package cuda disabled')

    for shape, pshape in [((100, 40, 18, 18), (2, 2)),
                          ((100, 40, 6, 18), (3, 2)),
                          ((10, 40, 66, 66), (33, 33)),
                          ((10, 40, 68, 66), (34, 33))]:

        images = shared(numpy.arange(numpy.prod(shape),
                                     dtype='float32').reshape(shape))
        neib_shape = T.as_tensor_variable(pshape)

        f = function([], images2neibs(images, neib_shape),
                     mode=mode_with_gpu)
        f_gpu = function([], images2neibs(images, neib_shape),
                         mode=mode_with_gpu)
        assert any([isinstance(node.op, GpuImages2Neibs)
                    for node in f_gpu.maker.env.toposort()])

        #print images.get_value(borrow=True)
        neibs = numpy.asarray(f_gpu())
        assert numpy.allclose(neibs, f())
        #print neibs

        g = function([], neibs2images(neibs, neib_shape, images.shape),
                     mode=mode_with_gpu)
        assert any([isinstance(node.op, GpuImages2Neibs)
                    for node in f.maker.env.toposort()])

        #print numpy.asarray(g())
        assert numpy.allclose(images.get_value(borrow=True), g())
Developer ID: jsalvatier, Project: Theano-1, Lines: 29, Source: test_neighbours.py
Example 5: _compile_and_check
def _compile_and_check(self, inputs, outputs, numeric_inputs, cls,
                       excluding=None, warn=True, check_topo=True):
    """This tests the infer_shape method only.

    When testing with input values with shapes that take the same
    value over different dimensions (for instance, a square
    matrix, or a tensor3 with shape (n, n, n), or (m, n, m)), it
    is not possible to detect if the output shape was computed
    correctly, or if some shapes with the same value have been
    mixed up. For instance, if the infer_shape uses the width of a
    matrix instead of its height, then testing with only square
    matrices will not detect the problem. If warn=True, we emit a
    warning when testing with such values.

    :param check_topo: If True, we check that the Op was removed
        from the graph. False is useful to test not-implemented cases.

    """
    mode = self.mode
    if excluding:
        mode = mode.excluding(*excluding)
    if warn:
        for var, inp in zip(inputs, numeric_inputs):
            if isinstance(inp, (int, float, list, tuple)):
                inp = var.type.filter(inp)
            if not hasattr(inp, "shape"):
                continue
            # remove broadcasted dims as it is sure they can't be
            # changed to prevent the same dim problem.
            if hasattr(var.type, "broadcastable"):
                shp = [inp.shape[i] for i in range(inp.ndim)
                       if not var.type.broadcastable[i]]
            else:
                shp = inp.shape
            if len(set(shp)) != len(shp):
                _logger.warn(
                    "While testing the shape inference, we received an"
                    " input with a shape that has some repeated values: %s"
                    ", like a square matrix. This makes it impossible to"
                    " check if the values for these dimensions have been"
                    " correctly used, or if they have been mixed up.",
                    str(inp.shape))
                break

    outputs_function = theano.function(inputs, outputs, mode=mode)
    shapes_function = theano.function(inputs, [o.shape for o in outputs],
                                      mode=mode)
    #theano.printing.debugprint(shapes_function)

    # Check that the Op is removed from the compiled function.
    if check_topo:
        topo_shape = shapes_function.maker.fgraph.toposort()
        assert not any(isinstance(t.op, cls) for t in topo_shape)
    topo_out = outputs_function.maker.fgraph.toposort()
    assert any(isinstance(t.op, cls) for t in topo_out)

    # Check that the shape produced agrees with the actual shape.
    numeric_outputs = outputs_function(*numeric_inputs)
    numeric_shapes = shapes_function(*numeric_inputs)
    for out, shape in zip(numeric_outputs, numeric_shapes):
        assert numpy.all(out.shape == shape)
Developer ID: 317070, Project: Theano, Lines: 59, Source: unittest_tools.py
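The square-shape caveat in the docstring above is easy to see with plain numpy. This is a standalone illustration, not part of the original test helper:

import numpy

# With a square input, an infer_shape that mixes up height and width
# still reports a correct-looking shape, so the bug stays invisible...
square = numpy.zeros((5, 5))
assert square.shape == square.shape[::-1]

# ...while a rectangular input exposes the swap immediately.
rect = numpy.zeros((3, 5))
assert rect.shape != rect.shape[::-1]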
Example 6: _get_kernel_flags
def _get_kernel_flags(self, *dtypes):
    dtypes = [numpy.dtype(d) for d in dtypes]
    flags = ['GA_USE_CLUDA']
    if any(d == numpy.float64 for d in dtypes):
        flags.append('GA_USE_DOUBLE')
    if any(d.itemsize < 4 for d in dtypes):
        flags.append('GA_USE_SMALL')
    return '|'.join(flags)
Developer ID: chagge, Project: Theano, Lines: 8, Source: basic_ops.py
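A quick illustration of what the flag logic above produces. The call is hypothetical; kernel_base stands in for whatever object defines _get_kernel_flags:

# float64 triggers GA_USE_DOUBLE; int8 has itemsize 1 (< 4 bytes),
# so GA_USE_SMALL is added as well.
flags = kernel_base._get_kernel_flags('float64', 'int8')
assert flags == 'GA_USE_CLUDA|GA_USE_DOUBLE|GA_USE_SMALL'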
Example 7: test_GpuCrossentropySoftmax1HotWithBiasDx
def test_GpuCrossentropySoftmax1HotWithBiasDx():
    """
    This is a basic test for GpuCrossentropySoftmax1HotWithBiasDx.

    We check that we loop when there are too many threads.
    TODO: check that we loop when there are too many blocks (>32*1024)

    """
    n_in = 1000
    batch_size = 4097
    n_out = 1250

    # Seed numpy.random with config.unittests.rseed
    utt.seed_rng()

    softmax_output_value = numpy.random.rand(batch_size,
                                             n_out).astype("float32")
    dnll_value = numpy.asarray(numpy.random.rand(batch_size), dtype="float32")
    y_idx_value = numpy.random.randint(low=0, high=5, size=batch_size)

    softmax_output = T.fmatrix()
    softmax_output /= softmax_output.sum(axis=1).reshape(
        softmax_output.shape[1], 1)
    op = theano.tensor.nnet.crossentropy_softmax_1hot_with_bias_dx(
        dnll_value, softmax_output, y_idx_value)

    cpu_f = theano.function([softmax_output], op, mode=mode_without_gpu)
    gpu_f = theano.function([softmax_output], op, mode=mode_with_gpu)
    # theano.printing.debugprint(cpu_f)
    # theano.printing.debugprint(gpu_f)

    assert any([isinstance(node.op, T.nnet.CrossentropySoftmax1HotWithBiasDx)
                for node in cpu_f.maker.fgraph.toposort()])
    assert any([isinstance(node.op,
                           cuda.nnet.GpuCrossentropySoftmax1HotWithBiasDx)
                for node in gpu_f.maker.fgraph.toposort()])

    cpu_out = cpu_f(softmax_output_value)
    gpu_out = gpu_f(softmax_output_value)

    rtol = 1e-5
    atol = 1e-6
    if not numpy.allclose(cpu_out, gpu_out, rtol=rtol, atol=atol):
        abs_err, rel_err = T.numeric_grad.abs_rel_err(cpu_out, gpu_out)
        scaled_err = numpy.minimum(abs_err / atol, rel_err / rtol)
        max_i = scaled_err.argmax()

        print "max err index:", max_i, max_i / batch_size,
        print max_i % batch_size, max_i / n_out, max_i % n_out
        print "At that index:"
        print "err:", scaled_err.flatten()[max_i]
        print "absolute error:", abs_err.flatten()[max_i]
        print "relative error:", rel_err.flatten()[max_i]
        print "cpu_out:", cpu_out.flatten()[max_i]
        print "gpu_out:", gpu_out.flatten()[max_i]
        print "softmax_output_value:", softmax_output_value.flatten()[max_i]
        print "dnll_value:", dnll_value[max_i / n_out]
        print "y_idx_value:", y_idx_value[max_i / n_out]

        assert False, "numpy.allclose(cpu_out, gpu_out, rtol=%s, atol=%s)" % (
            rtol, atol)
Developer ID: npinto, Project: Theano, Lines: 58, Source: test_nnet.py
Example 8: execute
def execute(execute=True, verbose=True, M=2000, N=2000, K=2000,
            iters=10, order='C'):
    """
    :param execute: If True, execute a Theano function that should call gemm.
    :param verbose: If True, will print some Theano flags and env variables.
    :param M,N,K: The M,N,K size used by gemm.
    :param iters: The number of calls to gemm to do.

    :return: a tuple (execution time,
                      str that represents the implementation used)
    """
    a = theano.shared(numpy.ones((M, N), dtype=theano.config.floatX,
                                 order=order))
    b = theano.shared(numpy.ones((N, K), dtype=theano.config.floatX,
                                 order=order))
    c = theano.shared(numpy.ones((M, K), dtype=theano.config.floatX,
                                 order=order))
    f = theano.function([], updates={c: 0.4 * c + .8 * T.dot(a, b)})

    if verbose:
        print 'Some Theano flags:'
        print ' blas.ldflags=', theano.config.blas.ldflags
        print ' compiledir=', theano.config.compiledir
        print ' floatX=', theano.config.floatX
        print 'Some environment variables:'
        print ' MKL_NUM_THREADS=', os.getenv('MKL_NUM_THREADS')
        print ' OMP_NUM_THREADS=', os.getenv('OMP_NUM_THREADS')
        print ' GOTO_NUM_THREADS=', os.getenv('GOTO_NUM_THREADS')
        print
        print ('Numpy config: (used when the Theano flag'
               ' "blas.ldflags" is empty)')
        numpy.show_config()
        print 'Numpy dot module:', numpy.dot.__module__
        print 'Numpy location:', numpy.__file__
        print 'Numpy version:', numpy.__version__
        print

    t0 = 0
    t1 = -1

    if any([x.op.__class__.__name__ == 'Gemm' for x in
            f.maker.env.toposort()]):
        impl = 'cpu'
    elif any([x.op.__class__.__name__ == 'GpuGemm' for x in
              f.maker.env.toposort()]):
        impl = 'gpu'
    else:
        impl = 'ERROR, unable to tell if Theano used the cpu or the gpu:\n'
        impl += str(f.maker.env.toposort())

    if execute:
        t0 = time.time()
        for i in range(iters):
            f()
        t1 = time.time()

    return t1 - t0, impl
Developer ID: vlb, Project: Theano, Lines: 56, Source: check_blas.py
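A hypothetical way to drive the benchmark above, timing ten 2000x2000 gemm products (the argument values are only examples):

# execute() returns the elapsed time and a string naming the
# implementation it detected (cpu, gpu, or an error message).
t, impl = execute(execute=True, verbose=False, M=2000, N=2000, K=2000,
                  iters=10)
print 'gemm took %.2fs using: %s' % (t, impl)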
Example 9: execute
def execute(execute=True, verbose=True, M=2000, N=2000, K=2000, iters=10,
            order="C"):
    """
    :param execute: If True, execute a Theano function that should call gemm.
    :param verbose: If True, will print some Theano flags and env variables.
    :param M,N,K: The M,N,K size used by gemm.
    :param iters: The number of calls to gemm to do.

    :return: a tuple (execution time,
                      str that represents the implementation used)
    """
    if verbose:
        print "Some Theano flags:"
        print " blas.ldflags=", theano.config.blas.ldflags
        print " compiledir=", theano.config.compiledir
        print " floatX=", theano.config.floatX
        print "Some environment variables:"
        print " MKL_NUM_THREADS=", os.getenv("MKL_NUM_THREADS")
        print " OMP_NUM_THREADS=", os.getenv("OMP_NUM_THREADS")
        print " GOTO_NUM_THREADS=", os.getenv("GOTO_NUM_THREADS")
        print
        print ("Numpy config: (used when the Theano flag"
               ' "blas.ldflags" is empty)')
        numpy.show_config()
        print "Numpy dot module:", numpy.dot.__module__
        print "Numpy location:", numpy.__file__
        print "Numpy version:", numpy.__version__
        print

    a = theano.shared(numpy.ones((M, N), dtype=theano.config.floatX,
                                 order=order))
    b = theano.shared(numpy.ones((N, K), dtype=theano.config.floatX,
                                 order=order))
    c = theano.shared(numpy.ones((M, K), dtype=theano.config.floatX,
                                 order=order))
    f = theano.function([], updates={c: 0.4 * c + 0.8 * T.dot(a, b)},
                        mode=theano.compile.ProfileMode())

    if any([x.op.__class__.__name__ == "Gemm" for x in
            f.maker.env.toposort()]):
        c_impl = f.profile.apply_cimpl.values()
        assert len(c_impl) == 1
        if c_impl[0]:
            impl = "CPU (with direct Theano binding to blas)"
        else:
            impl = ("CPU (without direct Theano binding to blas"
                    " but with numpy/scipy binding to blas)")
    elif any([x.op.__class__.__name__ == "GpuGemm" for x in
              f.maker.env.toposort()]):
        impl = "GPU"
    else:
        impl = "ERROR, unable to tell if Theano used the cpu or the gpu:\n"
        impl += str(f.maker.env.toposort())

    t0 = 0
    t1 = -1

    if execute:
        t0 = time.time()
        for i in range(iters):
            f()
        t1 = time.time()

    return t1 - t0, impl
Developer ID: jsalvatier, Project: Theano-1, Lines: 55, Source: check_blas.py
Example 10: execute
def execute(execute=True, verbose=True):
    a = theano.shared(numpy.ones(shapes, dtype=theano.config.floatX))
    b = theano.shared(numpy.ones(shapes, dtype=theano.config.floatX))
    c = theano.shared(numpy.ones(shapes, dtype=theano.config.floatX))
    f = theano.function([], updates={c: 0.4 * c + .8 * T.dot(a, b)})

    if verbose:
        print 'Some theano flags:'
        print ' blas.ldflags=', theano.config.blas.ldflags
        print ' compiledir=', theano.config.compiledir
        print ' floatX=', theano.config.floatX
        print 'Some env flags:'
        print ' MKL_NUM_THREADS=', os.getenv('MKL_NUM_THREADS')
        print ' OMP_NUM_THREADS=', os.getenv('OMP_NUM_THREADS')
        print ' GOTO_NUM_THREADS=', os.getenv('GOTO_NUM_THREADS')
        print
        print ('Numpy config: (used when the theano flag'
               ' "blas.ldflags" is empty)')
        numpy.show_config()
        print 'Numpy dot module:', numpy.dot.__module__
        print 'Numpy file location that was loaded:', numpy.__file__
        print 'Numpy version:', numpy.__version__
        print

    if any([x.op.__class__.__name__ == 'Gemm' for x in
            f.maker.env.toposort()]):
        print 'Used the cpu'
    elif any([x.op.__class__.__name__ == 'GpuGemm' for x in
              f.maker.env.toposort()]):
        print 'Used the gpu'
    else:
        print 'ERROR, not able to tell if theano used the cpu or the gpu'
        print f.maker.env.toposort()

    t0 = 0
    t1 = -1

    if execute:
        t0 = time.time()
        for i in range(iters):
            f()
        t1 = time.time()

    if verbose and execute:
        print
        print 'This execution took %.2fs' % (t1 - t0)
        print
        print ('Try running this script a few times. Experience shows that'
               ' the first run is not as fast as the following ones. The'
               ' difference is not big, but it is consistent.')
    return t1 - t0
Developer ID: delallea, Project: Theano, Lines: 50, Source: check_blas.py
Example 11: _compile_and_check
def _compile_and_check(self, inputs, outputs, numeric_inputs, cls):
    outputs_function = theano.function(inputs, outputs, mode=self.mode)
    shapes_function = theano.function(inputs, [o.shape for o in outputs],
                                      mode=self.mode)
    #theano.printing.debugprint(shapes_function)

    # Check that the Op is removed from the compiled function.
    topo_shape = shapes_function.maker.env.toposort()
    assert not any(isinstance(t.op, cls) for t in topo_shape)
    topo_out = outputs_function.maker.env.toposort()
    assert any(isinstance(t.op, cls) for t in topo_out)

    # Check that the shape produced agrees with the actual shape.
    numeric_outputs = outputs_function(*numeric_inputs)
    numeric_shapes = shapes_function(*numeric_inputs)
    for out, shape in zip(numeric_outputs, numeric_shapes):
        assert numpy.all(out.shape == shape)
Developer ID: fivejjs, Project: Theano, Lines: 15, Source: unittest_tools.py
Example 12: list_of_nodes
def list_of_nodes(inputs, outputs):
    """ Return the apply nodes of the graph between inputs and outputs """
    return stack_search(
        deque([o.owner for o in outputs]),
        lambda o: [inp.owner for inp in o.inputs
                   if inp.owner
                   and not any(i in inp.owner.outputs for i in inputs)])
Developer ID: marcino239, Project: Theano, Lines: 7, Source: graph.py
Example 13: body
def body(mode, gpu):
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
    if gpu:
        assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                    for node in f.maker.env.toposort()])

    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    assert mval.shape == pval.shape
    if config.cast_policy == 'custom':
        assert mval.dtype == pval.dtype
    elif config.cast_policy == 'numpy+floatX':
        assert mval.dtype == config.floatX
    elif config.cast_policy == 'numpy':
        assert mval.dtype == 'float64'
    else:
        raise NotImplementedError(config.cast_policy)
    assert numpy.allclose(mval.sum(axis=1), 2)
    asdf = numpy.asarray([0, 0, 2, 0]) + 0 * pval
    assert numpy.allclose(mval, asdf)  # broadcast over all rows
Developer ID: NicolasBouchard, Project: Theano, Lines: 25, Source: test_multinomial.py
Example 14: cmp
def cmp(a_shp, b_shp):
    a0 = my_rand(*a_shp)
    a = tcn.shared_constructor(a0, 'a')
    b = tensor.fmatrix('b')
    c = tensor.fmatrix('c')

    f = pfunc([b, c], [], updates=[(a, tensor.dot(a, b) + tensor.exp(c))],
              mode=mode_with_gpu)
    assert any([node.op == tcn.blas.gpu_gemm_inplace
                for node in f.maker.fgraph.toposort()])

    bval = my_rand(*b_shp)
    cval = my_rand(a_shp[0], b_shp[1])
    f(bval, cval)

    assert numpy.allclose(numpy.dot(a0, bval) + numpy.exp(cval),
                          a.get_value())

    # Try with a matrix equal to a0, but with strides in both dims
    a.set_value(a0)
    a.set_value(
        a.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
        borrow=True)
    f(bval, cval)
Developer ID: npinto, Project: Theano, Lines: 26, Source: test_blas.py
Example 15: _toposort
def _toposort(edges):
    """ Topological sort algorithm by Kahn [1] - O(nodes + edges)

    inputs:
        edges - a dict of the form {a: {b, c}} where b and c depend on a
    outputs:
        L - an ordered list of nodes that satisfy the dependencies of edges

    >>> _toposort({1: {2, 3}, 2: (3, )})
    [1, 2, 3]

    Closely follows the wikipedia page [2]

    [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
        Communications of the ACM
    [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
    """
    incoming_edges = reverse_dict(edges)
    incoming_edges = dict((k, set(val)) for k, val in incoming_edges.items())
    S = set((v for v in edges if v not in incoming_edges))
    L = []

    while S:
        n = S.pop()
        L.append(n)
        for m in edges.get(n, ()):
            assert n in incoming_edges[m]
            incoming_edges[m].remove(n)
            if not incoming_edges[m]:
                S.add(m)
    if any(incoming_edges.get(v, None) for v in edges):
        raise ValueError("Input has cycles")
    return L
Developer ID: errord, Project: Theano, Lines: 33, Source: sched.py
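Note that reverse_dict is defined elsewhere in sched.py and is not shown here. A minimal sketch of the behaviour _toposort relies on, written as an assumption about the original (which may differ in detail, e.g. it could collect tuples rather than sets):

def reverse_dict(d):
    # Invert an edge dict: map each node to the set of nodes that
    # list it as a successor, e.g. {1: (2,)} becomes {2: set([1])}.
    result = {}
    for key, vals in d.items():
        for val in vals:
            result.setdefault(val, set()).add(key)
    return result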
Example 16: test_neibs
def test_neibs(self):
    for shape, pshape in [((100, 40, 18, 18), (2, 2)),
                          ((100, 40, 6, 18), (3, 2)),
                          ((10, 40, 66, 66), (33, 33)),
                          ((10, 40, 68, 66), (34, 33))]:
        for border in ['valid', 'ignore_borders']:
            for dtype in self.dtypes:
                images = shared(
                    numpy.arange(numpy.prod(shape), dtype=dtype
                                 ).reshape(shape))
                neib_shape = T.as_tensor_variable(pshape)

                f = function([],
                             images2neibs(images, neib_shape, mode=border),
                             mode=self.mode)

                #print images.get_value(borrow=True)
                neibs = f()
                #print neibs
                g = function([],
                             neibs2images(neibs, neib_shape, images.shape),
                             mode=self.mode)
                if border in ['valid']:
                    assert any([isinstance(node.op, self.op)
                                for node in f.maker.fgraph.toposort()])

                #print g()
                assert numpy.allclose(images.get_value(borrow=True), g())
Developer ID: SinaHonari, Project: Theano, Lines: 29, Source: test_neighbours.py
Example 17: profile_printer
def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
                    apply_time, apply_cimpl, message, outputs_size,
                    other_time):
    # Scan overhead profile
    if any([isinstance(node.op, Scan) and v > 0 for (_, node), v in
            apply_time.items()]):
        print
        print 'Scan overhead:'
        print ('<Scan op time(s)> <sub scan fct time(s)> <sub scan op '
               'time(s)> <sub scan fct time(% scan op time)> <sub scan '
               'op time(% scan op time)> <node>')

        total_super_scan_time = 0
        total_scan_fct_time = 0
        total_scan_op_time = 0
        for (_, node), v in apply_time.items():
            if isinstance(node.op, Scan):
                if v > 0:
                    scan_fct_time = node.op.mode_instance.fn_time
                    scan_op_time = node.op.mode_instance.local_time
                    total_super_scan_time += v
                    total_scan_fct_time += scan_fct_time
                    total_scan_op_time += scan_op_time
                    print ' %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % (
                        v, scan_fct_time, scan_op_time,
                        scan_fct_time / v * 100,
                        scan_op_time / v * 100), node
                else:
                    print (' The node took 0s, so we can not compute the '
                           'overhead'), node
        print ' total %5.1fs %5.1fs %5.1fs %5.1f%% %5.1f%%' % (
            total_super_scan_time, total_scan_fct_time, total_scan_op_time,
            total_scan_fct_time / total_super_scan_time * 100,
            total_scan_op_time / total_super_scan_time * 100)
Developer ID: HaniAlmousli, Project: Theano, Lines: 32, Source: scan_op.py
Example 18: test_logical_shapes
def test_logical_shapes(self):
    seed_rng()
    for stride in range(1, 4):
        kshp = (10, 2, 10, 10)
        featshp = (3, 10, 11, 11)

        a = tensor.ftensor4()
        A = tensor.ftensor4()

        # Need to transpose first two dimensions of kernel, and reverse
        # index kernel image dims (for correlation)
        kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

        featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                           featshp[3] * stride)
        kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
        #print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
        image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
                                            border_mode='full',
                                            image_shape=featshp,
                                            filter_shape=kshp_rotated,
                                            imshp_logical=featshp_logical[1:],
                                            kshp_logical=kshp[2:])

        func = theano.function([a, A], image_estimate, mode=theano_mode)
        #theano.printing.debugprint(func,)
        assert any([isinstance(node.op, theano.sandbox.cuda.blas.GpuConv)
                    for node in func.maker.fgraph.toposort()])

        a_in = numpy.random.randn(*featshp).astype("float32")
        A_in = numpy.random.randn(*kshp).astype("float32")

        func(a_in, A_in)
Developer ID: JoeGlobal2014, Project: Theano, Lines: 33, Source: test_conv_cuda_ndarray.py
Example 19: test_elemwise_composite_float64
def test_elemwise_composite_float64():
    # Test that we do not fuse a composite elemwise that has a float64
    # somewhere inside: nvcc by default downcasts float64 to float32.
    # We would need to tell it not to do so, but that is only possible
    # on some devices.
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.env.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l

    for mode in [mode_with_gpu, mode_with_gpu.excluding('gpu_after_fusion'),
                 mode_with_gpu.excluding('elemwise_fusion')]:
        f = pfunc([a, b],
                  tensor.cast(tensor.lt(tensor.cast(a, 'float64') ** 2,
                                        b),
                              'float32'), mode=mode)

        out = f(av, bv)
        assert numpy.all(out == ((av ** 2) < bv))
        for node in f.maker.env.toposort():
            if isinstance(node.op, cuda.GpuElemwise):
                if isinstance(node.op.scalar_op, theano.scalar.Composite):
                    scals = get_all_basic_scalar(node.op.scalar_op)
                    for s in scals:
                        assert not any([i.type.dtype == 'float64'
                                        for i in s.inputs + s.outputs])
Developer ID: gexarcha, Project: Theano, Lines: 33, Source: test_basic_ops.py
Example 20: multinomial
def multinomial(self, size=None, n=1, pvals=None, ndim=None, dtype='int64',
                nstreams=None):
    """
    Sample `n` (currently `n` needs to be 1) times from a multinomial
    distribution defined by probabilities pvals.

    Example: pvals = [[.98, .01, .01], [.01, .98, .01]] will
    probably result in [[1, 0, 0], [0, 1, 0]].

    .. note::
        `size` and `ndim` are only there to keep the same signature as
        the other samplers: uniform, binomial, normal, etc.
        TODO: adapt multinomial to take them into account

    """
    if pvals is None:
        raise TypeError("You have to specify pvals")
    pvals = as_tensor_variable(pvals)

    if size is not None:
        if any([isinstance(i, int) and i <= 0 for i in size]):
            raise ValueError(
                "The specified size contains a dimension with value <= 0",
                size)

    if n == 1 and pvals.ndim == 2:
        ndim, size, bcast = raw_random._infer_ndim_bcast(
            ndim, size, pvals[:, 0])
        assert ndim == 1
        bcast = bcast + (pvals.type.broadcastable[-1],)
        unis = self.uniform(size=size, ndim=1, nstreams=nstreams)
        op = multinomial.MultinomialFromUniform(dtype)
        return op(pvals, unis)
    else:
        raise NotImplementedError(("MRG_RandomStreams.multinomial only"
                                   " implemented with n == 1 and"
                                   " pvals.ndim = 2"))
Developer ID: NicolasBouchard, Project: Theano, Lines: 34, Source: rng_mrg.py
Note: The theano.gof.python25.any examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.