This page collects typical usage examples of the Python function theano.compat.python2x.any. If you are wondering how to use the any function, what it does, or what real code that calls it looks like, the curated examples below should help.
20 code examples of the any function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
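For context: theano.compat.python2x backported builtins that pre-2.5 Python interpreters lacked, and any only became a builtin in Python 2.5. The sketch below illustrates what such a fallback looks like; it is an illustration of the idea, not Theano's verbatim source.

try:
    any  # probe for the builtin (present on Python >= 2.5)
except NameError:
    def any(iterable):
        """Fallback any: True if at least one element is truthy."""
        for element in iterable:
            if element:
                return True
        return False

On a modern interpreter the probe succeeds and the builtin is used unchanged, which is why the examples below behave identically whether any comes from the compat module or from the language itself.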
Example 1: test_default_conv
def test_default_conv():
    """Just test that we introduce the right GPU convolution
    version.
    """
    img = theano.tensor.ftensor4()
    fil = theano.tensor.ftensor4()

    c = theano.tensor.nnet.conv2d(img, fil)
    f = theano.function([img, fil], c, mode=theano_mode)
    if cuda.dnn.dnn_available():
        assert any([isinstance(a.op, GpuDnnConv)
                    for a in f.maker.fgraph.apply_nodes])
    else:
        assert any([isinstance(a.op, cuda.blas.GpuCorrMM)
                    for a in f.maker.fgraph.apply_nodes])

    mode = theano_mode.excluding('local_conv_dnn', 'local_conv_gemm')
    f = theano.function([img, fil], c, mode=mode)
    assert any([isinstance(a.op, cuda.blas.GpuConv)
                for a in f.maker.fgraph.apply_nodes])

    mode = theano_mode.excluding('conv_dnn', 'conv_gemm')
    f = theano.function([img, fil], c, mode=mode)
    assert any([isinstance(a.op, cuda.blas.GpuConv)
                for a in f.maker.fgraph.apply_nodes])
Developer ID: gyenney, Project: Tools, Lines of code: 29, Source file: test_conv_cuda_ndarray.py
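Nearly every example on this page follows the same idiom: compile a Theano function, then scan the optimized graph for an expected Op class. A small helper capturing that pattern is sketched below; the name graph_contains_op is ours for illustration and is not part of Theano's API.

def graph_contains_op(compiled_fn, op_class):
    # True if any apply node in the optimized graph uses op_class.
    return any(isinstance(node.op, op_class)
               for node in compiled_fn.maker.fgraph.toposort())

# Mirroring the assertions above:
#     assert graph_contains_op(f, cuda.blas.GpuConv)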
Example 2: test_log1msigm_to_softplus
def test_log1msigm_to_softplus(self):
    x = T.matrix()

    out = T.log(1 - sigmoid(x))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[1].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a flatten
    out = T.log(1 - T.flatten(sigmoid(x)))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert isinstance(topo[0].op, T.Flatten)
    assert isinstance(topo[1].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a reshape
    out = T.log(1 - sigmoid(x).reshape([x.size]))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    # assert len(topo) == 3
    assert any(isinstance(node.op, T.Reshape) for node in topo)
    assert any(isinstance(getattr(node.op, 'scalar_op', None),
                          theano.tensor.nnet.sigm.ScalarSoftplus)
               for node in topo)
    f(numpy.random.rand(54, 11).astype(config.floatX))
Developer ID: Jackwangyang, Project: Theano, Lines of code: 33, Source file: test_sigm.py
Example 3: test_gpu_opt
def test_gpu_opt():
    if not cuda.cuda_available:
        # Skip test if cuda_ndarray is not available.
        from nose.plugins.skip import SkipTest
        raise SkipTest('Optional package cuda not available')

    # We test the case where we put the op on the gpu when the output
    # is moved to the gpu.
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([p, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    # Test with a row, it was failing in the past.
    r = tensor.frow()
    m = multinomial.MultinomialFromUniform('auto')(r, u)
    assert m.dtype == 'float32', m.dtype
    m_gpu = cuda.gpu_from_host(m)

    f = function([r, u], m_gpu, allow_input_downcast=True, mode=get_mode(True))
    assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                for node in f.maker.fgraph.toposort()])
    pval = numpy.arange(1 * 4, dtype='float32').reshape((1, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval2 = f(pval, uval)
Developer ID: Jackwangyang, Project: Theano, Lines of code: 35, Source file: test_multinomial.py
Example 4: _compile_and_check
def _compile_and_check(self, inputs, outputs, numeric_inputs, cls,
                       excluding=None, warn=True, check_topo=True):
    """This tests the infer_shape method only.

    When testing with input values whose shapes take the same value
    over different dimensions (for instance, a square matrix, or a
    tensor3 with shape (n, n, n), or (m, n, m)), it is not possible
    to detect whether the output shape was computed correctly, or
    whether some shapes with the same value have been mixed up. For
    instance, if infer_shape uses the width of a matrix instead of
    its height, then testing with only square matrices will not
    detect the problem. If warn=True, we emit a warning when testing
    with such values.

    :param check_topo: If True, check that the Op was removed from
        the graph. False is useful for testing cases that are not
        implemented.
    """
    mode = self.mode
    if excluding:
        mode = mode.excluding(*excluding)
    if warn:
        for var, inp in zip(inputs, numeric_inputs):
            if isinstance(inp, (int, float, list, tuple)):
                inp = var.type.filter(inp)
            if not hasattr(inp, "shape"):
                continue
            # Remove broadcasted dims, as they are sure not to
            # change; this avoids the repeated-dimension problem.
            if hasattr(var.type, "broadcastable"):
                shp = [inp.shape[i] for i in range(inp.ndim)
                       if not var.type.broadcastable[i]]
            else:
                shp = inp.shape
            if len(set(shp)) != len(shp):
                _logger.warn(
                    "While testing the shape inference, we received an"
                    " input with a shape that has some repeated values: %s"
                    ", like a square matrix. This makes it impossible to"
                    " check if the values for these dimensions have been"
                    " correctly used, or if they have been mixed up.",
                    str(inp.shape))
                break

    outputs_function = theano.function(inputs, outputs, mode=mode)
    shapes_function = theano.function(inputs, [o.shape for o in outputs],
                                      mode=mode)
    # theano.printing.debugprint(shapes_function)

    # Check that the Op is removed from the compiled function.
    if check_topo:
        topo_shape = shapes_function.maker.fgraph.toposort()
        assert not any(isinstance(t.op, cls) for t in topo_shape)
    topo_out = outputs_function.maker.fgraph.toposort()
    assert any(isinstance(t.op, cls) for t in topo_out)

    # Check that the shape produced agrees with the actual shape.
    numeric_outputs = outputs_function(*numeric_inputs)
    numeric_shapes = shapes_function(*numeric_inputs)
    for out, shape in zip(numeric_outputs, numeric_shapes):
        assert numpy.all(out.shape == shape), (out.shape, shape)
Developer ID: gyenney, Project: Tools, Lines of code: 59, Source file: unittest_tools.py
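To see why _compile_and_check warns about repeated dimension sizes: an infer_shape bug that swaps two dimensions is invisible on a square input but caught on a rectangular one. The buggy shape function below is ours, purely for illustration.

import numpy

def buggy_infer_shape(shape):
    # Deliberately swaps the two dimensions -- the class of bug the
    # warning is about.
    return (shape[1], shape[0])

square = numpy.zeros((7, 7))
rect = numpy.zeros((5, 7))
assert buggy_infer_shape(square.shape) == square.shape  # bug goes unnoticed
assert buggy_infer_shape(rect.shape) != rect.shape      # bug detected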
Example 5: test_pooling_opt
def test_pooling_opt():
    if not cuda.dnn.dnn_available():
        raise SkipTest(cuda.dnn.dnn_available.msg)

    x = T.ftensor4()

    f = theano.function([x], max_pool_2d(x, ds=(2, 2), ignore_border=True),
                        mode=mode_with_gpu)
    assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
                for n in f.maker.fgraph.toposort()])

    f = theano.function(
        [x],
        T.grad(max_pool_2d(x, ds=(2, 2), ignore_border=True).sum(), x),
        mode=mode_with_gpu.including("cudnn"))
    assert any([isinstance(n.op, cuda.dnn.GpuDnnPoolGrad)
                for n in f.maker.fgraph.toposort()])
Developer ID: dapeng2018, Project: Theano, Lines of code: 15, Source file: test_dnn.py
Example 6: body
def body(mode, gpu):
    p = tensor.fmatrix()
    u = tensor.fvector()
    m = multinomial.MultinomialFromUniform('auto')(p, u)
    f = function([p, u], m * 2, allow_input_downcast=True, mode=mode)
    if gpu:
        assert any([type(node.op) is multinomial.GpuMultinomialFromUniform
                    for node in f.maker.fgraph.toposort()])

    pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
    pval = pval / pval.sum(axis=1)[:, None]
    uval = numpy.ones_like(pval[:, 0]) * 0.5
    mval = f(pval, uval)

    assert mval.shape == pval.shape
    if config.cast_policy == 'custom':
        assert mval.dtype == pval.dtype
    elif config.cast_policy == 'numpy+floatX':
        assert mval.dtype == config.floatX
    elif config.cast_policy == 'numpy':
        assert mval.dtype == 'float64'
    else:
        raise NotImplementedError(config.cast_policy)
    assert numpy.allclose(mval.sum(axis=1), 2)
    asdf = numpy.asarray([0, 0, 2, 0]) + 0 * pval
    assert numpy.allclose(mval, asdf)  # broadcast over all rows
Developer ID: Jackwangyang, Project: Theano, Lines of code: 26, Source file: test_multinomial.py
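The closing assertions hold because the op draws one sample per row (a one-hot vector summing to 1) and the compiled function returns m * 2. Assuming MultinomialFromUniform samples by inverse CDF (selecting the first column whose cumulative probability reaches the uniform value, which is how we read it), u = 0.5 lands in column 2 for every row of this particular arange-based pval. The snippet below checks that property numerically; it is a standalone sanity check, not part of the test.

import numpy

pval = numpy.arange(10000 * 4, dtype='float32').reshape((10000, 4)) + 0.1
pval = pval / pval.sum(axis=1)[:, None]
cums = pval.cumsum(axis=1)
# Column where the cumulative probability first exceeds u = 0.5:
assert (numpy.argmax(cums > 0.5, axis=1) == 2).all()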
Example 7: test_dnn_tag
def test_dnn_tag():
    """
    Test that if cudnn isn't available we crash, and that if it is
    available, we use it.
    """
    x = T.ftensor4()
    old = theano.config.on_opt_error
    theano.config.on_opt_error = "raise"

    sio = StringIO()
    handler = logging.StreamHandler(sio)
    logging.getLogger("theano.compile.tests.test_dnn").addHandler(handler)
    # Silence original handler when intentionally generating warning messages
    logging.getLogger("theano").removeHandler(theano.logging_default_handler)
    raised = False
    try:
        f = theano.function(
            [x],
            max_pool_2d(x, ds=(2, 2), ignore_border=True),
            mode=mode_with_gpu.including("cudnn"))
    except (AssertionError, RuntimeError):
        assert not cuda.dnn.dnn_available()
        raised = True
    finally:
        theano.config.on_opt_error = old
        logging.getLogger(
            "theano.compile.tests.test_dnn").removeHandler(handler)
        logging.getLogger("theano").addHandler(theano.logging_default_handler)

    if not raised:
        assert cuda.dnn.dnn_available()
        assert any([isinstance(n.op, cuda.dnn.GpuDnnPool)
                    for n in f.maker.fgraph.toposort()])
Developer ID: dapeng2018, Project: Theano, Lines of code: 27, Source file: test_dnn.py
Example 8: cmp
def cmp(a_shp, b_shp):
    a0 = my_rand(*a_shp)
    a = tcn.shared_constructor(a0, 'a')
    cval = my_rand(a_shp[0], b_shp[1])
    c = tcn.shared_constructor(cval.copy(), 'c')

    b = tcn.fmatrix('b')
    b2 = tcn.fmatrix('b2')

    f = pfunc(
        [b, b2],
        [tensor.dot(a, b2) + c],
        updates=[(a, tensor.dot(a, b) + c)],
        mode=mode_with_gpu)
    assert any([node.op == tcn.blas.gpu_gemm_no_inplace
                for node in f.maker.fgraph.toposort()])
    bval = my_rand(*b_shp)
    bval2 = my_rand(*b_shp)
    rval = f(bval, bval2)

    assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
    assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)

    # Try with a matrix equal to a0, but with strides in both dims
    a.set_value(a0)
    a.set_value(
        a.get_value(borrow=True,
                    return_internal_type=True)[::-1, ::-1],
        borrow=True)
    f(bval, bval2)
Developer ID: gyenney, Project: Tools, Lines of code: 31, Source file: test_blas.py
Example 9: test_neibs
def test_neibs(self):
    for shape, pshape in [((10, 7, 18, 18), (2, 2)),
                          ((10, 7, 6, 18), (3, 2)),
                          ((5, 7, 66, 66), (33, 33)),
                          ((5, 7, 68, 66), (34, 33))]:
        for border in ['valid', 'ignore_borders']:
            for dtype in self.dtypes:
                images = shared(
                    numpy.arange(numpy.prod(shape), dtype=dtype
                                 ).reshape(shape))
                neib_shape = T.as_tensor_variable(pshape)
                f = function([],
                             images2neibs(images, neib_shape, mode=border),
                             mode=self.mode)

                # print images.get_value(borrow=True)
                neibs = f()
                # print neibs
                g = function([],
                             neibs2images(neibs, neib_shape, images.shape),
                             mode=self.mode)
                assert any([isinstance(node.op, self.op)
                            for node in f.maker.fgraph.toposort()])

                # print g()
                assert numpy.allclose(images.get_value(borrow=True), g())
Developer ID: gyenney, Project: Tools, Lines of code: 28, Source file: test_neighbours.py
Example 10: test_logical_shapes
def test_logical_shapes(self):
    seed_rng()
    for stride in range(1, 4):
        kshp = (10, 2, 10, 10)
        featshp = (3, 10, 11, 11)

        a = tensor.ftensor4()
        A = tensor.ftensor4()

        # Need to transpose first two dimensions of kernel, and reverse
        # index kernel image dims (for correlation)
        kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

        featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                           featshp[3] * stride)
        kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
        # print featshp, kshp_rotated, featshp_logical[1:], kshp[2:]
        image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
                                            border_mode='full',
                                            image_shape=featshp,
                                            filter_shape=kshp_rotated,
                                            imshp_logical=featshp_logical[1:],
                                            kshp_logical=kshp[2:])

        func = theano.function([a, A], image_estimate, mode=mode_with_gpu)
        # theano.printing.debugprint(func,)
        assert any([isinstance(node.op, GpuConv)
                    for node in func.maker.fgraph.toposort()])

        a_in = numpy.random.randn(*featshp).astype("float32")
        A_in = numpy.random.randn(*kshp).astype("float32")

        func(a_in, A_in)
Developer ID: Jackwangyang, Project: Theano, Lines of code: 33, Source file: test_conv_cuda_ndarray.py
Example 11: _toposort
def _toposort(edges):
    """Topological sort algorithm by Kahn [1] - O(nodes + edges).

    inputs:
        edges - a dict of the form {a: {b, c}} where b and c depend on a
    outputs:
        L - an ordered list of nodes that satisfy the dependencies of edges

    >>> _toposort({1: {2, 3}, 2: (3, )})
    [1, 2, 3]

    Closely follows the wikipedia page [2]

    [1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
        Communications of the ACM
    [2] http://en.wikipedia.org/wiki/Toposort#Algorithms
    """
    incoming_edges = reverse_dict(edges)
    incoming_edges = dict((k, set(val))
                          for k, val in incoming_edges.items())
    S = set((v for v in edges if v not in incoming_edges))
    L = []

    while S:
        n = S.pop()
        L.append(n)
        for m in edges.get(n, ()):
            assert n in incoming_edges[m]
            incoming_edges[m].remove(n)
            if not incoming_edges[m]:
                S.add(m)
    if any(incoming_edges.get(v, None) for v in edges):
        raise ValueError("Input has cycles")
    return L
Developer ID: gyenney, Project: Tools, Lines of code: 33, Source file: sched.py
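_toposort relies on a reverse_dict helper defined elsewhere in the same module. For self-containment, a minimal sketch of what it does is given below; treat it as an illustration rather than the verbatim source.

def reverse_dict(d):
    """Reverse the direction of a dependency dict.

    >>> reverse_dict({'a': (1, 2), 'b': (2, 3), 'c': ()})
    {1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    """
    result = {}
    for key in d:
        for val in d[key]:
            # Accumulate, per dependent, the tuple of nodes it depends on.
            result[val] = result.get(val, tuple()) + (key,)
    return result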
Example 12: list_of_nodes
def list_of_nodes(inputs, outputs):
    """Return the apply nodes of the graph between inputs and outputs."""
    return stack_search(
        deque([o.owner for o in outputs]),
        lambda o: [inp.owner for inp in o.inputs
                   if inp.owner
                   and not any(i in inp.owner.outputs
                               for i in inputs)])
Developer ID: gyenney, Project: Tools, Lines of code: 7, Source file: graph.py
Example 13: profile_printer
def profile_printer(fct_name, compile_time, fct_call_time, fct_call,
                    apply_time, apply_cimpl, message, outputs_size,
                    other_time):
    # Scan overhead profile
    if any([isinstance(node.op, Scan) and v > 0
            for (_, node), v in apply_time.items()]):
        print
        print 'Scan overhead:'
        print ('<Scan op time(s)> <sub scan fct time(s)> <sub scan op '
               'time(s)> <sub scan fct time(% scan op time)> <sub scan '
               'op time(% scan op time)> <node>')

        total_super_scan_time = 0
        total_scan_fct_time = 0
        total_scan_op_time = 0
        for (_, node), v in apply_time.items():
            if isinstance(node.op, Scan):
                if v > 0:
                    scan_fct_time = node.op.mode_instance.fn_time
                    scan_op_time = node.op.mode_instance.local_time
                    total_super_scan_time += v
                    total_scan_fct_time += scan_fct_time
                    total_scan_op_time += scan_op_time
                    print '    %5.1fs  %5.1fs  %5.1fs  %5.1f%%  %5.1f%%' % (
                        v, scan_fct_time, scan_op_time,
                        scan_fct_time / v * 100,
                        scan_op_time / v * 100), node
                else:
                    print (' The node took 0s, so we can not compute the '
                           'overhead'), node
        print '    total %5.1fs  %5.1fs  %5.1fs  %5.1f%%  %5.1f%%' % (
            total_super_scan_time, total_scan_fct_time, total_scan_op_time,
            total_scan_fct_time / total_super_scan_time * 100,
            total_scan_op_time / total_super_scan_time * 100)
Developer ID: gyenney, Project: Tools, Lines of code: 32, Source file: scan_op.py
Example 14: local_gpua_subtensor
def local_gpua_subtensor(node):
    x = node.inputs[0]
    if (x.owner and isinstance(x.owner.op, HostFromGpu)):
        gpu_x = x.owner.inputs[0]
        if (gpu_x.owner and
                isinstance(gpu_x.owner.op, GpuFromHost) and
                # And it is a shared var or an input of the graph.
                not gpu_x.owner.inputs[0].owner):
            if len(x.clients) == 1:
                if any([n == 'output' or
                        any([isinstance(v.type, GpuArrayType)
                             for v in n.inputs + n.outputs])
                        for n, _ in node.outputs[0].clients]):
                    return
                else:
                    return [host_from_gpu(gpu_from_host(node.outputs[0]))]

    return GpuSubtensor(node.op.idx_list)
Developer ID: gyenney, Project: Tools, Lines of code: 17, Source file: opt.py
Example 15: compile_args
def compile_args():
    """
    These args will be received by compile_str() in the preargs parameter.
    They will also be included in the "hard" part of the key module.
    """
    flags = [flag for flag in config.nvcc.flags.split(' ') if flag]
    if config.nvcc.fastmath:
        flags.append('-use_fast_math')
    cuda_ndarray_cuh_hash = hash_from_file(
        os.path.join(os.path.split(__file__)[0], 'cuda_ndarray.cuh'))
    flags.append('-DCUDA_NDARRAY_CUH=' + cuda_ndarray_cuh_hash)

    # NumPy 1.7 deprecated the old API. I updated most of the places
    # to use the new API, but not everywhere. When finished, enable
    # the following macro to assert that we don't bring in new code
    # that uses the old API.
    flags.append("-D NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION")

    # NumPy 1.7 deprecated the old macro names, but the new NPY_ARRAY_*
    # names did not exist before 1.7, so alias them on older versions.
    numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
    if bool(numpy_ver < [1, 7]):
        flags.append("-D NPY_ARRAY_ENSURECOPY=NPY_ENSURECOPY")
        flags.append("-D NPY_ARRAY_ALIGNED=NPY_ALIGNED")
        flags.append("-D NPY_ARRAY_WRITEABLE=NPY_WRITEABLE")
        flags.append("-D NPY_ARRAY_UPDATE_ALL=NPY_UPDATE_ALL")
        flags.append("-D NPY_ARRAY_C_CONTIGUOUS=NPY_C_CONTIGUOUS")
        flags.append("-D NPY_ARRAY_F_CONTIGUOUS=NPY_F_CONTIGUOUS")

    # If the user didn't specify architecture flags, add them.
    if not any(['-arch=sm_' in f for f in flags]):
        # We compile cuda_ndarray.cu during import.
        # We should not add device properties at that time,
        # as the device is not selected yet!
        # TODO: re-compile cuda_ndarray when we bind to a GPU?
        import theano.sandbox.cuda
        if hasattr(theano.sandbox, 'cuda'):
            n = theano.sandbox.cuda.use.device_number
            if n is None:
                _logger.warn(
                    "We try to get compilation arguments for CUDA"
                    " code, but the GPU device is not initialized."
                    " This is probably caused by an Op that works on"
                    " the GPU but does not inherit from GpuOp."
                    " We initialize the GPU now.")
                theano.sandbox.cuda.use(
                    "gpu",
                    force=True,
                    default_to_move_computation_to_gpu=False,
                    move_shared_float32_to_gpu=False,
                    enable_cuda=False)
                n = theano.sandbox.cuda.use.device_number
            p = theano.sandbox.cuda.device_properties(n)
            flags.append('-arch=sm_' + str(p['major']) +
                         str(p['minor']))

    return flags
Developer ID: Jackwangyang, Project: Theano, Lines of code: 57, Source file: nvcc_compiler.py
Example 16: filter_nvcc_flags
def filter_nvcc_flags(s):
    assert isinstance(s, str)
    flags = [flag for flag in s.split(' ') if flag]
    if any([f for f in flags if not f.startswith("-")]):
        raise ValueError(
            "Theano nvcc.flags support only parameter/value pairs without"
            " space between them. e.g.: '--machine 64' is not supported,"
            " but '--machine=64' is supported. Please add the '=' symbol."
            " nvcc.flags value is '%s'" % s)
    return ' '.join(flags)
Developer ID: Jackwangyang, Project: Theano, Lines of code: 10, Source file: nvcc_compiler.py
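The check rejects any whitespace-separated token that does not start with '-', which is how a flag written as '--machine 64' gets caught; splitting and rejoining also collapses runs of spaces. A short behavior sketch, inferred from the code above (the flag values are illustrative):

# Extra spaces are normalized; well-formed flags pass through.
assert filter_nvcc_flags('--machine=64  -O3') == '--machine=64 -O3'
try:
    filter_nvcc_flags('--machine 64')  # bare '64' does not start with '-'
except ValueError:
    pass  # expected
else:
    raise AssertionError("a ValueError was expected")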
Example 17: get_flags
def get_flags(*types):
    def get_dtype(t):
        if isinstance(t, (str, unicode)):
            return numpy.dtype(t)
        elif isinstance(t, Type):
            return t.dtype
        elif isinstance(t, Variable):
            return t.type.dtype
        else:
            raise TypeError, "can't get a dtype from %s" % (type(t),)
    dtypes = [get_dtype(t) for t in types]
    flags = dict(cluda=True)
    if any(d == numpy.float64 for d in dtypes):
        flags['have_double'] = True
    if any(d.itemsize < 4 for d in dtypes):
        flags['have_small'] = True
    if any(d.kind == 'c' for d in dtypes):
        flags['have_complex'] = True
    if any(d == numpy.float16 for d in dtypes):
        flags['have_half'] = True
    return flags
Developer ID: clorenz7, Project: Theano, Lines of code: 21, Source file: basic_ops.py
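get_flags accepts dtype strings, Theano Types, or Variables, folds each down to a numpy.dtype, and sets one capability flag per dtype category. The expected result below is inferred by reading the code above, not by running it against a particular Theano build.

flags = get_flags('float64', 'complex64')
# float64 sets have_double; complex64 (kind 'c') sets have_complex;
# neither dtype has itemsize < 4, so have_small stays absent.
assert flags == {'cluda': True, 'have_double': True, 'have_complex': True}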
Example 18: test_nvidia_driver3
def test_nvidia_driver3():
    """Test that the gpu device is initialized by theano when
    we build a function with a gpu op.

    The driver should always be tested during theano initialization
    of the gpu device.
    """
    var = cuda.fvector()
    f = theano.function([var], var + 1, mode=mode_with_gpu,
                        profile=False)
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, cuda.GpuElemwise) for node in topo])
    assert theano.sandbox.cuda.use.device_number is not None
Developer ID: Jackwangyang, Project: Theano, Lines of code: 13, Source file: test_driver.py
Example 19: test_local_sampling_dot_csr
def test_local_sampling_dot_csr():
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_sampling_dot_csr")

    for sp_format in ['csr']:  # Not implemented for other formats
        inputs = [tensor.matrix(),
                  tensor.matrix(),
                  getattr(theano.sparse, sp_format + '_matrix')()]

        f = theano.function(inputs,
                            sparse.sampling_dot(*inputs),
                            mode=mode)

        if theano.config.blas.ldflags:
            assert not any(isinstance(node.op, sparse.SamplingDot)
                           for node in f.maker.fgraph.toposort())
        else:
            # SamplingDotCSR's C implementation needs blas, so it should
            # not be inserted
            assert not any(isinstance(node.op, sparse.opt.SamplingDotCSR)
                           for node in f.maker.fgraph.toposort())
Developer ID: Jackwangyang, Project: Theano, Lines of code: 23, Source file: test_opt.py
Example 20: shape_of_variables
def shape_of_variables(fgraph, input_shapes):
    """
    Compute the numeric shape of all intermediate variables given input
    shapes.

    Inputs:
        fgraph - the theano.FunctionGraph in question
        input_shapes - a dict mapping input to shape
    Outputs:
        shapes - a dict mapping variable to shape

    WARNING : This modifies the fgraph. Not pure.

    >>> import theano
    >>> x = theano.tensor.matrix('x')
    >>> y = x[512:]; y.name = 'y'
    >>> fgraph = theano.FunctionGraph([x], [y], clone=False)
    >>> shape_of_variables(fgraph, {x: (1024, 1024)})
    {y: (512, 1024), x: (1024, 1024)}
    """
    if not hasattr(fgraph, 'shape_feature'):
        fgraph.attach_feature(theano.tensor.opt.ShapeFeature())

    input_dims = [dimension for inp in fgraph.inputs
                  for dimension in fgraph.shape_feature.shape_of[inp]]

    output_dims = [dimension
                   for shape in fgraph.shape_feature.shape_of.values()
                   for dimension in shape]

    compute_shapes = theano.function(input_dims, output_dims)

    if any([i not in fgraph.inputs for i in input_shapes.keys()]):
        raise ValueError(
            "input_shapes keys aren't in the fgraph.inputs. FunctionGraph()"
            " interface changed. Now by default, it clones the graph it"
            " receives. To have the old behavior, give it this new parameter"
            " `clone=False`.")

    numeric_input_dims = [dim for inp in fgraph.inputs
                          for dim in input_shapes[inp]]
    numeric_output_dims = compute_shapes(*numeric_input_dims)

    sym_to_num_dict = dict(zip(output_dims, numeric_output_dims))

    l = {}
    for var in fgraph.shape_feature.shape_of:
        l[var] = tuple(sym_to_num_dict[sym]
                       for sym in fgraph.shape_feature.shape_of[var])
    return l
Developer ID: 317070, Project: Theano, Lines of code: 49, Source file: utils.py
Note: The theano.compat.python2x.any examples on this page were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult the corresponding project's license before redistributing or reusing the code. Do not repost without permission.