This article collects and summarizes typical usage examples of the Python function theano.tensor.opt.get_scalar_constant_value. If you have been wondering how get_scalar_constant_value is used in practice, how to call it, or what real code using it looks like, the hand-picked examples below should help.
The following presents 11 code examples of the get_scalar_constant_value function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
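Before the individual examples, here is a minimal sketch of what the function does, assuming an older Theano release where the helper is reachable as theano.tensor.opt.get_scalar_constant_value (as in the examples below): it extracts the Python/NumPy scalar behind a constant graph variable and raises NotScalarConstantError for anything whose value is not known at graph-construction time.

from theano import tensor
from theano.tensor import opt

# A graph constant: its scalar value is known before compilation.
c = tensor.constant(-1.0)
print(opt.get_scalar_constant_value(c))          # -> -1.0

# A free symbolic input has no fixed value, so the helper raises instead.
x = tensor.dscalar('x')
try:
    opt.get_scalar_constant_value(x)
except tensor.NotScalarConstantError:
    print('x is not a scalar constant')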
Example 1: is_neg
def is_neg(var):
    """
    Match a variable with the `-x` pattern.
    :param var: The Variable to analyze.
    :return: `x` if `var` is of the form `-x`, or None otherwise.
    """
    apply = var.owner
    if not apply:
        return None
    # First match against `tensor.neg`.
    if apply.op == tensor.neg:
        return apply.inputs[0]
    # Then match against a multiplication by -1.
    if apply.op == tensor.mul and len(apply.inputs) >= 2:
        for idx, mul_input in enumerate(apply.inputs):
            try:
                constant = opt.get_scalar_constant_value(mul_input)
                is_minus_1 = numpy.allclose(constant, -1)
            except NotScalarConstantError:
                is_minus_1 = False
            if is_minus_1:
                # Found a multiplication by -1.
                if len(apply.inputs) == 2:
                    # Only return the other input.
                    return apply.inputs[1 - idx]
                else:
                    # Return the multiplication of all other inputs.
                    return tensor.mul(*(apply.inputs[0:idx] +
                                        apply.inputs[idx + 1:]))
    # No match.
    return None
Contributor: Jackwangyang, Project: Theano, Lines of code: 33, Source file: sigm.py
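For context, a small sketch of how the matcher above behaves on the two patterns it recognizes. The variable names are made up for illustration, and the snippet assumes the same module scope as the example (is_neg, tensor, opt, numpy imported).

from theano import tensor

x = tensor.dscalar('x')

# Direct negation is matched through the `tensor.neg` branch.
assert is_neg(-x) is x

# A multiplication by -1 is matched via get_scalar_constant_value.
print(is_neg(x * -1))    # -> x (the remaining factor)
print(is_neg(2 * x))     # -> None: no factor equal to -1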
Example 2: fibby_of_zero
def fibby_of_zero(node):
    if node.op == fibby:
        x = node.inputs[0]
        try:
            if numpy.all(0 == get_scalar_constant_value(x)):
                return [x]
        except NotScalarConstantError:
            pass
Contributor: AI-Cdrone, Project: Theano, Lines of code: 8, Source file: test_tutorial.py
Example 3: _is_1
def _is_1(expr):
    """rtype bool. True iff expr is a constant close to 1
    """
    try:
        v = opt.get_scalar_constant_value(expr)
        return numpy.allclose(v, 1)
    except tensor.NotScalarConstantError:
        return False
Contributor: Jackwangyang, Project: Theano, Lines of code: 8, Source file: sigm.py
Example 4: _is_1
def _is_1(expr):
    """
    Returns
    -------
    bool
        True iff expr is a constant close to 1.
    """
    try:
        v = opt.get_scalar_constant_value(expr)
        return np.allclose(v, 1)
    except tensor.NotScalarConstantError:
        return False
Contributor: DEVESHTARASIA, Project: Theano, Lines of code: 14, Source file: sigm.py
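Examples 3 and 4 are the same private helper from two Theano versions, differing only in the numpy alias and docstring style. A quick sketch of the behaviour it encapsulates, with hypothetical inputs (assumes the example's imports for tensor, opt and np):

from theano import tensor

print(_is_1(tensor.constant(1.0)))    # True: a constant numerically close to 1
print(_is_1(tensor.constant(0.5)))    # False: a constant, but not 1
print(_is_1(tensor.dscalar('x')))     # False: not a constant, so the except branch runs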
Example 5: local_1msigmoid
def local_1msigmoid(node):
    """
    1-sigm(x) -> sigm(-x)
    """
    if node.op == tensor.sub:
        sub_l, sub_r = node.inputs
        if len(sub_r.clients) > 1:
            return  # graph is using both sigm and 1-sigm
        if sub_r.owner and sub_r.owner.op == sigmoid:
            try:
                val_l = opt.get_scalar_constant_value(sub_l)
            except Exception, e:
                return
            if numpy.allclose(numpy.sum(val_l), 1):
                return [sigmoid(-sub_r.owner.inputs[0])]
Contributor: Jackwangyang, Project: Theano, Lines of code: 15, Source file: sigm.py
Example 6: local_1msigmoid
def local_1msigmoid(node):
    """
    1-sigm(x) -> sigm(-x)
    """
    if node.op == tensor.sub:
        sub_l, sub_r = node.inputs
        if len(sub_r.clients) > 1:
            return  # graph is using both sigm and 1-sigm
        if sub_r.owner and sub_r.owner.op == sigmoid:
            try:
                val_l = opt.get_scalar_constant_value(sub_l)
            except tensor.NotScalarConstantError:
                return
            if np.allclose(np.sum(val_l), 1):
                out = sigmoid(-sub_r.owner.inputs[0])
                copy_stack_trace([sub_r, node.outputs[0]], out)
                return [out]
Contributor: DEVESHTARASIA, Project: Theano, Lines of code: 18, Source file: sigm.py
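Both versions of local_1msigmoid rely on the identity 1 - sigmoid(x) = sigmoid(-x). A quick NumPy-only check of that identity (independent of Theano; names chosen for illustration):

import numpy as np

def sigm(z):
    # Logistic sigmoid, the same function the optimization rewrites.
    return 1.0 / (1.0 + np.exp(-z))

z = np.linspace(-6.0, 6.0, 13)
assert np.allclose(1.0 - sigm(z), sigm(-z))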
Example 7: scan
#......... part of the code is omitted here .........
    def wrap_into_list(x):
        '''
        Wrap the input into a list if it is not already a list
        '''
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(outputs_info)
    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(non_sequences):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)
    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None
    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None
    # Check n_steps is an int
    if (hasattr(n_steps, 'dtype') and
            str(n_steps.dtype)[:3] not in ('uin', 'int')):
        raise ValueError(' n_steps must be an int. dtype provided '
                         'is %s' % n_steps.dtype)
    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)
    return_steps = OrderedDict()
    # wrap sequences in a dictionary if they are not already dictionaries
    for i in xrange(n_seqs):
        if not isinstance(seqs[i], dict):
            seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])])
        elif seqs[i].get('taps', None) is not None:
            seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])
        elif seqs[i].get('taps', None) is None:
            # seqs dictionary does not have the ``taps`` key
            seqs[i]['taps'] = [0]
    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if isinstance(outs_info[i], dict):
                # DEPRECATED :
                if outs_info[i].get('return_steps', None) is not None:
                    raise ValueError(
                        "Using `return_steps` has been deprecated. "
Contributor: Micseb, Project: Theano, Lines of code: 67, Source file: scan.py
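In the scan() body above, get_scalar_constant_value is what lets a symbolic-but-constant n_steps be treated like a plain Python int. A rough sketch of the two call styles (the inner function and variable names are hypothetical, not taken from scan.py):

import theano
import theano.tensor as tensor

x0 = tensor.dscalar('x0')

def step(prev):
    # Hypothetical inner function: double the previous state at each step.
    return prev * 2

# n_steps as a Python int: the fixed step count is known directly.
outputs, updates = theano.scan(fn=step, outputs_info=x0, n_steps=5)

# n_steps as a constant tensor: get_scalar_constant_value recovers the value,
# so scan still sees a fixed number of steps before compilation.
outputs2, updates2 = theano.scan(fn=step, outputs_info=x0,
                                 n_steps=tensor.constant(5))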
Example 8: scan
#......... part of the code is omitted here .........
    def wrap_into_list(x):
        '''
        Wrap the input into a list if it is not already a list
        '''
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(outputs_info)
    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(non_sequences):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)
    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None
    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None
    # Check n_steps is an int
    if (hasattr(n_steps, 'dtype') and
            str(n_steps.dtype)[:3] not in ('uin', 'int')):
        raise ValueError(' n_steps must be an int. dtype provided '
                         'is %s' % n_steps.dtype)
    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)
    return_steps = OrderedDict()
    # wrap sequences in a dictionary if they are not already dictionaries
    for i in xrange(n_seqs):
        if not isinstance(seqs[i], dict):
            seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])])
        elif seqs[i].get('taps', None) is not None:
            seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])
        elif seqs[i].get('taps', None) is None:
            # seqs dictionary does not have the ``taps`` key
            seqs[i]['taps'] = [0]
    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if isinstance(outs_info[i], dict):
                # DEPRECATED :
                if outs_info[i].get('return_steps', None) is not None:
                    raise ValueError(
                        "Using `return_steps` has been deprecated. "
Contributor: Jackwangyang, Project: Theano, Lines of code: 67, Source file: scan.py
Example 9: scan
#......... part of the code is omitted here .........
        Wrap the input into a list if it is not already a list
        """
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(states)
    if allow_gc is None:
        allow_gc = config.scan.allow_gc
    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(params):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)
    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None
    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None
    # Check n_steps is an int
    if hasattr(n_steps, "dtype") and str(n_steps.dtype)[:3] not in ("uin", "int"):
        raise ValueError(" n_steps must be an int. dtype provided " "is %s" % n_steps.dtype)
    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)
    return_steps = OrderedDict()
    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if not isinstance(outs_info[i], dict):
                # by default any output has a tap value of -1
                outs_info[i] = dict(membuf=outs_info[i], taps=[-1])
            elif not outs_info[i].get("membuf", None) and outs_info[i].get("taps", None):
                # ^ no initial state but taps provided
                raise ValueError(
                    ("If you are using slices of an output " "you need to provide a memory buffer for " "the state "),
                    outs_info[i],
                )
            elif outs_info[i].get("membuf", None) and not outs_info[i].get("taps", None):
                # ^ initial state but taps not provided
                if "taps" in outs_info[i]:
                    # ^ explicitly provided a None for taps
                    _logger.warning(
                        "Output %s (index %d) has a memory " "buffer but taps is explicitly set to None ",
                        getattr(outs_info[i]["membuf"], "name", "None"),
                        i,
Contributor: amanrajdce, Project: Theano, Lines of code: 67, Source file: scan.py
Example 10: scan
#......... part of the code is omitted here .........
                   other linkers this argument is useless)
    :rtype: tuple
    :return: tuple of the form (outputs, updates); ``outputs`` is either a
             Theano variable or a list of Theano variables representing the
             outputs of ``scan`` (in the same order as in
             ``outputs_info``). ``updates`` is a subclass of dictionary
             specifying the
             update rules for all shared variables used in scan
             This dictionary should be passed to ``theano.function`` when
             you compile your function. The change compared to a normal
             dictionary is that we validate that keys are SharedVariable
             and addition of those dictionary are validated to be consistent.
    """
    # Note : see the internal documentation of the scan op for naming
    # conventions and all other details
    if options is None:
        options = {}
    rvals = scan_utils.canonical_arguments(sequences,
                                           outputs_info,
                                           non_sequences,
                                           go_backwards,
                                           n_steps)
    inputs, states_and_outputs_info, parameters, T = rvals
    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    T_value = None
    if isinstance(n_steps, (float, int)):
        T_value = int(n_steps)
    else:
        try:
            T_value = opt.get_scalar_constant_value(n_steps)
        except (TypeError, AttributeError):
            T_value = None

    if T_value in (1, -1):
        return one_step_scan(fn,
                             inputs,
                             states_and_outputs_info,
                             parameters,
                             truncate_gradient)

    # 1. Variable representing the current time step
    t = scalar_shared(numpy.int64(0), name='t')

    # 2. Allocate memory for the states of scan.
    mintaps = []
    lengths = []
    for pos, arg_info in enumerate(states_and_outputs_info):
        if arg_info.get('taps', None) == [-1]:
            mintaps.append(1)
            lengths.append(scalar_shared(numpy.int64(0),
                                         name='l%d' % pos))
            arg_info['initial'] = scan_utils.expand(tensor.unbroadcast(
                tensor.shape_padleft(arg_info['initial']), 0), T)
        elif arg_info.get('taps', None):
            if numpy.any(numpy.array(arg_info.get('taps', [])) > 0):
                # Make sure we do not have requests for future values of a
                # sequence we can not provide such values
                raise ValueError('Can not use future taps of outputs',
                                 arg_info)
            mintap = abs(numpy.min(arg_info['taps']))
            lengths.append(scalar_shared(numpy.int64(0),
                                         name='l%d' % pos))
Contributor: Ambier, Project: Theano, Lines of code: 67, Source file: scan.py
Example 11: scan
#......... part of the code is omitted here .........
    def wrap_into_list(x):
        '''
        Wrap the input into a list if it is not already a list
        '''
        if x is None:
            return []
        elif not isinstance(x, (list, tuple)):
            return [x]
        else:
            return list(x)

    seqs = wrap_into_list(sequences)
    outs_info = wrap_into_list(states)
    # Make sure we get rid of numpy arrays or ints or anything like that
    # passed as inputs to scan
    non_seqs = []
    for elem in wrap_into_list(params):
        if not isinstance(elem, gof.Variable):
            non_seqs.append(tensor.as_tensor_variable(elem))
        else:
            non_seqs.append(elem)
    # If we provided a known number of steps ( before compilation)
    # and if that number is 1 or -1, then we can skip the Scan Op,
    # and just apply the inner function once
    # To do that we check here to see the nature of n_steps
    n_fixed_steps = None
    if isinstance(n_steps, (float, int)):
        n_fixed_steps = int(n_steps)
    else:
        try:
            n_fixed_steps = opt.get_scalar_constant_value(n_steps)
        except tensor.basic.NotScalarConstantError:
            n_fixed_steps = None
    # Check n_steps is an int
    if (hasattr(n_steps, 'dtype') and
            str(n_steps.dtype)[:3] not in ('uin', 'int')):
        raise ValueError(' n_steps must be an int. dtype provided '
                         'is %s' % n_steps.dtype)
    # compute number of sequences and number of outputs
    n_seqs = len(seqs)
    n_outs = len(outs_info)
    return_steps = OrderedDict()
    # wrap outputs info in a dictionary if they are not already in one
    for i in xrange(n_outs):
        if outs_info[i] is not None:
            if not isinstance(outs_info[i], dict):
                # by default any output has a tap value of -1
                outs_info[i] = dict(membuf=outs_info[i], taps=[-1])
            elif (not outs_info[i].get('membuf', None) and
                    outs_info[i].get('taps', None)):
                # ^ no initial state but taps provided
                raise ValueError(('If you are using slices of an output '
                                  'you need to provide a memory buffer for '
                                  'the state '), outs_info[i])
            elif (outs_info[i].get('membuf', None) and
                    not outs_info[i].get('taps', None)):
                # ^ initial state but taps not provided
                if 'taps' in outs_info[i]:
                    # ^ explicitly provided a None for taps
                    _logger.warning(
Contributor: DeepLearningIndia, Project: Theano, Lines of code: 67, Source file: scan.py
Note: the theano.tensor.opt.get_scalar_constant_value examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.