本文整理汇总了Python中tensorflow.python.framework.tensor_util.constant_value函数的典型用法代码示例。如果您正苦于以下问题:Python constant_value函数的具体用法?Python constant_value怎么用?Python constant_value使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了constant_value函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _merge_batch_beams
def _merge_batch_beams(self, t, s=None):
  """Merges the tensor from a batch of beams into a batch by beams.

  More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
  reshape this into [batch_size*beam_width, s].

  Args:
    t: Tensor of dimension [batch_size, beam_width, s].
    s: (Possibly known) depth shape.

  Returns:
    A reshaped version of t with dimension [batch_size * beam_width, s].
  """
  # Normalize the depth spec to a TensorShape, statically folding tensors.
  if isinstance(s, ops.Tensor):
    depth_shape = tensor_shape.as_shape(tensor_util.constant_value(s))
  else:
    depth_shape = tensor_shape.TensorShape(s)
  dynamic_shape = tf.shape(t)
  static_batch = tensor_util.constant_value(self._batch_size)
  # Leading dimension of the merged tensor, if statically known.
  merged_leading = (
      static_batch * self._beam_width if static_batch is not None else None)
  merged = tf.reshape(
      t,
      tf.concat(([self._batch_size * self._beam_width], dynamic_shape[2:]), 0))
  # Propagate whatever static shape information we have.
  merged.set_shape(
      tensor_shape.TensorShape([merged_leading]).concatenate(depth_shape))
  return merged
开发者ID:seasky100,项目名称:tensorflow_end2end_speech_recognition,代码行数:25,代码来源:beam_search_decoder_from_tensorflow.py
示例2: _SliceShape
def _SliceShape(op):
  """Shape function for array_ops.slice."""
  input_shape = op.inputs[0].get_shape()
  begin_shape = op.inputs[1].get_shape().with_rank(1)
  sizes_shape = op.inputs[2].get_shape().with_rank(1)
  ndims = begin_shape.merge_with(sizes_shape)[0].value
  if ndims is not None:
    input_shape.assert_has_rank(ndims)
  begin_value = tensor_util.constant_value(op.inputs[1])
  sizes_value = tensor_util.constant_value(op.inputs[2])
  if sizes_value is None:
    # Sizes unknown: return the best rank information available.
    if input_shape.ndims is not None:
      return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
    if ndims is not None:
      return [tensor_shape.unknown_shape(ndims=ndims)]
    return [tensor_shape.unknown_shape()]
  # Sizes known: resolve each output dimension individually.
  out_dims = []
  for axis, slice_size in enumerate(sizes_value.ravel()):
    if slice_size != -1:
      out_dims.append(slice_size)
    elif begin_value is not None:
      # -1 means "to the end": remaining extent after the begin offset.
      out_dims.append(input_shape[axis] - begin_value[axis])
    else:
      out_dims.append(None)
  return [tensor_shape.TensorShape(out_dims)]
开发者ID:sherrym,项目名称:tensorflow,代码行数:27,代码来源:array_ops.py
示例3: _split_batch_beams
def _split_batch_beams(self, t, s=None):
  """Splits the tensor from a batch by beams into a batch of beams.

  More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
  reshape this into [batch_size, beam_width, s].

  Args:
    t: Tensor of dimension [batch_size*beam_width, s].
    s: (Possibly known) depth shape.

  Returns:
    A reshaped version of t with dimension [batch_size, beam_width, s].

  Raises:
    ValueError: If, after reshaping, the new tensor is not shaped
      `[batch_size, beam_width, s]` (assuming batch_size and beam_width
      are known statically).
  """
  # Normalize the depth spec to a TensorShape, statically folding tensors.
  if isinstance(s, ops.Tensor):
    depth_shape = tensor_shape.TensorShape(tensor_util.constant_value(s))
  else:
    depth_shape = tensor_shape.TensorShape(s)
  dynamic_shape = tf.shape(t)
  split_t = tf.reshape(
      t,
      tf.concat(([self._batch_size, self._beam_width], dynamic_shape[1:]), 0))
  static_batch = tensor_util.constant_value(self._batch_size)
  expected_shape = tensor_shape.TensorShape(
      [static_batch, self._beam_width]).concatenate(depth_shape)
  # Sanity-check the static shapes; a mismatch usually means the caller
  # built a zero_state without tiling by beam_width.
  if not split_t.shape.is_compatible_with(expected_shape):
    raise ValueError("Unexpected behavior when reshaping between beam width "
                     "and batch size. The reshaped tensor has shape: %s. "
                     "We expected it to have shape "
                     "(batch_size, beam_width, depth) == %s. Perhaps you "
                     "forgot to create a zero_state with "
                     "batch_size=encoder_batch_size * beam_width?"
                     % (split_t.shape, expected_shape))
  split_t.set_shape(expected_shape)
  return split_t
开发者ID:seasky100,项目名称:tensorflow_end2end_speech_recognition,代码行数:35,代码来源:beam_search_decoder_from_tensorflow.py
示例4: _test
def _test(self, input_shape, block_shape, base_paddings):
  """Checks required_space_to_batch_paddings on constant and fed inputs.

  Computes paddings/crops twice — once from constant inputs (so the result
  must be statically foldable) and once from placeholders fed at run time —
  and verifies both paths agree.

  Args:
    input_shape: Sequence of ints; the spatial input shape.
    block_shape: Sequence of ints; block size per spatial dimension.
    base_paddings: Optional [len(block_shape), 2] minimum paddings, or None.
  """
  input_shape = np.array(input_shape)
  block_shape = np.array(block_shape)
  if base_paddings is not None:
    base_paddings = np.array(base_paddings)
  # Check with constants.
  paddings, crops = tf.required_space_to_batch_paddings(
      input_shape, block_shape, base_paddings)
  # With constant inputs the outputs must have static values.
  paddings_const = tensor_util.constant_value(paddings)
  crops_const = tensor_util.constant_value(crops)
  self.assertIsNotNone(paddings_const)
  self.assertIsNotNone(crops_const)
  self._checkProperties(input_shape, block_shape, base_paddings,
                        paddings_const, crops_const)
  # Check with non-constants.
  assignments = {}
  input_shape_placeholder = tf.placeholder(tf.int32)
  assignments[input_shape_placeholder] = input_shape
  block_shape_placeholder = tf.placeholder(tf.int32, [len(block_shape)])
  assignments[block_shape_placeholder] = block_shape
  if base_paddings is not None:
    base_paddings_placeholder = tf.placeholder(tf.int32,
                                               [len(block_shape), 2])
    assignments[base_paddings_placeholder] = base_paddings
  else:
    base_paddings_placeholder = None
  t_paddings, t_crops = tf.required_space_to_batch_paddings(
      input_shape_placeholder, block_shape_placeholder,
      base_paddings_placeholder)
  with self.test_session():
    paddings_result = t_paddings.eval(assignments)
    crops_result = t_crops.eval(assignments)
    # The dynamic results must match the statically folded ones.
    self.assertAllEqual(paddings_result, paddings_const)
    self.assertAllEqual(crops_result, crops_const)
开发者ID:821760408-sp,项目名称:tensorflow,代码行数:34,代码来源:spacetobatch_op_test.py
示例5: __init__
def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
             drop_remainder, use_legacy_function=False):
  """See `Dataset.map()` for details."""
  self._input_dataset = input_dataset
  # Wrap the user function so it can be embedded in the dataset graph.
  self._map_func = dataset_ops.StructuredFunctionWrapper(
      map_func,
      "tf.data.experimental.map_and_batch()",
      dataset=input_dataset,
      use_legacy_function=use_legacy_function)
  self._batch_size_t = ops.convert_to_tensor(
      batch_size, dtype=dtypes.int64, name="batch_size")
  self._num_parallel_calls_t = ops.convert_to_tensor(
      num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
  self._drop_remainder_t = ops.convert_to_tensor(
      drop_remainder, dtype=dtypes.bool, name="drop_remainder")
  constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t)
  if constant_drop_remainder:
    # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
    # or `False` (explicitly retaining the remainder).
    # Remainder is statically known to be dropped, so every output batch has
    # the full, statically known batch size.
    self._structure = self._map_func.output_structure._batch(  # pylint: disable=protected-access
        tensor_util.constant_value(self._batch_size_t))
  else:
    # Batch dimension is unknown (last batch may be partial).
    self._structure = self._map_func.output_structure._batch(None)  # pylint: disable=protected-access
  variant_tensor = ged_ops.experimental_map_and_batch_dataset(
      self._input_dataset._variant_tensor,  # pylint: disable=protected-access
      self._map_func.function.captured_inputs,
      f=self._map_func.function,
      batch_size=self._batch_size_t,
      num_parallel_calls=self._num_parallel_calls_t,
      drop_remainder=self._drop_remainder_t,
      preserve_cardinality=True,
      **dataset_ops.flat_structure(self))
  super(_MapAndBatchDataset, self).__init__(input_dataset, variant_tensor)
开发者ID:kylin9872,项目名称:tensorflow,代码行数:35,代码来源:batching.py
示例6: testConstant
def testConstant(self):
  """constant_value of a tf constant equals the numpy array it wraps."""
  # Cover a normal shape and one with a zero-sized axis.
  for shape in [(3, 4, 7), (3, 0, 7)]:
    np_val = np.random.rand(*shape).astype(np.float32)
    tf_val = constant_op.constant(np_val)
    self.assertAllClose(np_val, tensor_util.constant_value(tf_val))
开发者ID:ziky90,项目名称:tensorflow,代码行数:8,代码来源:tensor_util_test.py
示例7: _RangeShape
def _RangeShape(op):
  """Shape function for range: a vector of ceil((limit - start) / delta)."""
  start = tensor_util.constant_value(op.inputs[0])
  limit = tensor_util.constant_value(op.inputs[1])
  delta = tensor_util.constant_value(op.inputs[2])
  if start is None or limit is None or delta is None:
    # Any unknown input makes the length dynamic.
    return [tensor_shape.vector(None)]
  # Ceiling division without floats: (limit - start + delta - 1) // delta.
  length = (limit - start + delta - 1) // delta
  return [tensor_shape.vector(length)]
开发者ID:sambrego,项目名称:tensorflow,代码行数:8,代码来源:math_ops.py
示例8: _broadcast_shape
def _broadcast_shape(shape1, shape2):
  """Convenience function which statically broadcasts shape when possible.

  Args:
    shape1: Integer vector `Tensor` describing the first shape.
    shape2: Integer vector `Tensor` describing the second shape.

  Returns:
    A `TensorShape` when both inputs are statically known; otherwise an
    integer vector `Tensor` holding the dynamically broadcast shape.
  """
  # Fold each shape once instead of twice: the original called
  # constant_value() up to four times for two inputs.
  static1 = tensor_util.constant_value(shape1)
  static2 = tensor_util.constant_value(shape2)
  if static1 is not None and static2 is not None:
    return array_ops.broadcast_static_shape(
        tensor_shape.TensorShape(static1),
        tensor_shape.TensorShape(static2))
  return array_ops.broadcast_dynamic_shape(shape1, shape2)
开发者ID:jzuern,项目名称:tensorflow,代码行数:8,代码来源:mvn_linear_operator.py
示例9: _concat
def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  def _normalize(value, role):
    """Return (dynamic_tensor_or_None, static_list_or_None) for one operand.

    The original duplicated this logic verbatim for prefix and suffix.
    """
    if isinstance(value, ops.Tensor):
      dynamic = value
      static_value = tensor_util.constant_value(value)
      if dynamic.shape.ndims == 0:
        # Promote a scalar size to a length-1 vector for concat.
        dynamic = array_ops.expand_dims(dynamic, 0)
      elif dynamic.shape.ndims != 1:
        raise ValueError("%s tensor must be either a scalar or vector, "
                         "but saw tensor: %s" % (role, dynamic))
    else:
      shape = tensor_shape.as_shape(value)
      static_value = shape.as_list() if shape.ndims is not None else None
      # Only a fully defined shape can be materialized as a constant.
      dynamic = (constant_op.constant(shape.as_list(), dtype=dtypes.int32)
                 if shape.is_fully_defined() else None)
    return dynamic, static_value

  p, p_static = _normalize(prefix, "prefix")
  s, s_static = _normalize(suffix, "suffix")

  if static:
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    return shape.as_list() if shape.ndims is not None else None
  if p is None or s is None:
    raise ValueError("Provided a prefix or suffix of None: %s and %s"
                     % (prefix, suffix))
  return array_ops.concat((p, s), 0)
开发者ID:AbhinavJain13,项目名称:tensorflow,代码行数:58,代码来源:rnn_cell_impl.py
示例10: testCast
def testCast(self):
  """constant_value folds through a cast, preserving values in the new dtype."""
  # Cover a normal shape and one with a zero-sized axis.
  for shape in [(3, 4, 7), (3, 0, 7)]:
    np_val = np.random.rand(*shape).astype(np.float32)
    tf_val = math_ops.cast(constant_op.constant(np_val), dtypes.float64)
    self.assertAllClose(np_val.astype(np.float64),
                        tensor_util.constant_value(tf_val))
开发者ID:ziky90,项目名称:tensorflow,代码行数:10,代码来源:tensor_util_test.py
示例11: testPack_Axis1
def testPack_Axis1(self):
  """Stacking along axis=1 is never constant-folded by constant_value."""
  inputs = [np.random.rand(4, 7) for _ in range(3)]
  # Even all-constant inputs yield no static value for axis=1 stacking.
  stacked = array_ops.stack(inputs, axis=1)
  self.assertIsNone(tensor_util.constant_value(stacked))
  # A placeholder in the middle certainly has no static value either.
  with_hole = array_ops.stack(
      [inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]], axis=1)
  self.assertIs(None, tensor_util.constant_value(with_hole))
开发者ID:ziky90,项目名称:tensorflow,代码行数:10,代码来源:tensor_util_test.py
示例12: make_dims
def make_dims(start_sum, size, name):
  """Closure to make dims range."""
  # Default to a single zero so `sum(start_sum)` works for an empty prefix.
  start_sum = start_sum if start_sum else (array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
  if self._is_all_constant_helper(size, *start_sum):
    # Fully static: materialize the index range eagerly as a Python list.
    start = sum([tensor_util.constant_value(s) for s in start_sum])
    stop = start + tensor_util.constant_value(size)
    return ops.convert_to_tensor(list(range(start, stop)), dtype=dtypes.int32, name=name)
  else:
    # Dynamic fallback: build the range inside the graph.
    start = sum(start_sum)
    return math_ops.range(start, start + size)
开发者ID:ppwwyyxx,项目名称:tensorflow,代码行数:10,代码来源:shape.py
示例13: slice_shape
def slice_shape(start_sum, size, name):
  """Closure to slice out shape."""
  # Default to a single zero so `sum(start_sum)` works for an empty prefix.
  start_sum = start_sum if start_sum else (array_ops.zeros((), dtype=dtypes.int32, name="zero"),)
  if x.get_shape().ndims is not None and self._is_all_constant_helper(size, *start_sum):
    # Fully static: read the slice straight off the static shape of `x`.
    start = sum([tensor_util.constant_value(s) for s in start_sum])
    stop = start + tensor_util.constant_value(size)
    slice_ = x.get_shape()[start:stop].as_list()
    if all(s is not None for s in slice_):
      return ops.convert_to_tensor(slice_, dtype=dtypes.int32, name=name)
    # Fall-through intended.
  # Dynamic fallback: slice the runtime shape vector.
  return array_ops.slice(array_ops.shape(x), (sum(start_sum),), (size,))
开发者ID:ppwwyyxx,项目名称:tensorflow,代码行数:11,代码来源:shape.py
示例14: testPack_Axis0
def testPack_Axis0(self):
  """Stacking constants along axis 0 folds; a placeholder blocks folding."""
  inputs = [np.random.rand(4, 7) for _ in range(3)]
  # All-constant inputs fold to the equivalent numpy stack.
  folded = tensor_util.constant_value(array_ops.stack(inputs))
  self.assertAllClose(np.array(inputs), folded)
  # One unknown input makes the whole stack non-constant.
  with_hole = array_ops.stack(
      [inputs[0], array_ops.placeholder(dtypes.float32), inputs[2]])
  self.assertIs(None, tensor_util.constant_value(with_hole))
开发者ID:ziky90,项目名称:tensorflow,代码行数:11,代码来源:tensor_util_test.py
示例15: slice_shape
def slice_shape(start_sum, size, name):
  """Closure to slice out shape."""
  if not start_sum:
    # A single zero keeps `sum(start_sum)` well-defined for an empty prefix.
    start_sum = [array_ops.zeros([], dtype=dtypes.int32, name="zero")]
  fully_static = (x.get_shape().ndims is not None and
                  self._is_all_constant_helper(size, *start_sum))
  if fully_static:
    # Read the slice straight off the static shape of `x`.
    begin = sum(tensor_util.constant_value(s) for s in start_sum)
    end = begin + tensor_util.constant_value(size)
    piece = x.get_shape()[begin:end].as_list()
    if all(dim is not None for dim in piece):
      return ops.convert_to_tensor(piece, dtype=dtypes.int32, name=name)
  # Dynamic fallback: slice the runtime shape vector.
  return array_ops.slice(array_ops.shape(x), [sum(start_sum)], [size])
开发者ID:ahmedsaiduk,项目名称:tensorflow,代码行数:12,代码来源:shape.py
示例16: _get_event_reduce_dims
def _get_event_reduce_dims(self, min_event_ndims, event_ndims):
  """Compute the reduction dimensions given event_ndims."""
  def _static(value):
    # Python ints pass through; tensors are constant-folded when possible.
    return value if isinstance(value, int) else tensor_util.constant_value(value)

  min_ndims_ = _static(min_event_ndims)
  event_ndims_ = _static(event_ndims)
  if min_ndims_ is None or event_ndims_ is None:
    # Dynamic fallback: build the negative axis range in the graph.
    reduce_ndims = event_ndims - min_event_ndims
    return math_ops.range(-reduce_ndims, 0)
  # Static: the trailing (event_ndims - min_event_ndims) axes as negatives.
  return [-axis for axis in range(1, event_ndims_ - min_ndims_ + 1)]
开发者ID:kimr843,项目名称:tensorflow,代码行数:12,代码来源:bijector_impl.py
示例17: testEqual
def testEqual(self):
  """constant_value folds math_ops.equal for scalars and broadcast shapes."""
  # Scalar inputs.
  same = math_ops.equal(constant_op.constant(1), constant_op.constant(1))
  self.assertEqual(tensor_util.constant_value(same), True)
  differ = math_ops.equal(constant_op.constant(1), constant_op.constant(0))
  self.assertEqual(tensor_util.constant_value(differ), False)
  # Shaped inputs with broadcast semantics: [1,2] vs [2,1] -> [2,2].
  broadcast = math_ops.equal(constant_op.constant([[0, 1]]),
                             constant_op.constant([[0], [1]]))
  self.assertAllEqual(tensor_util.constant_value(broadcast),
                      [[True, False], [False, True]])
开发者ID:ziky90,项目名称:tensorflow,代码行数:13,代码来源:tensor_util_test.py
示例18: _EditDistanceShape
def _EditDistanceShape(op):
  """Shape function for the EditDistance op."""
  hypothesis_shape = tensor_util.constant_value(op.inputs[2])
  truth_shape = tensor_util.constant_value(op.inputs[5])
  if hypothesis_shape is None or truth_shape is None:
    # Either shape unknown statically: the output shape is unknown too.
    return [tensor_shape.unknown_shape()]
  if len(hypothesis_shape) != len(truth_shape):
    raise ValueError(
        "Inconsistent ranks in hypothesis and truth. Saw shapes: %s and %s" %
        (str(hypothesis_shape), str(truth_shape)))
  # Elementwise max over all but the innermost dimension.
  merged = [max(h, t)
            for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])]
  return [tensor_shape.TensorShape(merged)]
开发者ID:sherrym,项目名称:tensorflow,代码行数:13,代码来源:array_ops.py
示例19: make_dims
def make_dims(start_sum, size, name):
  """Closure to make dims range."""
  if not start_sum:
    # A single zero keeps `sum(start_sum)` well-defined for an empty prefix.
    start_sum = [tf.zeros([], dtype=tf.int32, name="zero")]
  if not self._is_all_constant_helper(size, *start_sum):
    # Dynamic fallback: build the range inside the graph.
    offset = sum(start_sum)
    return tf.range(offset, offset + size)
  # Fully static: materialize the indices eagerly as a Python list.
  begin = sum(tensor_util.constant_value(s) for s in start_sum)
  end = begin + tensor_util.constant_value(size)
  return tf.convert_to_tensor(
      list(range(begin, end)), dtype=tf.int32, name=name)
开发者ID:lewisKit,项目名称:probability,代码行数:13,代码来源:shape.py
示例20: _split_dataset_batch
def _split_dataset_batch(dataset, split_batch_by):
  """Divide a batch-ed dataset's batches into smaller batches."""
  # TODO(sourabhbajaj): Remove this in lieu of distributed datasets
  # pylint: disable=protected-access
  def _get_batch_dataset(d):
    """Get the underlying batch dataset from the dataset object."""
    if isinstance(d, dataset_ops.DatasetV1Adapter):
      # Look through the V1 compatibility wrapper.
      d = d._dataset
    if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
      return d
    elif isinstance(d, dataset_ops.PrefetchDataset):
      # Batching may be followed by (only) a prefetch; recurse through it.
      return _get_batch_dataset(d._input_dataset)
    raise ValueError(
        "Unable to get batched dataset from the input dataset. `batch` "
        "`map_and_batch` need to be the last operations on the dataset. "
        "The batch operations can be followed by a prefetch.")

  batched_dataset = _get_batch_dataset(dataset)
  # Pull the batch parameters off whichever batching node was found.
  if isinstance(batched_dataset, dataset_ops.BatchDataset):
    batch_size = batched_dataset._batch_size
    drop_remainder = batched_dataset._drop_remainder
  elif isinstance(batched_dataset, batching._MapAndBatchDataset):
    batch_size = batched_dataset._batch_size_t
    drop_remainder = batched_dataset._drop_remainder_t

  # Remember any prefetch buffer so it can be re-applied after re-batching.
  prefetch_buffer = None
  if isinstance(dataset, dataset_ops.PrefetchDataset):
    prefetch_buffer = dataset._buffer_size
  elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
        and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
    prefetch_buffer = dataset._dataset._buffer_size
  # pylint: enable=protected-access

  # Fold tensor-valued batch parameters to Python values when possible.
  if tensor_util.is_tensor(batch_size):
    batch_size = tensor_util.constant_value(batch_size)
  if tensor_util.is_tensor(drop_remainder):
    drop_remainder = tensor_util.constant_value(drop_remainder)

  if batch_size % split_batch_by:
    raise ValueError(
        "Batch size %s cannot be sharded evenly across replicas %s" % (
            batch_size, split_batch_by))
  new_batch_size = batch_size // split_batch_by

  # Re-batch: flatten back to elements, then batch at the smaller size.
  dataset = dataset.apply(batching.unbatch())
  dataset = dataset.batch(new_batch_size, drop_remainder=drop_remainder)
  if prefetch_buffer is not None:
    dataset = dataset.prefetch(prefetch_buffer)
  return dataset
开发者ID:rmlarsen,项目名称:tensorflow,代码行数:51,代码来源:input_lib.py
注:本文中的tensorflow.python.framework.tensor_util.constant_value函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论