This article collects typical usage examples of the Python function tensorflow.python.framework.ops.convert_to_tensor. If you have been wondering exactly how convert_to_tensor is used, what it does, or what real calls to it look like, the hand-picked examples below should help.
A total of 20 code examples of the convert_to_tensor function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
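Before the collected examples, here is a minimal, self-contained sketch of what convert_to_tensor does, shown through the public tf.convert_to_tensor alias (which wraps the same internal ops.convert_to_tensor); this sketch is not part of the collected projects and assumes TensorFlow 2.x with eager execution:

import numpy as np
import tensorflow as tf

# Python lists, NumPy arrays, and scalars are all accepted and become Tensors.
t1 = tf.convert_to_tensor([1.0, 2.0, 3.0])                        # dtype inferred as float32
t2 = tf.convert_to_tensor(np.arange(4, dtype=np.int64), dtype=tf.int64)  # explicit dtype
t3 = tf.convert_to_tensor(7)                                       # Python scalar -> 0-D tensor

print(t1.dtype, t1.shape)   # float32, (3,)
print(t2.dtype, t2.shape)   # int64, (4,)
print(t3.shape)             # ()

# Passing an existing Tensor just returns it; no copy is made.
t4 = tf.convert_to_tensor(t1)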
Example 1: test_complex_tensor_with_imag_zero_doesnt_raise
def test_complex_tensor_with_imag_zero_doesnt_raise(self):
  x = ops.convert_to_tensor([1., 0, 3])
  y = ops.convert_to_tensor([0., 0, 0])
  z = math_ops.complex(x, y)
  with self.cached_session():
    # Should not raise.
    linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Author: AnishShah, Project: tensorflow, Lines: 7, Source: linear_operator_util_test.py
Example 2: read_and_augment_data
def read_and_augment_data(image_list, label_list, image_size, batch_size, max_nrof_epochs,
                          random_crop, random_flip, random_rotate, nrof_preprocess_threads, shuffle=True):
    images = ops.convert_to_tensor(image_list, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)

    # Makes an input queue
    input_queue = tf.train.slice_input_producer([images, labels],
                                                num_epochs=max_nrof_epochs, shuffle=shuffle)

    images_and_labels = []
    for _ in range(nrof_preprocess_threads):
        image, label = read_images_from_disk(input_queue)
        if random_rotate:
            image = tf.py_func(random_rotate_image, [image], tf.uint8)
        if random_crop:
            image = tf.random_crop(image, [image_size, image_size, 3])
        else:
            image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)
        if random_flip:
            image = tf.image.random_flip_left_right(image)
        #pylint: disable=no-member
        image.set_shape((image_size, image_size, 3))
        image = tf.image.per_image_standardization(image)
        images_and_labels.append([image, label])

    image_batch, label_batch = tf.train.batch_join(
        images_and_labels, batch_size=batch_size,
        capacity=4 * nrof_preprocess_threads * batch_size,
        allow_smaller_final_batch=True)

    return image_batch, label_batch
Author: kissthink, Project: facenet_regonistant, Lines: 32, Source: facenet.py
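The queue-runner APIs used above (tf.train.slice_input_producer, tf.train.batch_join) are TensorFlow 1.x constructs that were removed in 2.x. As a rough, hedged sketch of the same idea with tf.data (make_dataset and its preprocessing choices are illustrative assumptions, not part of the facenet project):

import tensorflow as tf

def make_dataset(image_paths, labels, image_size, batch_size, shuffle=True):
    ds = tf.data.Dataset.from_tensor_slices((image_paths, labels))
    if shuffle:
        ds = ds.shuffle(buffer_size=len(image_paths))

    def _load_and_preprocess(path, label):
        # Read and decode one image file, then apply a minimal augmentation.
        image = tf.io.decode_image(tf.io.read_file(path), channels=3,
                                   expand_animations=False)
        image = tf.image.resize_with_crop_or_pad(image, image_size, image_size)
        image = tf.image.random_flip_left_right(image)
        image = tf.image.per_image_standardization(image)
        return image, label

    return ds.map(_load_and_preprocess,
                  num_parallel_calls=tf.data.AUTOTUNE).batch(batch_size)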
Example 3: __init__
def __init__(self, inputs, sequence_length, time_major=False, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    time_major: Python bool. Whether the tensors in `inputs` are time major.
      If `False` (default), they are assumed to be batch major.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sequence_length` is not a 1D tensor.
  """
  with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
    inputs = ops.convert_to_tensor(inputs, name="inputs")
    if not time_major:
      inputs = nest.map_structure(_transpose_batch_time, inputs)

    self._input_tas = nest.map_structure(_unstack_ta, inputs)
    self._sequence_length = ops.convert_to_tensor(
        sequence_length, name="sequence_length")
    if self._sequence_length.get_shape().ndims != 1:
      raise ValueError(
          "Expected sequence_length to be a vector, but received shape: %s" %
          self._sequence_length.get_shape())

    self._zero_inputs = nest.map_structure(
        lambda inp: array_ops.zeros_like(inp[0, :]), inputs)

    self._batch_size = array_ops.size(sequence_length)
Author: AlbertXiebnu, Project: tensorflow, Lines: 30, Source: helper.py
Example 4: __init__
def __init__(self, indices, values, shape):
  """Creates a `SparseTensor`.

  Args:
    indices: A 2-D int64 tensor of shape `[N, ndims]`.
    values: A 1-D tensor of any type and shape `[N]`.
    shape: A 1-D int64 tensor of shape `[ndims]`.

  Returns:
    A `SparseTensor`
  """
  with ops.name_scope(None, "SparseTensor", [indices, values, shape]):
    indices = ops.convert_to_tensor(
        indices, name="indices", dtype=dtypes.int64)
    # Always pass as_ref=True because we want to be able to update
    # values later if it is a VariableOp.
    # TODO(touts): Consider adding mutable_values() when 'values'
    # is a VariableOp and updating users of SparseTensor.
    values = ops.convert_to_tensor(values, name="values", as_ref=True)
    shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int64)
  self._indices = indices
  self._values = values
  self._shape = shape

  indices_shape = indices.get_shape().with_rank(2)
  values_shape = values.get_shape().with_rank(1)
  shape_shape = shape.get_shape().with_rank(1)

  # Assert number of rows in indices match the number of elements in values.
  indices_shape[0].merge_with(values_shape[0])
  # Assert number of columns in indices matches the number of elements in
  # shape.
  indices_shape[1].merge_with(shape_shape[0])
Author: DavidNemeskey, Project: tensorflow, Lines: 33, Source: sparse_tensor.py
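The constructor above comes from an older TensorFlow release; the public class today is tf.sparse.SparseTensor and its third argument is named dense_shape rather than shape. A small hedged usage sketch:

import tensorflow as tf

# A 3x4 matrix with two non-zero entries.
st = tf.sparse.SparseTensor(indices=[[0, 1], [2, 3]],
                            values=[10, 20],
                            dense_shape=[3, 4])
dense = tf.sparse.to_dense(st)
# dense == [[0 10  0  0]
#           [0  0  0  0]
#           [0  0  0 20]]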
Example 5: __init__
def __init__(self, example_indices, feature_indices, feature_values):
  """Creates a `SparseFeatureColumn` representation.

  Args:
    example_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
      python lists or numpy arrays.
    feature_indices: A 1-D int64 tensor of shape `[N]`. Also accepts
      python lists or numpy arrays.
    feature_values: An optional 1-D float tensor of shape `[N]`. Also
      accepts python lists or numpy arrays.

  Returns:
    A `SparseFeatureColumn`
  """
  with name_scope(None, 'SparseFeatureColumn',
                  [example_indices, feature_indices]):
    self._example_indices = convert_to_tensor(example_indices,
                                              name='example_indices',
                                              dtype=dtypes.int64)
    self._feature_indices = convert_to_tensor(feature_indices,
                                              name='feature_indices',
                                              dtype=dtypes.int64)
  self._feature_values = None
  if feature_values is not None:
    with name_scope(None, 'SparseFeatureColumn', [feature_values]):
      self._feature_values = convert_to_tensor(feature_values,
                                               name='feature_values',
                                               dtype=dtypes.float32)
Author: billho, Project: tensorflow, Lines: 28, Source: sdca_ops.py
Example 6: saturate_cast
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.op_scope([value], name, "saturate_cast") as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = maximum(value, ops.convert_to_tensor(
          dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = minimum(value, ops.convert_to_tensor(
          dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)
Author: 13331151, Project: tensorflow, Lines: 27, Source: math_ops.py
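tf.saturate_cast is still in the public API; a quick hedged illustration of the clamping behavior (not part of the original source):

import tensorflow as tf

x = tf.constant([-10, 0, 200, 300], dtype=tf.int32)
y = tf.saturate_cast(x, tf.uint8)
# y == [0, 0, 200, 255]: values outside uint8's [0, 255] range are clamped
# before the cast instead of wrapping around.
print(y.numpy())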
Example 7: __init__
def __init__(self, partitioned_dim_sizes, inner_dim_sizes,
             dim_size_dtype=None):
  """Creates a RaggedTensorDynamicShape.

  Args:
    partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for
      each partitioned dimension. If dimension `d` is uniform, then
      `partitioned_dim_sizes[d]` must be an integer scalar, specifying the
      size of all slices across dimension `d`. If dimension `d` is ragged,
      then `partitioned_dim_sizes[d]` must be an integer vector, specifying
      the size of each slice across dimension `d`.
    inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the
      number of inner dimensions. `inner_dim_sizes[n]` is the size of all
      slices across the `n`th inner dimension (which is the
      `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor).
    dim_size_dtype: dtype for dimension sizes. If not specified, then it
      is chosen based on the dtypes of `partitioned_dim_sizes` and
      `inner_dim_sizes`.
  """
  assert isinstance(partitioned_dim_sizes, (list, tuple))

  with ops.name_scope(None, 'RaggedTensorDynamicShape',
                      (partitioned_dim_sizes, inner_dim_sizes)):
    partitioned_dim_sizes = tuple(
        ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i)
        for (i, size) in enumerate(partitioned_dim_sizes))
    inner_dim_sizes = ops.convert_to_tensor(
        inner_dim_sizes, name='inner_dim_sizes')

    # Validate shapes.
    if partitioned_dim_sizes:
      for axis, dimension_size in enumerate(partitioned_dim_sizes):
        if dimension_size.shape.ndims is None:
          raise ValueError(
              'rank of partitioned_dim_sizes[%d] is unknown' % axis)
        dimension_size.shape.with_rank_at_most(1)
      if partitioned_dim_sizes[0].shape.ndims == 1:
        raise ValueError('outermost partitioned dimension must be uniform')
      if partitioned_dim_sizes[-1].shape.ndims == 0:
        raise ValueError('innermost partitioned dimension must be ragged')
    inner_dim_sizes.shape.assert_has_rank(1)

    # Convert dimension size tensors to a single dtype.
    if dim_size_dtype is None:
      dim_size_dtypes = set([p.dtype for p in partitioned_dim_sizes
                             if p.shape.ndims == 1])
      if not dim_size_dtypes:
        dim_size_dtype = dtypes.int64
      elif len(dim_size_dtypes) == 1:
        dim_size_dtype = dim_size_dtypes.pop()
      else:
        if not ragged_config.auto_cast_partition_dtype():
          raise ValueError('partitioned_dim_sizes must have matching dtypes')
        dim_size_dtype = dtypes.int64
    partitioned_dim_sizes = tuple(math_ops.cast(p, dim_size_dtype)
                                  for p in partitioned_dim_sizes)
    inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)

    self._partitioned_dim_sizes = partitioned_dim_sizes
    self._inner_dim_sizes = inner_dim_sizes
Author: aritratony, Project: tensorflow, Lines: 60, Source: ragged_tensor_shape.py
Example 8: gcd
def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a` and
    `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)

    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)

    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
    body = lambda a, b: [b, math_ops.mod(a, b)]
    a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
    return a
Author: 1000sprites, Project: tensorflow, Lines: 31, Source: util_ops.py
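The while_loop above is just Euclid's algorithm expressed as graph ops; in plain Python the same loop looks like this (a sketch for comparison, not part of the TensorFlow source):

def gcd_py(a, b):
    # Euclid's algorithm: repeatedly replace (a, b) with (b, a mod b)
    # until b reaches zero; the surviving a is the GCD.
    while b > 0:
        a, b = b, a % b
    return a

assert gcd_py(54, 24) == 6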
Example 9: fused_batch_norm
def fused_batch_norm(
    x,
    scale,
    offset,  # pylint: disable=invalid-name
    mean=None,
    variance=None,
    epsilon=0.001,
    data_format="NHWC",
    is_training=True,
    name=None):
  r"""Batch normalization.

  As described in http://arxiv.org/abs/1502.03167.

  Args:
    x: Input `Tensor` of 4 dimensions.
    scale: A `Tensor` of 1 dimension for scaling.
    offset: A `Tensor` of 1 dimension for bias.
    mean: A `Tensor` of 1 dimension for population mean used for inference.
    variance: A `Tensor` of 1 dimension for population variance
      used for inference.
    epsilon: A small float number added to the variance of x.
    data_format: The data format for x. Either "NHWC" (default) or "NCHW".
    is_training: A bool value to specify if the operation is used for
      training or inference.
    name: A name for this operation (optional).

  Returns:
    y: A 4D Tensor for the normalized, scaled, offsetted x.
    batch_mean: A 1D Tensor for the mean of x.
    batch_var: A 1D Tensor for the variance of x.

  Raises:
    ValueError: If mean or variance is not None when is_training is True.
  """
  x = ops.convert_to_tensor(x, name="input")
  scale = ops.convert_to_tensor(scale, name="scale")
  offset = ops.convert_to_tensor(offset, name="offset")
  if is_training:
    if (mean is not None) or (variance is not None):
      raise ValueError("Both 'mean' and 'variance' must be None "
                       "if is_training is True.")
  if mean is None:
    mean = constant_op.constant([])
  if variance is None:
    variance = constant_op.constant([])

  # Add 1e-12 to epsilon when epsilon <= 1e-5 to prevent CUDNN exception.
  epsilon = epsilon if epsilon > 1e-5 else epsilon + 1e-12

  # pylint: disable=protected-access
  y, batch_mean, batch_var, _, _ = gen_nn_ops._fused_batch_norm(
      x,
      scale,
      offset,
      mean,
      variance,
      epsilon=epsilon,
      data_format=data_format,
      is_training=is_training,
      name=name)
  return y, batch_mean, batch_var
Author: BloodD, Project: tensorflow, Lines: 60, Source: nn_impl.py
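For reference, the arithmetic the fused kernel performs in training mode is ordinary batch normalization. A hedged NumPy sketch of the per-channel math for NHWC input (the fused op may apply a different bias correction to the variance it returns, so treat this only as a conceptual reference):

import numpy as np

def batch_norm_reference(x, scale, offset, epsilon=0.001):
    # x has shape [N, H, W, C]; statistics are computed per channel.
    batch_mean = x.mean(axis=(0, 1, 2))
    batch_var = x.var(axis=(0, 1, 2))
    y = (x - batch_mean) / np.sqrt(batch_var + epsilon) * scale + offset
    return y, batch_mean, batch_var

x = np.random.randn(2, 4, 4, 3).astype(np.float32)
y, mean, var = batch_norm_reference(x, scale=np.ones(3), offset=np.zeros(3))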
Example 10: Counter
def Counter(start=0, step=1, dtype=dtypes.int64):
  """Creates a `Dataset` that counts from `start` in steps of size `step`.

  For example:

  ```python
  Dataset.count() == [0, 1, 2, ...)
  Dataset.count(2) == [2, 3, ...)
  Dataset.count(2, 5) == [2, 7, 12, ...)
  Dataset.count(0, -1) == [0, -1, -2, ...)
  Dataset.count(10, -1) == [10, 9, ...)
  ```

  Args:
    start: (Optional.) The starting value for the counter. Defaults to 0.
    step: (Optional.) The step size for the counter. Defaults to 1.
    dtype: (Optional.) The data type for counter elements. Defaults to
      `tf.int64`.

  Returns:
    A `Dataset` of scalar `dtype` elements.
  """
  with ops.name_scope("counter"):
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    step = ops.convert_to_tensor(step, dtype=dtype, name="step")
    return dataset_ops.Dataset.from_tensors(0).repeat(None).apply(
        scan_ops.scan(start, lambda state, _: (state + step, state)))
Author: ThunderQi, Project: tensorflow, Lines: 27, Source: counter.py
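A hedged usage sketch with the public counterpart, tf.data.experimental.Counter (assuming TensorFlow 2.x; the counter dataset is infinite, so take() is needed to stop iteration):

import tensorflow as tf

ds = tf.data.experimental.Counter(start=2, step=5)
print([int(x) for x in ds.take(4)])   # [2, 7, 12, 17]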
Example 11: rot90
def rot90(image, k=1, name=None):
  """Rotate an image counter-clockwise by 90 degrees.

  Args:
    image: A 3-D tensor of shape `[height, width, channels]`.
    k: A scalar integer. The number of times the image is rotated by 90 degrees.
    name: A name for this operation (optional).

  Returns:
    A rotated 3-D tensor of the same type and shape as `image`.
  """
  with ops.name_scope(name, 'rot90', [image, k]) as scope:
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=False)
    k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k')
    k.get_shape().assert_has_rank(0)
    k = math_ops.mod(k, 4)

    def _rot90():
      return array_ops.transpose(array_ops.reverse_v2(image, [1]),
                                 [1, 0, 2])
    def _rot180():
      return array_ops.reverse_v2(image, [0, 1])
    def _rot270():
      return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]),
                                  [1])
    cases = [(math_ops.equal(k, 1), _rot90),
             (math_ops.equal(k, 2), _rot180),
             (math_ops.equal(k, 3), _rot270)]

    ret = control_flow_ops.case(cases, default=lambda: image, exclusive=True,
                                name=scope)
    ret.set_shape([None, None, image.get_shape()[2]])
    return ret
Author: kdavis-mozilla, Project: tensorflow, Lines: 34, Source: image_ops_impl.py
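tf.image.rot90 remains public; a small hedged check of the shape behavior (not part of the original source):

import numpy as np
import tensorflow as tf

image = np.arange(12, dtype=np.float32).reshape(2, 3, 2)   # height=2, width=3
rotated = tf.image.rot90(image, k=1)    # one 90-degree counter-clockwise turn
print(rotated.shape)                     # (3, 2, 2): height and width swap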
Example 12: __init__
def __init__(self,
             filenames,
             record_bytes,
             header_bytes=None,
             footer_bytes=None,
             buffer_size=None):
  """Creates a `FixedLengthRecordDataset`.

  Args:
    filenames: A `tf.string` tensor containing one or more filenames.
    record_bytes: A `tf.int64` scalar representing the number of bytes in
      each record.
    header_bytes: (Optional.) A `tf.int64` scalar representing the number of
      bytes to skip at the start of a file.
    footer_bytes: (Optional.) A `tf.int64` scalar representing the number of
      bytes to ignore at the end of a file.
    buffer_size: (Optional.) A `tf.int64` scalar representing the number of
      bytes to buffer when reading.
  """
  super(FixedLengthRecordDataset, self).__init__()
  self._filenames = ops.convert_to_tensor(
      filenames, dtype=dtypes.string, name="filenames")
  self._record_bytes = ops.convert_to_tensor(
      record_bytes, dtype=dtypes.int64, name="record_bytes")

  self._header_bytes = _convert_optional_param_to_tensor(
      "header_bytes", header_bytes)
  self._footer_bytes = _convert_optional_param_to_tensor(
      "footer_bytes", footer_bytes)
  self._buffer_size = _convert_optional_param_to_tensor(
      "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES)
Author: AbhinavJain13, Project: tensorflow, Lines: 31, Source: readers.py
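A hedged usage sketch of the public tf.data.FixedLengthRecordDataset, reading 4-byte records from a small binary file written on the spot (the file name is an arbitrary example):

import tensorflow as tf

path = "records.bin"
with open(path, "wb") as f:
    f.write(bytes(range(12)))           # twelve bytes -> three 4-byte records

ds = tf.data.FixedLengthRecordDataset(path, record_bytes=4)
for record in ds:
    print(len(record.numpy()))          # 4, printed three times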
Example 13: embedding_lookup
def embedding_lookup(params, ids, name='embedding_lookup'):
  """Provides an N-dimensional version of tf.embedding_lookup.

  Ids are flattened to a 1d tensor before being passed to embedding_lookup;
  then they are unflattened to match the original ids shape plus an extra
  leading dimension of the size of the embeddings.

  Args:
    params: List of tensors of size D0 x D1 x ... x Dn-2 x Dn-1.
    ids: N-dimensional tensor of B0 x B1 x .. x Bn-2 x Bn-1.
      Must contain indexes into params.
    name: Optional name for the op.

  Returns:
    A tensor of size B0 x B1 x .. x Bn-2 x Bn-1 x D1 x ... x Dn-2 x Dn-1
    containing the values from the params tensor(s) for indices in ids.

  Raises:
    ValueError: if some parameters are invalid.
  """
  with ops.name_scope(name, 'embedding_lookup', [params, ids]):
    params = ops.convert_to_tensor(params)
    ids = ops.convert_to_tensor(ids)
    shape = array_ops_.shape(ids)
    ids_flat = array_ops_.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    embeds_flat = nn.embedding_lookup(params, ids_flat, name)
    embed_shape = array_ops_.concat_v2([shape, [-1]], 0)
    embeds = array_ops_.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(params.get_shape()[1:]))
    return embeds
Author: AliMiraftab, Project: tensorflow, Lines: 31, Source: embeddings_ops.py
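The flatten/lookup/reshape trick above is easy to see in NumPy (a sketch only; the shapes mirror the docstring's B and D labels):

import numpy as np

params = np.arange(20.0).reshape(5, 4)          # vocabulary of 5 embeddings, dim 4
ids = np.array([[0, 2], [4, 1], [3, 3]])         # shape (3, 2)

ids_flat = ids.reshape(-1)                       # flatten to 1-D
embeds_flat = params[ids_flat]                   # plain gather, shape (6, 4)
embeds = embeds_flat.reshape(ids.shape + (4,))   # back to (3, 2, 4)
assert embeds.shape == (3, 2, 4)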
Example 14: test_complex_tensor_with_nonzero_imag_raises
def test_complex_tensor_with_nonzero_imag_raises(self):
  x = ops.convert_to_tensor([1., 2, 0])
  y = ops.convert_to_tensor([1., 2, 0])
  z = math_ops.complex(x, y)
  with self.cached_session():
    with self.assertRaisesOpError("ABC123"):
      linear_operator_util.assert_zero_imag_part(z, message="ABC123").run()
Author: AnishShah, Project: tensorflow, Lines: 7, Source: linear_operator_util_test.py
Example 15: __init__
def __init__(self, input_dataset, map_func, batch_size, num_parallel_batches):
  """See `Dataset.map()` for details."""
  super(_MapAndBatchDataset, self).__init__(input_dataset, map_func)
  self._batch_size = ops.convert_to_tensor(
      batch_size, dtype=dtypes.int64, name="batch_size")
  self._num_parallel_batches = ops.convert_to_tensor(
      num_parallel_batches, dtype=dtypes.int64, name="num_parallel_batches")
Author: Kongsea, Project: tensorflow, Lines: 7, Source: batching.py
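map_and_batch was a fused contrib transformation; in current TensorFlow the same pipeline is usually written as a separate map and batch, which the runtime can still fuse internally (hedged sketch, not the contrib implementation):

import tensorflow as tf

ds = (tf.data.Dataset.range(10)
      .map(lambda x: x * 2, num_parallel_calls=tf.data.AUTOTUNE)
      .batch(4))
for batch in ds:
    print(batch.numpy())   # [0 2 4 6], [8 10 12 14], [16 18]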
Example 16: stateless_random_normal
def stateless_random_normal(shape,
                            seed,
                            mean=0.0,
                            stddev=1.0,
                            dtype=dtypes.float32,
                            name=None):
  """Outputs deterministic pseudorandom values from a normal distribution.

  This is a stateless version of `tf.random.normal`: if run twice with the
  same seeds, it will produce the same pseudorandom numbers. The output is
  consistent across multiple runs on the same hardware (and between CPU
  and GPU), but may change between versions of TensorFlow or on non-CPU/GPU
  hardware.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    seed: A shape [2] integer Tensor of seeds to the random number generator.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
      distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the normal distribution.
    dtype: The type of the output.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random normal values.
  """
  with ops.name_scope(name, "stateless_random_normal",
                      [shape, seed, mean, stddev]) as name:
    shape = random_ops._ShapeTensor(shape)  # pylint: disable=protected-access
    mean = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    rnd = gen_stateless_random_ops.stateless_random_normal(shape, seed, dtype)
    return math_ops.add(rnd * stddev, mean, name=name)
Author: aritratony, Project: tensorflow, Lines: 34, Source: stateless_random_ops.py
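The public entry point is tf.random.stateless_normal; a hedged sketch of the determinism guarantee (not part of the original source):

import tensorflow as tf

seed = [7, 42]                      # a shape-[2] integer seed
a = tf.random.stateless_normal([3], seed=seed, mean=0.0, stddev=1.0)
b = tf.random.stateless_normal([3], seed=seed)
print(bool(tf.reduce_all(a == b)))  # True: same seed, same values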
Example 17: _interp_evaluate
def _interp_evaluate(coefficients, t0, t1, t):
  """Evaluate polynomial interpolation at the given time point.

  Args:
    coefficients: list of Tensor coefficients as created by `interp_fit`.
    t0: scalar float64 Tensor giving the start of the interval.
    t1: scalar float64 Tensor giving the end of the interval.
    t: scalar float64 Tensor giving the desired interpolation point.

  Returns:
    Polynomial interpolation of the coefficients at time `t`.
  """
  with ops.name_scope('interp_evaluate'):
    t0 = ops.convert_to_tensor(t0)
    t1 = ops.convert_to_tensor(t1)
    t = ops.convert_to_tensor(t)

    dtype = coefficients[0].dtype

    assert_op = control_flow_ops.Assert(
        (t0 <= t) & (t <= t1),
        ['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
    with ops.control_dependencies([assert_op]):
      x = math_ops.cast((t - t0) / (t1 - t0), dtype)

    xs = [constant_op.constant(1, dtype), x]
    for _ in range(2, len(coefficients)):
      xs.append(xs[-1] * x)

    return _dot_product(coefficients, reversed(xs))
Author: BhaskarNallani, Project: tensorflow, Lines: 30, Source: odes.py
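Stripped of the graph plumbing, the function evaluates a polynomial at the normalized position x = (t - t0) / (t1 - t0), pairing coefficients with descending powers of x. A plain-Python sketch of that arithmetic (assumption: coefficients ordered highest degree first, as implied by reversed(xs)):

def interp_evaluate_reference(coefficients, t0, t1, t):
    x = (t - t0) / (t1 - t0)
    # xs = [1, x, x**2, ...]; pairing it reversed with the coefficients gives
    # c[0]*x**(n-1) + c[1]*x**(n-2) + ... + c[-1].
    xs = [x ** i for i in range(len(coefficients))]
    return sum(c * p for c, p in zip(coefficients, reversed(xs)))

coeffs = [2.0, -3.0, 1.0]                                  # 2x^2 - 3x + 1
print(interp_evaluate_reference(coeffs, 0.0, 2.0, 1.0))    # x = 0.5 -> 0.0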
Example 18: _generateData
def _generateData(self, matrix_size, batch_size, num_rhs, seed=42):
  np.random.seed(seed)
  data = np.random.normal(size=(batch_size, matrix_size, 3 + num_rhs))
  diags = np.stack([data[:, :, 0], data[:, :, 1], data[:, :, 2]], axis=-2)
  rhs = data[:, :, 3:]
  return (ops.convert_to_tensor(diags, dtype=dtypes.float64),
          ops.convert_to_tensor(rhs, dtype=dtypes.float64))
Author: adit-chandra, Project: tensorflow, Lines: 7, Source: tridiagonal_solve_op_test.py
Example 19: matmul
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner dimensions,
  possibly after transposition.

  Both matrices must be of the same type. The supported types are:
  `float`, `double`, `int32`, `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding flag
  to `True`. This is `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]
  ```

  Args:
    a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.op_scope([a, b], name, "MatMul") as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    if a.dtype == dtypes.float32 and (a_is_sparse or b_is_sparse):
      return sparse_matmul(a, b,
                           transpose_a=transpose_a,
                           transpose_b=transpose_b,
                           a_is_sparse=a_is_sparse,
                           b_is_sparse=b_is_sparse,
                           name=name)
    else:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)
Author: 13331151, Project: tensorflow, Lines: 60, Source: math_ops.py
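The docstring example is easy to verify with the public tf.matmul (a hedged sketch, not part of the original source):

import tensorflow as tf

a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3], dtype=tf.float32)
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2], dtype=tf.float32)
c = tf.matmul(a, b)
print(c.numpy())   # [[ 58.  64.]
                   #  [139. 154.]]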
Example 20: power_sums_tensor
def power_sums_tensor(array_size, power_matrix, multiplier):
  r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..(array_size + 1).

  Args:
    array_size: The number of non-trivial sums to pre-compute.
    power_matrix: The "A" matrix above.
    multiplier: The "B" matrix above.

  Returns:
    A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
      S[0] is the zero matrix
      S[1] is B
      S[2] is A B A^T + B
      ...and so on
  """
  array_size = math_ops.cast(array_size, dtypes.int32)
  power_matrix = ops.convert_to_tensor(power_matrix)
  identity_like_power_matrix = linalg_ops.eye(
      array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
  identity_like_power_matrix.set_shape(
      ops.convert_to_tensor(power_matrix).get_shape())
  transition_powers = functional_ops.scan(
      lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
      math_ops.range(array_size - 1),
      initializer=identity_like_power_matrix)
  summed = math_ops.cumsum(
      array_ops.concat([
          array_ops.expand_dims(multiplier, 0), math_ops.matmul(
              batch_times_matrix(transition_powers, multiplier),
              transition_powers,
              adjoint_b=True)
      ], 0))
  return array_ops.concat(
      [array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
Author: AutumnQYN, Project: tensorflow, Lines: 33, Source: math_utils.py
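A direct NumPy re-statement of the quantity being pre-computed is useful for sanity-checking the vectorized version above (a sketch under the stated definition, not the original implementation):

import numpy as np

def power_sums_reference(array_size, A, B):
    # S[0] = 0, and S[N] = sum_{i=0}^{N-1} A^i B (A^i)^T for N = 1..array_size+1.
    sums = [np.zeros_like(B)]
    for _ in range(array_size + 1):
        n = len(sums) - 1
        A_n = np.linalg.matrix_power(A, n)
        sums.append(sums[-1] + A_n @ B @ A_n.T)
    return np.stack(sums)

A = np.array([[0.9, 0.1], [0.0, 0.8]])
B = np.eye(2)
S = power_sums_reference(3, A, B)
assert np.allclose(S[2], A @ B @ A.T + B)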
Note: The tensorflow.python.framework.ops.convert_to_tensor examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.