This page collects typical usage examples of the Python function tensorflow.python.ops.array_ops.matrix_transpose. If you are unsure what matrix_transpose does, how to call it, or want to see it used in context, the curated code samples below may help.
Below are 20 code examples of matrix_transpose, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
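Before the examples, here is a minimal self-contained sketch of the basic API (using the public alias tf.linalg.matrix_transpose, which wraps the same internal array_ops.matrix_transpose; TF 2.x eager mode assumed):

import tensorflow as tf

# Transpose the last two dimensions of a (possibly batched) matrix.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])                # shape (2, 3)
print(tf.linalg.matrix_transpose(x))        # shape (3, 2)

# With conjugate=True, complex entries are conjugated as well.
z = tf.constant([[1 + 1j, 2 + 2j]])
print(tf.linalg.matrix_transpose(z, conjugate=True))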
Example 1: test_defining_spd_operator_by_taking_real_part

def test_defining_spd_operator_by_taking_real_part(self):
  with self.cached_session() as sess:
    # S is real and positive.
    s = linear_operator_test_util.random_uniform(
        shape=(10, 2, 3, 4), dtype=dtypes.float32, minval=1., maxval=2.)

    # Let S = S1 + S2, the Hermitian and anti-hermitian parts.
    # S1 = 0.5 * (S + S^H), S2 = 0.5 * (S - S^H),
    # where ^H is the Hermitian transpose of the function:
    #    f(n0, n1, n2)^H := ComplexConjugate[f(N0-n0, N1-n1, N2-n2)].
    # We want to isolate S1, since
    #   S1 is Hermitian by construction
    #   S1 is real since S is
    #   S1 is positive since it is the sum of two positive kernels

    # IDFT[S] = IDFT[S1] + IDFT[S2]
    #         =      H1  +      H2
    # where H1 is real since it is Hermitian,
    # and H2 is imaginary since it is anti-Hermitian.
    ifft_s = fft_ops.ifft3d(math_ops.cast(s, dtypes.complex64))

    # Throw away H2, keep H1.
    real_ifft_s = math_ops.real(ifft_s)

    # This is the perfect spectrum!
    # spectrum = DFT[H1]
    #          = S1,
    fft_real_ifft_s = fft_ops.fft3d(
        math_ops.cast(real_ifft_s, dtypes.complex64))

    # S1 is Hermitian ==> operator is real.
    # S1 is real ==> operator is self-adjoint.
    # S1 is positive ==> operator is positive-definite.
    operator = linalg.LinearOperatorCirculant3D(fft_real_ifft_s)

    # Allow for complex output so we can check operator has zero imag part.
    self.assertEqual(operator.dtype, dtypes.complex64)

    matrix, matrix_t = sess.run([
        operator.to_dense(),
        array_ops.matrix_transpose(operator.to_dense())
    ])
    operator.assert_positive_definite().run()  # Should not fail.
    np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
    self.assertAllClose(matrix, matrix_t)

    # Just to test the theory, get S2 as well.
    # This should create an imaginary operator.
    # S2 is anti-Hermitian ==> operator is imaginary.
    # S2 is real ==> operator is self-adjoint.
    imag_ifft_s = math_ops.imag(ifft_s)
    fft_imag_ifft_s = fft_ops.fft3d(
        1j * math_ops.cast(imag_ifft_s, dtypes.complex64))
    operator_imag = linalg.LinearOperatorCirculant3D(fft_imag_ifft_s)

    matrix, matrix_h = sess.run([
        operator_imag.to_dense(),
        array_ops.matrix_transpose(math_ops.conj(operator_imag.to_dense()))
    ])
    self.assertAllClose(matrix, matrix_h)
    np.testing.assert_allclose(0, np.real(matrix), atol=1e-7)

Author: JonathanRaiman, Project: tensorflow, Lines: 60, Source: linear_operator_circulant_test.py
Example 2: __call__

def __call__(self, shape, dtype=dtypes.float32):
  """Returns a tensor object initialized as specified by the initializer.

  Args:
    shape: Shape of the tensor.
    dtype: Optional dtype of the tensor. Only floating point types are
      supported.

  Raises:
    ValueError: If the dtype is not floating point or the input shape is not
      valid.
  """
  dtype = _assert_float_dtype(dtype)
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))

  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = gen_linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)

Author: Wajih-O, Project: tensorflow, Lines: 35, Source: init_ops_v2.py
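The method above is the core of TensorFlow's orthogonal initializer. A minimal sketch of exercising it through the public API (assuming TF 2.x with tf.keras.initializers.Orthogonal) and checking the orthogonality it guarantees:

import tensorflow as tf

init = tf.keras.initializers.Orthogonal(gain=1.0, seed=42)
w = init(shape=(64, 32), dtype=tf.float32)

# When rows >= cols, the columns are orthonormal: w^T w ~= I.
identity_check = tf.matmul(w, w, transpose_a=True)
print(tf.reduce_max(tf.abs(identity_check - tf.eye(32))))  # near zero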
Example 3: testNonBatchMatrix

def testNonBatchMatrix(self):
  matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
  expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
  with self.test_session():
    transposed = array_ops.matrix_transpose(matrix)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed.eval())

Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: array_ops_test.py
Example 4: adjoint

def adjoint(matrix, name=None):
  """Transposes the last two dimensions of and conjugates tensor `matrix`.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    matrix = ops.convert_to_tensor(matrix, name='matrix')
    return array_ops.matrix_transpose(matrix, conjugate=True)

Author: Wajih-O, Project: tensorflow, Lines: 25, Source: linalg_impl.py
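As a quick sanity check, tf.linalg.adjoint should agree with NumPy's conjugate transpose; a minimal sketch (TF 2.x eager assumed):

import numpy as np
import tensorflow as tf

x = tf.constant([[1 + 1j, 2 + 2j],
                 [3 - 3j, 4 - 4j]], dtype=tf.complex64)
np.testing.assert_allclose(
    tf.linalg.adjoint(x).numpy(),
    x.numpy().conj().T)  # NumPy's conjugate transpose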
Example 5: __call__

def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                 num_cols)

  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)

Author: moses-sun, Project: tensorflow, Lines: 26, Source: init_ops.py
Example 6: _overdetermined

def _overdetermined(op, grad):
  """Gradients for the overdetermined case of MatrixSolveLs.

  This is the backprop for the solution to the normal equations of the first
  kind:
     X = F(A, B) = (A^T * A + lambda * I)^{-1} * A^T * B
  which solves the least squares problem
     min ||A * X - B||_F^2 + lambda ||X||_F^2.
  """
  a = op.inputs[0]
  b = op.inputs[1]
  l2_regularizer = math_ops.cast(op.inputs[2], a.dtype.base_dtype)
  x = op.outputs[0]
  a_shape = array_ops.shape(a)
  batch_shape = a_shape[:-2]
  n = a_shape[-1]

  identity = linalg_ops.eye(n, batch_shape=batch_shape, dtype=a.dtype)
  gramian = math_ops.matmul(a, a, adjoint_a=True) + l2_regularizer * identity
  chol = linalg_ops.cholesky(gramian)
  # Temporary z = (A^T * A + lambda * I)^{-1} * grad.
  z = linalg_ops.cholesky_solve(chol, grad)
  xzt = math_ops.matmul(x, z, adjoint_b=True)
  zx_sym = xzt + array_ops.matrix_transpose(xzt)
  grad_a = -math_ops.matmul(a, zx_sym) + math_ops.matmul(b, z, adjoint_b=True)
  grad_b = math_ops.matmul(a, z)
  return (grad_a, grad_b, None)

Author: AutumnQYN, Project: tensorflow, Lines: 27, Source: linalg_grad.py
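This gradient is what fires when you differentiate through tf.linalg.lstsq in the overdetermined (fast, normal-equations) case; a minimal sketch of triggering it (TF 2.x assumed):

import tensorflow as tf

a = tf.random.normal([10, 3])   # more rows than columns: overdetermined
b = tf.random.normal([10, 2])
with tf.GradientTape() as tape:
    tape.watch(a)
    x = tf.linalg.lstsq(a, b, l2_regularizer=0.1)
    loss = tf.reduce_sum(x ** 2)
grad_a = tape.gradient(loss, a)  # backprop uses the rule above
print(grad_a.shape)              # (10, 3)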
Example 7: _unvec_by

def _unvec_by(y, num_col):
  """Unstack vector to form a matrix, with a specified amount of columns."""
  return array_ops.matrix_transpose(
      array_ops.reshape(
          y,
          array_ops.concat(
              [array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))

Author: aritratony, Project: tensorflow, Lines: 7, Source: linear_operator_kronecker.py
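The transpose here is what makes _unvec_by invert a column-major vec: tf.reshape fills row-major, so the helper reshapes to (num_col, -1) and then transposes. A hypothetical standalone version using only public ops (unvec_by is my name, not TensorFlow's):

import tensorflow as tf

def unvec_by(y, num_col):
    # Reshape row-major to (num_col, rows), then transpose to (rows, num_col).
    return tf.linalg.matrix_transpose(
        tf.reshape(y, tf.concat([tf.shape(y)[:-1], [num_col, -1]], axis=0)))

m = tf.constant([[1, 2], [3, 4], [5, 6]])              # shape (3, 2)
vec = tf.reshape(tf.linalg.matrix_transpose(m), [-1])  # column-major vec: [1,3,5,2,4,6]
tf.debugging.assert_equal(unvec_by(vec, num_col=2), m)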
Example 8: _covariance

def _covariance(self):
  if (isinstance(self.scale, linalg.LinearOperatorIdentity) or
      isinstance(self.scale, linalg.LinearOperatorScaledIdentity) or
      isinstance(self.scale, linalg.LinearOperatorDiag)):
    return array_ops.matrix_diag(math_ops.square(self.scale.diag_part()))
  else:
    # TODO(b/35040238): Remove transpose once LinOp supports `transpose`.
    return self.scale.apply(array_ops.matrix_transpose(self.scale.to_dense()))

Author: jzuern, Project: tensorflow, Lines: 8, Source: mvn_linear_operator.py
Example 9: testConjugate

def testConjugate(self):
  m = [[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j, 6 + 6j]]
  expected_transposed = [[1 - 1j, 4 - 4j], [2 - 2j, 5 - 5j], [3 - 3j, 6 - 6j]]
  with self.test_session():
    matrix = ops.convert_to_tensor(m)
    transposed = array_ops.matrix_transpose(matrix, conjugate=True)
    self.assertEqual((3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed.eval())

Author: ChengYuXiang, Project: tensorflow, Lines: 8, Source: array_ops_test.py
Example 10: testNonBatchMatrixDynamicallyDefined

def testNonBatchMatrixDynamicallyDefined(self):
  matrix = [[1, 2, 3], [4, 5, 6]]  # Shape (2, 3)
  expected_transposed = [[1, 4], [2, 5], [3, 6]]  # Shape (3, 2)
  with self.test_session():
    matrix_ph = array_ops.placeholder(dtypes.int32)
    transposed = array_ops.matrix_transpose(matrix_ph)
    self.assertAllEqual(
        expected_transposed, transposed.eval(feed_dict={matrix_ph: matrix}))

Author: AlbertXiebnu, Project: tensorflow, Lines: 8, Source: array_ops_test.py
Example 11: sign_magnitude_positive_definite

def sign_magnitude_positive_definite(
    raw, off_diagonal_scale=0., overall_scale=0.):
  """Constructs a positive definite matrix from an unconstrained input matrix.

  We want to keep the whole matrix on a log scale, but also allow off-diagonal
  elements to be negative, so the sign of off-diagonal elements is modeled
  separately from their magnitude (using the lower and upper triangles
  respectively). Specifically:

  for i < j, we have:
    output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
        exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)

  output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)

  output = output_cholesky^T * output_cholesky

  where raw, off_diagonal_scale, and overall_scale are
  un-constrained real-valued variables. The resulting values are stable
  around zero due to the exponential (and the softsign keeps the function
  smooth).

  Args:
    raw: A [..., M, M] Tensor.
    off_diagonal_scale: A scalar or [...] shaped Tensor controlling the
        relative scale of off-diagonal values in the output matrix.
    overall_scale: A scalar or [...] shaped Tensor controlling the overall
        scale of the output matrix.
  Returns:
    The `output` matrix described above, a [..., M, M] positive definite
    matrix.
  """
  raw = ops.convert_to_tensor(raw)
  diagonal = array_ops.matrix_diag_part(raw)

  def _right_pad_with_ones(tensor, target_rank):
    # Allow broadcasting even if overall_scale and off_diagonal_scale have
    # batch dimensions
    tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
    return array_ops.reshape(tensor,
                             array_ops.concat(
                                 [
                                     array_ops.shape(tensor), array_ops.ones(
                                         [target_rank - array_ops.rank(tensor)],
                                         dtype=target_rank.dtype)
                                 ],
                                 axis=0))

  # We divide the log values by 2 to compensate for the squaring that happens
  # when transforming Cholesky factors into positive definite matrices.
  sign_magnitude = (gen_math_ops.exp(
      (raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
       _right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
                    nn.softsign(array_ops.matrix_transpose(raw)))
  sign_magnitude.set_shape(raw.get_shape())
  cholesky_factor = array_ops.matrix_set_diag(
      input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
      diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
          overall_scale, array_ops.rank(diagonal))) / 2.))
  return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)

Author: AutumnQYN, Project: tensorflow, Lines: 58, Source: math_utils.py
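A quick way to convince yourself the construction is positive definite: the Cholesky factor is upper triangular with a strictly positive (exp) diagonal, so factor^T @ factor has strictly positive eigenvalues. A hypothetical public-API sketch of the same idea, simplified to scalar scales (spd_from_unconstrained is my name; the internal function above is not exported):

import tensorflow as tf

def spd_from_unconstrained(raw, off_diagonal_scale=0., overall_scale=0.):
    # Sign from the opposite triangle (softsign), magnitude on a log scale.
    diagonal = tf.linalg.diag_part(raw)
    sign_magnitude = (tf.exp((raw + off_diagonal_scale + overall_scale) / 2.) *
                      tf.math.softsign(tf.linalg.matrix_transpose(raw)))
    cholesky_factor = tf.linalg.set_diag(
        tf.linalg.band_part(sign_magnitude, 0, -1),   # keep upper triangle
        tf.exp((diagonal + overall_scale) / 2.))      # positive diagonal
    return tf.matmul(cholesky_factor, cholesky_factor, transpose_a=True)

raw = tf.random.normal([4, 4])
out = spd_from_unconstrained(raw)
print(tf.reduce_min(tf.linalg.eigvalsh(out)))  # strictly positive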
Example 12: _GradWithInverseL

def _GradWithInverseL(l, l_inverse, grad):
  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)
  grad_a = math_ops.matmul(
      math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
  return grad_a * 0.5

Author: AbhinavJain13, Project: tensorflow, Lines: 9, Source: cholesky_op_test.py
Example 13: test_cholesky

def test_cholesky(self):
  z = random_ops.random_normal([2, 3, 3])
  x = (math_ops.matmul(z, array_ops.matrix_transpose(z))  # Ensure pos. def.
       + linalg_ops.eye(3))  # Ensure well-conditioned.

  def loop_fn(i):
    return linalg_ops.cholesky(array_ops.gather(x, i))

  self._test_loop_fn(loop_fn, 2)

Author: aritratony, Project: tensorflow, Lines: 9, Source: math_test.py
Example 14: TriAngSolveCompositeGrad

def TriAngSolveCompositeGrad(l, grad):
  # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}

  # Compute ((l^{H} @ grad) * (tril(ones)-1/2*eye)) = middle
  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)

  # Compute l^{-H} @ middle = z
  l_inverse_middle = linalg_ops.matrix_triangular_solve(l, middle, adjoint=True)

  # We need to compute z @ l^{-1}. With matrix_triangular_solve we
  # actually compute l^{-H} @ z^{H} = grad. Since we later add grad^{H}
  # we can omit the conjugate transpose here.
  z_h = math_ops.conj(array_ops.matrix_transpose(l_inverse_middle))
  grad_a = linalg_ops.matrix_triangular_solve(l, z_h, adjoint=True)
  grad_a += math_ops.conj(array_ops.matrix_transpose(grad_a))
  return grad_a * 0.5

Author: AutumnQYN, Project: tensorflow, Lines: 19, Source: cholesky_op_test.py
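This helper is a test-side re-derivation of the Cholesky gradient; in user code the same rule is applied automatically when differentiating through tf.linalg.cholesky. A minimal sketch (TF 2.x assumed):

import tensorflow as tf

z = tf.random.normal([3, 3])
a = tf.matmul(z, z, transpose_b=True) + 3. * tf.eye(3)  # SPD, well-conditioned
with tf.GradientTape() as tape:
    tape.watch(a)
    l = tf.linalg.cholesky(a)
    loss = tf.reduce_sum(l ** 2)
grad = tape.gradient(loss, a)  # backprop follows the rule sketched above
print(grad)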
Example 15: _tridiagonal_solve_compact_format

def _tridiagonal_solve_compact_format(diagonals,
                                      rhs,
                                      transpose_rhs=False,
                                      conjugate_rhs=False,
                                      name=None):
  """Helper function used after the input has been cast to compact form."""
  diags_rank, rhs_rank = len(diagonals.shape), len(rhs.shape)

  if diags_rank < 2:
    raise ValueError(
        'Expected diagonals to have rank at least 2, got {}'.format(diags_rank))
  if rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
    raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
        diags_rank - 1, diags_rank, rhs_rank))
  if diagonals.shape[-2] != 3:
    raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
  if not diagonals.shape[:-2].is_compatible_with(rhs.shape[:diags_rank - 2]):
    raise ValueError('Batch shapes {} and {} are incompatible'.format(
        diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))

  def check_num_lhs_matches_num_rhs():
    if diagonals.shape[-1] != rhs.shape[-2]:
      raise ValueError('Expected number of left-hand sided and right-hand '
                       'sides to be equal, got {} and {}'.format(
                           diagonals.shape[-1], rhs.shape[-2]))

  if rhs_rank == diags_rank - 1:
    # Rhs provided as a vector, ignoring transpose_rhs
    if conjugate_rhs:
      rhs = math_ops.conj(rhs)
    rhs = array_ops.expand_dims(rhs, -1)
    check_num_lhs_matches_num_rhs()
    return array_ops.squeeze(
        linalg_ops.tridiagonal_solve(diagonals, rhs, name), -1)

  if transpose_rhs:
    rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
  elif conjugate_rhs:
    rhs = math_ops.conj(rhs)

  check_num_lhs_matches_num_rhs()
  result = linalg_ops.tridiagonal_solve(diagonals, rhs, name)
  return array_ops.matrix_transpose(result) if transpose_rhs else result

Author: adit-chandra, Project: tensorflow, Lines: 43, Source: linalg_impl.py
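The public entry point for this helper is tf.linalg.tridiagonal_solve. A minimal sketch of the compact diagonals format it expects, with the superdiagonal, main diagonal, and subdiagonal stacked as rows (TF 2.x assumed):

import tensorflow as tf

# Solve the tridiagonal system
#   [[2, 1, 0],        [[1.],
#    [1, 2, 1],  x  =   [2.],
#    [0, 1, 2]]         [3.]]
# In compact format the last superdiagonal entry and the first
# subdiagonal entry are padding and are ignored.
diagonals = tf.constant([[1., 1., 0.],   # superdiagonal
                         [2., 2., 2.],   # main diagonal
                         [0., 1., 1.]])  # subdiagonal
rhs = tf.constant([[1.], [2.], [3.]])
x = tf.linalg.tridiagonal_solve(diagonals, rhs, diagonals_format='compact')
print(x)  # [[0.5], [0.0], [1.5]]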
Example 16: testBatchMatrix

def testBatchMatrix(self):
  matrix_0 = [[1, 2, 3], [4, 5, 6]]
  matrix_0_t = [[1, 4], [2, 5], [3, 6]]
  matrix_1 = [[11, 22, 33], [44, 55, 66]]
  matrix_1_t = [[11, 44], [22, 55], [33, 66]]
  batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
  expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
  with self.test_session():
    transposed = array_ops.matrix_transpose(batch_matrix)
    self.assertEqual((2, 3, 2), transposed.get_shape())
    self.assertAllEqual(expected_transposed, transposed.eval())

Author: AlbertXiebnu, Project: tensorflow, Lines: 11, Source: array_ops_test.py
Example 17: testBatchMatrixDynamicallyDefined

def testBatchMatrixDynamicallyDefined(self):
  matrix_0 = [[1, 2, 3], [4, 5, 6]]
  matrix_0_t = [[1, 4], [2, 5], [3, 6]]
  matrix_1 = [[11, 22, 33], [44, 55, 66]]
  matrix_1_t = [[11, 44], [22, 55], [33, 66]]
  batch_matrix = [matrix_0, matrix_1]  # Shape (2, 2, 3)
  expected_transposed = [matrix_0_t, matrix_1_t]  # Shape (2, 3, 2)
  with self.test_session():
    batch_matrix_ph = array_ops.placeholder(dtypes.int32)
    transposed = array_ops.matrix_transpose(batch_matrix_ph)
    self.assertAllEqual(
        expected_transposed,
        transposed.eval(feed_dict={batch_matrix_ph: batch_matrix}))

Author: AlbertXiebnu, Project: tensorflow, Lines: 13, Source: array_ops_test.py
Example 18: _stddev

def _stddev(self):
  if (isinstance(self.scale, linalg.LinearOperatorIdentity) or
      isinstance(self.scale, linalg.LinearOperatorScaledIdentity) or
      isinstance(self.scale, linalg.LinearOperatorDiag)):
    return math_ops.abs(self.scale.diag_part())
  elif (isinstance(self.scale, linalg.LinearOperatorUDVHUpdate)
        and self.scale.is_self_adjoint):
    return math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.apply(self.scale.to_dense())))
  else:
    # TODO(b/35040238): Remove transpose once LinOp supports `transpose`.
    return math_ops.sqrt(array_ops.matrix_diag_part(
        self.scale.apply(array_ops.matrix_transpose(self.scale.to_dense()))))

Author: jzuern, Project: tensorflow, Lines: 13, Source: mvn_linear_operator.py
Example 19: test_real_hermitian_spectrum_gives_real_symmetric_operator

def test_real_hermitian_spectrum_gives_real_symmetric_operator(self):
  with self.cached_session() as sess:
    # This is a real and hermitian spectrum.
    spectrum = [[1., 2., 2.], [3., 4., 4.], [3., 4., 4.]]
    operator = linalg.LinearOperatorCirculant(spectrum)

    matrix_tensor = operator.to_dense()
    self.assertEqual(matrix_tensor.dtype, dtypes.complex64)
    matrix_t = array_ops.matrix_transpose(matrix_tensor)
    imag_matrix = math_ops.imag(matrix_tensor)
    matrix, matrix_transpose, imag_matrix = sess.run(
        [matrix_tensor, matrix_t, imag_matrix])

    np.testing.assert_allclose(0, imag_matrix, atol=1e-6)
    self.assertAllClose(matrix, matrix_transpose, atol=0)

Author: aritratony, Project: tensorflow, Lines: 15, Source: linear_operator_circulant_test.py
Example 20: _updated_mat

def _updated_mat(self, mat, v, diag):
  # Get dense matrix defined by its square root, which is an update of `mat`:
  #   A = (mat + v D v^T) (mat + v D v^T)^T
  # D is the diagonal matrix with `diag` on the diagonal.

  # If diag is None, then it defaults to the identity matrix, so DV^T = V^T
  if diag is None:
    diag_vt = array_ops.matrix_transpose(v)
  else:
    diag_mat = array_ops.matrix_diag(diag)
    diag_vt = math_ops.matmul(diag_mat, v, adjoint_b=True)

  v_diag_vt = math_ops.matmul(v, diag_vt)
  sqrt = mat + v_diag_vt
  a = math_ops.matmul(sqrt, sqrt, adjoint_b=True)
  return a.eval()

Author: AlbertXiebnu, Project: tensorflow, Lines: 16, Source: operator_pd_vdvt_update_test.py
Note: The tensorflow.python.ops.array_ops.matrix_transpose examples on this page were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Consult each project's license before redistributing or using the code; do not reproduce without permission.