This article collects typical usage examples of the Python function tensorflow.python.ops.linalg.linalg_impl.adjoint. If you are wondering exactly what adjoint does and how to use it, the curated code examples below should help.
Twenty code examples of the adjoint function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
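Before the examples, here is a minimal sketch of what adjoint computes (using tf.linalg.adjoint, the public alias of this internal function; TF 2.x eager mode assumed): the last two dimensions are transposed and complex entries are conjugated.

import tensorflow as tf

x = tf.constant([[1 + 1j, 2 - 2j],
                 [3 + 3j, 4 - 4j]])
print(tf.linalg.adjoint(x))
# [[1-1j, 3-3j],
#  [2+2j, 4+4j]]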
Example 1: _QrGrad
def _QrGrad(op, dq, dr):
  """Gradient for Qr."""
  q, r = op.outputs
  if q.dtype.is_complex:
    raise NotImplementedError("QrGrad not implemented for dtype: %s" % q.dtype)
  if (r.shape.ndims is None or r.shape.as_list()[-2] is None or
      r.shape.as_list()[-1] is None):
    raise NotImplementedError("QrGrad not implemented with dynamic shapes.")
  if r.shape.dims[-2].value != r.shape.dims[-1].value:
    raise NotImplementedError("QrGrad not implemented when ncols > nrows "
                              "or full_matrices is true and ncols != nrows.")

  qdq = math_ops.matmul(q, dq, adjoint_a=True)
  qdq_ = qdq - _linalg.adjoint(qdq)
  rdr = math_ops.matmul(r, dr, adjoint_b=True)
  rdr_ = rdr - _linalg.adjoint(rdr)
  tril = array_ops.matrix_band_part(qdq_ + rdr_, -1, 0)

  def _TriangularSolve(x, r):
    """Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
    return _linalg.adjoint(
        linalg_ops.matrix_triangular_solve(
            r, _linalg.adjoint(x), lower=False, adjoint=False))

  grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
  grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
  return grad_a + grad_b
Developer: JonathanRaiman, Project: tensorflow, Lines: 27, Source: linalg_grad.py
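To see _QrGrad exercised end to end, here is a small sketch (not from the source file; TF 2.x eager mode assumed) that differentiates through tf.linalg.qr on a square input, the case this gradient supports:

import tensorflow as tf

a = tf.random.normal([4, 4], dtype=tf.float64)
with tf.GradientTape() as tape:
  tape.watch(a)
  q, r = tf.linalg.qr(a)         # square input, so the gradient above applies
  loss = tf.reduce_sum(r)
grad = tape.gradient(loss, a)    # backprop runs the registered QR gradient
print(grad.shape)                # (4, 4)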
Example 2: _test_matmul
def _test_matmul(self, with_batch):
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      # If batch dimensions are omitted, but there are
      # no batch dimensions for the linear operator, then
      # skip the test case. This is already checked with
      # with_batch=True.
      if not with_batch and len(build_info.shape) <= 2:
        continue
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat = self._operator_and_matrix(
                  build_info, dtype, use_placeholder=use_placeholder)
              x = self._make_x(
                  operator, adjoint=adjoint, with_batch=with_batch)
              # If adjoint_arg, compute A X^H^H = A X.
              if adjoint_arg:
                op_matmul = operator.matmul(
                    linalg.adjoint(x),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_matmul = operator.matmul(x, adjoint=adjoint)
              mat_matmul = linear_operator_util.matmul_with_broadcast(
                  mat, x, adjoint_a=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_matmul.get_shape(),
                                    mat_matmul.get_shape())
              op_matmul_v, mat_matmul_v = sess.run(
                  [op_matmul, mat_matmul])
              self.assertAC(op_matmul_v, mat_matmul_v)
Developer: adit-chandra, Project: tensorflow, Lines: 34, Source: linear_operator_test_util.py
Example 3: _test_solve
def _test_solve(self, with_batch):
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      # If batch dimensions are omitted, but there are
      # no batch dimensions for the linear operator, then
      # skip the test case. This is already checked with
      # with_batch=True.
      if not with_batch and len(build_info.shape) <= 2:
        continue
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.test_session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                  build_info, dtype, use_placeholder=use_placeholder)
              rhs = self._make_rhs(
                  operator, adjoint=adjoint, with_batch=with_batch)
              # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
              if adjoint_arg:
                op_solve = operator.solve(
                    linalg.adjoint(rhs),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_solve = operator.solve(
                    rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
              mat_solve = linear_operator_util.matrix_solve_with_broadcast(
                  mat, rhs, adjoint=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_solve.get_shape(),
                                    mat_solve.get_shape())
              op_solve_v, mat_solve_v = sess.run(
                  [op_solve, mat_solve], feed_dict=feed_dict)
              self.assertAC(op_solve_v, mat_solve_v)
Developer: Jackiefan, Project: tensorflow, Lines: 35, Source: linear_operator_test_util.py
Example 4: _test_matmul
def _test_matmul(self, with_batch):
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.test_session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                  build_info, dtype, use_placeholder=use_placeholder)
              x = self._make_x(
                  operator, adjoint=adjoint, with_batch=with_batch)
              # If adjoint_arg, compute A X^H^H = A X.
              if adjoint_arg:
                op_matmul = operator.matmul(
                    linalg.adjoint(x),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_matmul = operator.matmul(x, adjoint=adjoint)
              mat_matmul = linear_operator_util.matmul_with_broadcast(
                  mat, x, adjoint_a=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_matmul.get_shape(),
                                    mat_matmul.get_shape())
              op_matmul_v, mat_matmul_v = sess.run(
                  [op_matmul, mat_matmul], feed_dict=feed_dict)
              self.assertAC(op_matmul_v, mat_matmul_v)
Developer: moses-sun, Project: tensorflow, Lines: 28, Source: linear_operator_test_util.py
Example 5: _matmul
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  if self._assert_proper_shapes:
    x = linalg.adjoint(x) if adjoint_arg else x
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
    x = control_flow_ops.with_dependencies([aps], x)
  if self.is_square:
    # Note that adjoint has no effect since this matrix is self-adjoint.
    if adjoint_arg:
      output_shape = array_ops.concat([
          array_ops.shape(x)[:-2],
          [array_ops.shape(x)[-1], array_ops.shape(x)[-2]]], axis=0)
    else:
      output_shape = array_ops.shape(x)
    return self._possibly_broadcast_batch_shape(
        array_ops.zeros(shape=output_shape, dtype=x.dtype))

  x_shape = array_ops.shape(x)
  n = self._num_columns if adjoint else self._num_rows
  m = x_shape[-2] if adjoint_arg else x_shape[-1]

  output_shape = array_ops.concat([x_shape[:-2], [n, m]], axis=0)

  zeros = array_ops.zeros(shape=output_shape, dtype=x.dtype)
  return self._possibly_broadcast_batch_shape(zeros)
Developer: AnishShah, Project: tensorflow, Lines: 25, Source: linear_operator_zeros.py
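As a usage note, the public wrapper for this operator is tf.linalg.LinearOperatorZeros; a minimal sketch (TF 2.x assumed, not from the source file) showing that matmul returns zeros of the output shape regardless of x:

import tensorflow as tf

op = tf.linalg.LinearOperatorZeros(num_rows=3)
x = tf.ones([3, 4])
print(op.matmul(x))  # a 3x4 tensor of zeros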
Example 6: test_solve
def test_solve(self):
  self._skip_if_tests_to_skip_contains("solve")
  for use_placeholder in self._use_placeholder_options:
    for shape in self._shapes_to_test:
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.test_session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                  shape, dtype, use_placeholder=use_placeholder)
              rhs = self._make_rhs(operator, adjoint=adjoint)
              # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
              if adjoint_arg:
                op_solve = operator.solve(
                    linalg.adjoint(rhs),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_solve = operator.solve(
                    rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
              mat_solve = linalg_ops.matrix_solve(mat, rhs, adjoint=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_solve.get_shape(),
                                    mat_solve.get_shape())
              op_solve_v, mat_solve_v = sess.run(
                  [op_solve, mat_solve], feed_dict=feed_dict)
              self.assertAC(op_solve_v, mat_solve_v)
Developer: AbhinavJain13, Project: tensorflow, Lines: 28, Source: linear_operator_test_util.py
Example 7: test_matmul
def test_matmul(self):
  self._skip_if_tests_to_skip_contains("matmul")
  for use_placeholder in self._use_placeholder_options:
    for shape in self._shapes_to_test:
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.test_session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                  shape, dtype, use_placeholder=use_placeholder)
              x = self._make_x(operator, adjoint=adjoint)
              # If adjoint_arg, compute A X^H^H = A X.
              if adjoint_arg:
                op_matmul = operator.matmul(
                    linalg.adjoint(x),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_matmul = operator.matmul(x, adjoint=adjoint)
              mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_matmul.get_shape(),
                                    mat_matmul.get_shape())
              op_matmul_v, mat_matmul_v = sess.run(
                  [op_matmul, mat_matmul], feed_dict=feed_dict)
              self.assertAC(op_matmul_v, mat_matmul_v)
Developer: AbhinavJain13, Project: tensorflow, Lines: 27, Source: linear_operator_test_util.py
Example 8: _test_solve
def _test_solve(self, with_batch):
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      for dtype in self._dtypes_to_test:
        for adjoint in self._adjoint_options:
          for adjoint_arg in self._adjoint_arg_options:
            with self.test_session(graph=ops.Graph()) as sess:
              sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
              operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                  build_info, dtype, use_placeholder=use_placeholder)
              rhs = self._make_rhs(
                  operator, adjoint=adjoint, with_batch=with_batch)
              # If adjoint_arg, solve A X = (rhs^H)^H = rhs.
              if adjoint_arg:
                op_solve = operator.solve(
                    linalg.adjoint(rhs),
                    adjoint=adjoint,
                    adjoint_arg=adjoint_arg)
              else:
                op_solve = operator.solve(
                    rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
              mat_solve = linear_operator_util.matrix_solve_with_broadcast(
                  mat, rhs, adjoint=adjoint)
              if not use_placeholder:
                self.assertAllEqual(op_solve.get_shape(),
                                    mat_solve.get_shape())
              op_solve_v, mat_solve_v = sess.run(
                  [op_solve, mat_solve], feed_dict=feed_dict)
              self.assertAC(op_solve_v, mat_solve_v)
Developer: moses-sun, Project: tensorflow, Lines: 29, Source: linear_operator_test_util.py
Example 9: _matmul
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  # Note that adjoint has no effect since this matrix is self-adjoint.
  x = linalg.adjoint(x) if adjoint_arg else x
  if self._assert_proper_shapes:
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, x)
    x = control_flow_ops.with_dependencies([aps], x)
  return self._possibly_broadcast_batch_shape(x)
Developer: AnishShah, Project: tensorflow, Lines: 7, Source: linear_operator_identity.py
Example 10: _test_matmul_base
def _test_matmul_base(
    self,
    use_placeholder,
    shapes_info,
    dtype,
    adjoint,
    adjoint_arg,
    with_batch):
  # If batch dimensions are omitted, but there are
  # no batch dimensions for the linear operator, then
  # skip the test case. This is already checked with
  # with_batch=True.
  if not with_batch and len(shapes_info.shape) <= 2:
    return
  with self.session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    x = self.make_x(
        operator, adjoint=adjoint, with_batch=with_batch)
    # If adjoint_arg, compute A X^H^H = A X.
    if adjoint_arg:
      op_matmul = operator.matmul(
          linalg.adjoint(x),
          adjoint=adjoint,
          adjoint_arg=adjoint_arg)
    else:
      op_matmul = operator.matmul(x, adjoint=adjoint)
    mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
    if not use_placeholder:
      self.assertAllEqual(op_matmul.get_shape(),
                          mat_matmul.get_shape())
    op_matmul_v, mat_matmul_v = sess.run(
        [op_matmul, mat_matmul])
    self.assertAC(op_matmul_v, mat_matmul_v)
Developer: aritratony, Project: tensorflow, Lines: 35, Source: linear_operator_test_util.py
Example 11: _assert_self_adjoint
def _assert_self_adjoint(self):
  dense = self._get_cached_dense_matrix()
  logging.warn(
      "Using (possibly slow) default implementation of assert_self_adjoint."
      " Requires conversion to a dense matrix.")
  return check_ops.assert_equal(
      dense,
      linalg.adjoint(dense),
      message="Matrix was not equal to its adjoint.")
Developer: DjangoPeng, Project: tensorflow, Lines: 9, Source: linear_operator.py
Example 12: _solve
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
  if adjoint:
    matrix = self._multiplier_matrix_conj
  else:
    matrix = self._multiplier_matrix
  if self._assert_proper_shapes:
    aps = linear_operator_util.assert_compatible_matrix_dimensions(self, rhs)
    rhs = control_flow_ops.with_dependencies([aps], rhs)
  return rhs / matrix
Developer: AnishShah, Project: tensorflow, Lines: 10, Source: linear_operator_identity.py
Example 13: _solve
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
  spectrum = self._conj_spectrum if adjoint else self._spectrum_complex

  rhs, spectrum = self._broadcast_batch_dims(rhs, spectrum)

  rhs_vb = self._vectorize_then_blockify(rhs)
  fft_rhs_vb = self._fft(rhs_vb)
  solution_vb = self._ifft(fft_rhs_vb / spectrum)
  x = self._unblockify_then_matricize(solution_vb)
  return math_ops.cast(x, self.dtype)
Developer: JonathanRaiman, Project: tensorflow, Lines: 11, Source: linear_operator_circulant.py
Example 14: test_adjoint
def test_adjoint(self):
  # Note: in the original file this method appears to be generated by an
  # enclosing test factory that supplies `shapes_info`, `dtype` and
  # `use_placeholder`; the excerpt omits that wrapper, so these three names
  # are free variables here.
  with self.test_session(graph=ops.Graph()) as sess:
    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
    operator, mat = self.operator_and_matrix(
        shapes_info, dtype, use_placeholder=use_placeholder)
    op_adjoint = operator.adjoint().to_dense()
    op_adjoint_h = operator.H.to_dense()
    mat_adjoint = linalg.adjoint(mat)
    op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
        [op_adjoint, op_adjoint_h, mat_adjoint])
    self.assertAC(mat_adjoint_v, op_adjoint_v)
    self.assertAC(mat_adjoint_v, op_adjoint_h_v)
Developer: aritratony, Project: tensorflow, Lines: 12, Source: linear_operator_test_util.py
Example 15: _solve
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  """Default implementation of _solve."""
  if self.is_square is False:
    raise NotImplementedError(
        "Solve is not yet implemented for non-square operators.")
  logging.warn(
      "Using (possibly slow) default implementation of solve."
      " Requires conversion to a dense matrix and O(N^3) operations.")
  rhs = linalg.adjoint(rhs) if adjoint_arg else rhs
  if self._can_use_cholesky():
    return linalg_ops.cholesky_solve(self._get_cached_chol(), rhs)
  return linalg_ops.matrix_solve(
      self._get_cached_dense_matrix(), rhs, adjoint=adjoint)
Developer: DjangoPeng, Project: tensorflow, Lines: 13, Source: linear_operator.py
Example 16: test_adjoint
def test_adjoint(self):
  self._skip_if_tests_to_skip_contains("adjoint")
  for use_placeholder in self._use_placeholder_options:
    for build_info in self._operator_build_infos:
      for dtype in self._dtypes_to_test:
        with self.test_session(graph=ops.Graph()) as sess:
          sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
          operator, mat = self._operator_and_matrix(
              build_info, dtype, use_placeholder=use_placeholder)
          op_adjoint = operator.adjoint().to_dense()
          op_adjoint_h = operator.H.to_dense()
          mat_adjoint = linalg.adjoint(mat)
          op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
              [op_adjoint, op_adjoint_h, mat_adjoint])
          self.assertAC(mat_adjoint_v, op_adjoint_v)
          self.assertAC(mat_adjoint_v, op_adjoint_h_v)
Developer: adit-chandra, Project: tensorflow, Lines: 16, Source: linear_operator_test_util.py
Example 17: _matmul
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  x = linalg.adjoint(x) if adjoint_arg else x
  # With F the matrix of a DFT, and F^{-1}, F^H the inverse and Hermitian
  # transpose, one can show that F^{-1} = F^{H} is the IDFT matrix. Therefore
  #   matmul(x) = F^{-1} diag(spectrum) F x
  #             = F^{H} diag(spectrum) F x,
  # so that
  #   matmul(x, adjoint=True) = F^{H} diag(conj(spectrum)) F x.
  spectrum = self._conj_spectrum if adjoint else self._spectrum_complex

  x, spectrum = self._broadcast_batch_dims(x, spectrum)

  x_vb = self._vectorize_then_blockify(x)
  fft_x_vb = self._fft(x_vb)
  block_vector_result = self._ifft(spectrum * fft_x_vb)
  y = self._unblockify_then_matricize(block_vector_result)

  return math_ops.cast(y, self.dtype)
Developer: JonathanRaiman, Project: tensorflow, Lines: 18, Source: linear_operator_circulant.py
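The FFT identity the comment describes can be cross-checked independently of TensorFlow. A small NumPy sketch (an independent check, not from the source file) verifying that multiplying by a circulant matrix equals IFFT(spectrum * FFT(x)), and that solving (as in Example 13) divides by the spectrum instead:

import numpy as np

col = np.array([4.0, 1.0, 0.0, 1.0])                # first column of circulant C
c = np.array([[col[(i - j) % 4] for j in range(4)]  # build C explicitly
              for i in range(4)])
spectrum = np.fft.fft(col)                          # eigenvalues of C
x = np.random.randn(4)
assert np.allclose(c @ x, np.fft.ifft(spectrum * np.fft.fft(x)).real)
assert np.allclose(np.linalg.solve(c, x),
                   np.fft.ifft(np.fft.fft(x) / spectrum).real)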
Example 18: _matmul
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  # Given a vector `v`, we would like to reflect `x` about the hyperplane
  # orthogonal to `v` going through the origin. We first project `x` to `v`
  # to get v * dot(v, x) / dot(v, v). After we project, we can reflect the
  # projection about the hyperplane by flipping sign to get
  # -v * dot(v, x) / dot(v, v). Finally, we can add back the component
  # that is orthogonal to v. This is invariant under reflection, since the
  # whole hyperplane is invariant. This component is equal to
  # x - v * dot(v, x) / dot(v, v), giving the formula
  # x - 2 * v * dot(v, x) / dot(v, v) for the reflection.

  # Note that because this is a reflection, it lies in O(n) (for real vector
  # spaces) or U(n) (for complex vector spaces), and thus is its own adjoint.
  x = linalg.adjoint(x) if adjoint_arg else x
  normalized_axis = self.reflection_axis / linalg.norm(
      self.reflection_axis, axis=-1, keepdims=True)
  mat = normalized_axis[..., array_ops.newaxis]
  x_dot_normalized_v = math_ops.matmul(mat, x, adjoint_a=True)

  return x - 2 * mat * x_dot_normalized_v
Developer: aritratony, Project: tensorflow, Lines: 20, Source: linear_operator_householder.py
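The comment claims the reflection is its own adjoint and inverse; a quick NumPy check of both properties in the real case (an independent sketch, not from the source file):

import numpy as np

v = np.random.randn(5)
x = np.random.randn(5)
reflect = lambda y: y - 2 * v * np.dot(v, y) / np.dot(v, v)
assert np.isclose(np.linalg.norm(reflect(x)), np.linalg.norm(x))  # orthogonal
assert np.allclose(reflect(reflect(x)), x)                        # involutory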
Example 19: _SelfAdjointEigV2Grad
def _SelfAdjointEigV2Grad(op, grad_e, grad_v):
  """Gradient for SelfAdjointEigV2."""
  e = op.outputs[0]
  compute_v = op.get_attr("compute_v")
  # a = op.inputs[0], which satisfies
  # a[...,:,:] * v[...,:,i] = e[...,i] * v[...,i]
  with ops.control_dependencies([grad_e, grad_v]):
    if compute_v:
      v = op.outputs[1]
      # Construct the matrix f(i,j) = (i != j ? 1 / (e_i - e_j) : 0).
      # Notice that because of the term involving f, the gradient becomes
      # infinite (or NaN in practice) when eigenvalues are not unique.
      # Mathematically this should not be surprising, since for (k-fold)
      # degenerate eigenvalues, the corresponding eigenvectors are only defined
      # up to arbitrary rotation in a (k-dimensional) subspace.
      f = array_ops.matrix_set_diag(
          math_ops.reciprocal(
              array_ops.expand_dims(e, -2) - array_ops.expand_dims(e, -1)),
          array_ops.zeros_like(e))
      grad_a = math_ops.matmul(
          v,
          math_ops.matmul(
              array_ops.matrix_diag(grad_e) +
              f * math_ops.matmul(v, grad_v, adjoint_a=True),
              v,
              adjoint_b=True))
    else:
      _, v = linalg_ops.self_adjoint_eig(op.inputs[0])
      grad_a = math_ops.matmul(v,
                               math_ops.matmul(
                                   array_ops.matrix_diag(grad_e),
                                   v,
                                   adjoint_b=True))
    # The forward op only depends on the lower triangular part of a, so here we
    # symmetrize and take the lower triangle
    grad_a = array_ops.matrix_band_part(grad_a + _linalg.adjoint(grad_a), -1, 0)
    grad_a = array_ops.matrix_set_diag(grad_a,
                                       0.5 * array_ops.matrix_diag_part(grad_a))
    return grad_a
Developer: JonathanRaiman, Project: tensorflow, Lines: 39, Source: linalg_grad.py
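Written out as a formula (a restatement of what the compute_v branch assembles, with bars denoting incoming gradients), the update before symmetrization is:

$$\bar{A} = V\Big(\operatorname{diag}(\bar{e}) + F \circ (V^{H}\bar{V})\Big)V^{H},\qquad F_{ij} = \begin{cases}(e_i - e_j)^{-1}, & i \neq j,\\ 0, & i = j,\end{cases}$$

where the circle denotes the elementwise (Hadamard) product; the final two statements then symmetrize the result and keep its lower triangle, since the forward op only reads that part of the input.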
Example 20: _CholeskyGrad
def _CholeskyGrad(op, grad):
  """Gradient for Cholesky."""

  # Gradient is l^{-H} @ ((l^{H} @ grad) * (tril(ones)-1/2*eye)) @ l^{-1}
  l = op.outputs[0]
  num_rows = array_ops.shape(l)[-1]
  batch_shape = array_ops.shape(l)[:-2]
  l_inverse = linalg_ops.matrix_triangular_solve(l,
                                                 linalg_ops.eye(
                                                     num_rows,
                                                     batch_shape=batch_shape,
                                                     dtype=l.dtype))

  middle = math_ops.matmul(l, grad, adjoint_a=True)
  middle = array_ops.matrix_set_diag(middle,
                                     0.5 * array_ops.matrix_diag_part(middle))
  middle = array_ops.matrix_band_part(middle, -1, 0)

  grad_a = math_ops.matmul(
      math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)

  grad_a += _linalg.adjoint(grad_a)
  return grad_a * 0.5
Developer: JonathanRaiman, Project: tensorflow, Lines: 23, Source: linalg_grad.py
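A minimal sketch (TF 2.x eager mode assumed, not from the source file) that routes a gradient through this function via tf.linalg.cholesky on a symmetric positive-definite input:

import tensorflow as tf

a = tf.constant([[4.0, 1.0], [1.0, 3.0]], dtype=tf.float64)  # SPD matrix
with tf.GradientTape() as tape:
  tape.watch(a)
  l = tf.linalg.cholesky(a)
  loss = tf.reduce_sum(l)
grad = tape.gradient(loss, a)  # symmetric, per the final lines above
print(grad)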
Note: The tensorflow.python.ops.linalg.linalg_impl.adjoint examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not reproduce without permission.