This article collects typical usage examples of the Python function tensorflow.matrix_diag_part. If you are wondering what matrix_diag_part does, how to call it, or how it is used in real code, the curated examples below should help.
The following presents 20 code examples of matrix_diag_part, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python samples.
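Before the examples, here is a minimal sketch of what the op does: it extracts the main diagonal (or a batch of diagonals) from a tensor whose innermost two dimensions form matrices. It uses the TF 1.x symbol tf.matrix_diag_part; in TF 2.x the same op is exposed as tf.linalg.diag_part.

import tensorflow as tf

mat = tf.constant([[1.0, 2.0],
                   [3.0, 4.0]])
diag = tf.matrix_diag_part(mat)  # shape (2,), values [1.0, 4.0]

with tf.Session() as sess:
    print(sess.run(diag))  # [1. 4.]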
Example 1: testSampleWithSameSeed
def testSampleWithSameSeed(self):
    if tf.executing_eagerly():
        return
    scale = make_pd(1., 2)
    df = 4

    chol_w = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=False)

    x = self.evaluate(chol_w.sample(1, seed=42))
    chol_x = [chol(x[0])]

    full_w = tfd.Wishart(df, scale, input_output_cholesky=False)
    self.assertAllClose(x, self.evaluate(full_w.sample(1, seed=42)))

    chol_w_chol = tfd.Wishart(
        df, scale_tril=chol(scale), input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(chol_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(chol_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))

    full_w_chol = tfd.Wishart(df, scale=scale, input_output_cholesky=True)
    self.assertAllClose(chol_x, self.evaluate(full_w_chol.sample(1, seed=42)))
    eigen_values = tf.matrix_diag_part(full_w_chol.sample(1000, seed=42))
    np.testing.assert_array_less(0., self.evaluate(eigen_values))
Author: asudomoeva, Project: probability, Lines: 25, Source: wishart_test.py
Example 2: testInvalidShapeAtEval
def testInvalidShapeAtEval(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = tf.placeholder(dtype=tf.float32)
        with self.assertRaisesOpError("input must be at least 2-dim"):
            tf.matrix_diag_part(v).eval(feed_dict={v: 0.0})
        with self.assertRaisesOpError("last two dimensions must be equal"):
            tf.matrix_diag_part(v).eval(feed_dict={v: [[0, 1], [1, 0], [0, 0]]})
Author: Nishant23, Project: tensorflow, Lines: 7, Source: diag_op_test.py
Example 3: testRectangular
def testRectangular(self):
    with self.test_session(use_gpu=self._use_gpu):
        mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
        mat_diag = tf.matrix_diag_part(mat)
        self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))

        mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
        mat_diag = tf.matrix_diag_part(mat)
        self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
Author: 821760408-sp, Project: tensorflow, Lines: 8, Source: diag_op_test.py
Example 4: _variance
def _variance(self):
    if distribution_util.is_diagonal_scale(self.scale):
        return 2. * tf.square(self.scale.diag_part())
    elif (isinstance(self.scale, tf.linalg.LinearOperatorLowRankUpdate) and
          self.scale.is_self_adjoint):
        return tf.matrix_diag_part(2. * self.scale.matmul(self.scale.to_dense()))
    else:
        return 2. * tf.matrix_diag_part(
            self.scale.matmul(self.scale.to_dense(), adjoint_arg=True))
Author: lewisKit, Project: probability, Lines: 9, Source: vector_laplace_linear_operator.py
Example 5: _maybe_attach_assertion
def _maybe_attach_assertion(x):
    if not validate_args:
        return x
    if assert_positive:
        return control_flow_ops.with_dependencies([
            tf.assert_positive(
                tf.matrix_diag_part(x),
                message="diagonal part must be positive"),
        ], x)
    return control_flow_ops.with_dependencies([
        tf.assert_none_equal(
            tf.matrix_diag_part(x),
            tf.zeros([], x.dtype),
            message="diagonal part must be non-zero"),
    ], x)
Author: lewisKit, Project: probability, Lines: 14, Source: distribution_util.py
Example 6: testSample
def testSample(self):
    with self.test_session():
        scale = make_pd(1., 2)
        df = 4

        chol_w = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=False)

        x = chol_w.sample_n(1, seed=42).eval()
        chol_x = [chol(x[0])]

        full_w = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=False)
        self.assertAllClose(x, full_w.sample_n(1, seed=42).eval())

        chol_w_chol = distributions.WishartCholesky(
            df, chol(scale), cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, chol_w_chol.sample_n(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(chol_w_chol.sample_n(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        full_w_chol = distributions.WishartFull(
            df, scale, cholesky_input_output_matrices=True)
        self.assertAllClose(chol_x, full_w_chol.sample_n(1, seed=42).eval())
        eigen_values = tf.matrix_diag_part(full_w_chol.sample_n(1000, seed=42))
        np.testing.assert_array_less(0., eigen_values.eval())

        # Check first and second moments.
        df = 4.
        chol_w = distributions.WishartCholesky(
            df=df,
            scale=chol(make_pd(1., 3)),
            cholesky_input_output_matrices=False)
        x = chol_w.sample_n(10000, seed=42)
        self.assertAllEqual((10000, 3, 3), x.get_shape())

        moment1_estimate = tf.reduce_mean(x, reduction_indices=[0]).eval()
        self.assertAllClose(chol_w.mean().eval(),
                            moment1_estimate,
                            rtol=0.05)

        # The variance estimate uses the squares rather than outer products
        # because Wishart.variance is the diagonal of the Wishart covariance
        # matrix.
        variance_estimate = (
            tf.reduce_mean(tf.square(x), reduction_indices=[0]) -
            tf.square(moment1_estimate)).eval()
        self.assertAllClose(chol_w.variance().eval(),
                            variance_estimate,
                            rtol=0.05)
Author: 821760408-sp, Project: tensorflow, Lines: 50, Source: wishart_test.py
Example 7: _expectation
def _expectation(p, mean, none, kern, feat, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
        - K_{.,.} :: RBF kernel

    :return: NxDxM
    """
    Xmu, Xcov = p.mu, p.cov

    with tf.control_dependencies([tf.assert_equal(
            tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.tf_int),
            message="Currently cannot handle slicing in exKxz.")]):
        Xmu = tf.identity(Xmu)

    with params_as_tensors_for(kern), params_as_tensors_for(feat):
        D = tf.shape(Xmu)[1]
        lengthscales = kern.lengthscales if kern.ARD \
            else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales

        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov)  # NxDxD
        all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu, 2)  # NxDxM

        sqrt_det_L = tf.reduce_prod(lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(
            tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs)  # NxDxM
        non_exponent_term = tf.matmul(Xcov, exponent_mahalanobis, transpose_a=True)
        non_exponent_term = tf.expand_dims(Xmu, 2) + non_exponent_term  # NxDxM

        exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term
Author: vincentadam87, Project: GPflow, Lines: 35, Source: expectations.py
Example 8: _build_likelihood
def _build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R.
    This method computes the variational lower bound on the likelihood,
    which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F) ]
    with
        q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    K_alpha = tf.matmul(K, self.q_alpha)
    f_mean = K_alpha + self.mean_function(self.X)

    # compute the variance for each of the outputs
    I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
                [self.num_latent, 1, 1])
    A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
        tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
    L = tf.cholesky(A)
    Li = tf.matrix_triangular_solve(L, I)
    tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
    f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))

    # some statistics about A are used in the KL
    A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    trAi = tf.reduce_sum(tf.square(Li))

    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
                tf.reduce_sum(K_alpha * self.q_alpha))

    v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
    return tf.reduce_sum(v_exp) - KL
Author: sanket-kamthe, Project: GPflow, Lines: 32, Source: vgp.py
Example 9: multivariate_normal
def multivariate_normal(x, mu, L):
    """
    Computes the log-density of a multivariate normal.

    :param x  : Dx1 or DxN sample(s) for which we want the density
    :param mu : Dx1 or DxN mean(s) of the normal distribution
    :param L  : DxD Cholesky decomposition of the covariance matrix
    :return p : (1,) or (N,) vector of log densities for each of the N x's and/or mu's

    x and mu are either vectors or matrices. If both are vectors (N, 1):
        p[0] = log pdf(x), where x ~ N(mu, LL^T)
    If at least one is a matrix, we assume independence over the *columns*:
    the number of rows must match the size of L. Broadcasting behaviour:
        p[n] = log pdf of:
        x[n] ~ N(mu, LL^T), or x ~ N(mu[n], LL^T), or x[n] ~ N(mu[n], LL^T)
    """
    if x.shape.ndims is None:
        warnings.warn('Shape of x must be 2D at computation.')
    elif x.shape.ndims != 2:
        raise ValueError('Shape of x must be 2D.')
    if mu.shape.ndims is None:
        warnings.warn('Shape of mu may be unknown or not 2D.')
    elif mu.shape.ndims != 2:
        raise ValueError('Shape of mu must be 2D.')

    d = x - mu
    alpha = tf.matrix_triangular_solve(L, d, lower=True)
    num_dims = tf.cast(tf.shape(d)[0], L.dtype)
    p = - 0.5 * tf.reduce_sum(tf.square(alpha), 0)
    p -= 0.5 * num_dims * np.log(2 * np.pi)
    p -= tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    return p
Author: vincentadam87, Project: GPflow, Lines: 31, Source: logdensities.py
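The final subtraction in multivariate_normal uses the fact that for Sigma = L L^T, half the log-determinant equals the sum of the log-diagonal of L, since det(Sigma) = det(L)^2. A minimal NumPy sanity check of that identity (a standalone sketch, not GPflow code):

import numpy as np

A = np.random.randn(4, 4)
Sigma = A @ A.T + 4.0 * np.eye(4)      # a generic SPD matrix
L = np.linalg.cholesky(Sigma)

half_logdet_direct = 0.5 * np.log(np.linalg.det(Sigma))
half_logdet_chol = np.sum(np.log(np.diag(L)))
assert np.allclose(half_logdet_direct, half_logdet_chol)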
Example 10: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
    # We formulate the Jacobian with respect to the flattened matrices
    # `vec(x)` and `vec(y)`. Suppose for notational convenience that
    # the first `n` entries of `vec(x)` are the diagonal of `x`, and
    # the remaining `n**2-n` entries are the off-diagonals in
    # arbitrary order. Then the Jacobian is a block-diagonal matrix,
    # with the Jacobian of the diagonal bijector in the first block,
    # and the identity Jacobian for the remaining entries (since this
    # bijector acts as the identity on non-diagonal entries):
    #
    # J_vec(x) (vec(y)) =
    # -------------------------------
    # | J_diag(x) (diag(y))    0    | n entries
    # |                             |
    # |          0             I    | n**2-n entries
    # -------------------------------
    #   n                  n**2-n
    #
    # Since the log-det of the second (identity) block is zero, the
    # overall log-det-jacobian is just the log-det of the first block,
    # from the diagonal bijector.
    #
    # Note that for elementwise operations (exp, softplus, etc.) the
    # first block of the Jacobian will itself be a diagonal matrix,
    # but our implementation does not require this to be true.
    return self._diag_bijector.forward_log_det_jacobian(
        tf.matrix_diag_part(x), event_ndims=1)
Author: asudomoeva, Project: probability, Lines: 27, Source: transform_diagonal.py
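A short usage sketch, assuming a tensorflow_probability build that exposes this bijector as tfb.TransformDiagonal: with an Exp diagonal bijector, the log-det above reduces to the sum of the input's diagonal entries.

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors

x = np.array([[1.0, 2.0],
              [3.0, 4.0]], dtype=np.float32)
bij = tfb.TransformDiagonal(diag_bijector=tfb.Exp())
# Exp's log-det over the diagonal is x[0, 0] + x[1, 1] = 5.0; the
# off-diagonal entries contribute nothing.
fldj = bij.forward_log_det_jacobian(x, event_ndims=2)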
Example 11: testMatrix
def testMatrix(self):
    with self.test_session(use_gpu=self._use_gpu):
        v = np.array([1.0, 2.0, 3.0])
        mat = np.diag(v)
        mat_diag = tf.matrix_diag_part(mat)
        self.assertEqual((3,), mat_diag.get_shape())
        self.assertAllEqual(mat_diag.eval(), v)
Author: Nishant23, Project: tensorflow, Lines: 7, Source: diag_op_test.py
Example 12: gauss_kl
def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)

    to

        p(x) = N(0, K)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt. q_mu is a matrix: each column
    contains a mean. q_sqrt is a 3D tensor: each matrix within is a lower
    triangular square-root of the covariance of q. K is a positive definite
    matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # Constant term.
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # Force lower triangle.
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # Log-det term.
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term.
    return KL
Author: GPflow, Project: GPflow, Lines: 30, Source: kullback_leiblers.py
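For a single latent function, gauss_kl computes the standard closed form KL[N(m, S) || N(0, K)] = 0.5 * (tr(K^-1 S) + m^T K^-1 m - d + log|K| - log|S|); the terms in the code above are exactly these pieces, summed over latents. A NumPy sanity check of that formula (a standalone sketch, not GPflow code):

import numpy as np

d = 3
m = np.random.randn(d, 1)
Lq = np.tril(np.random.randn(d, d))
np.fill_diagonal(Lq, np.abs(np.diag(Lq)) + 0.5)   # valid lower-tri square root
S = Lq @ Lq.T                                     # covariance of q
A = np.random.randn(d, d)
K = A @ A.T + d * np.eye(d)                       # covariance of p

Kinv = np.linalg.inv(K)
kl = 0.5 * (np.trace(Kinv @ S)
            + float(m.T @ Kinv @ m)
            - d
            + np.log(np.linalg.det(K))
            - np.log(np.linalg.det(S)))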
Example 13: _variance
def _variance(self):
    # Because df is a scalar, we need to expand dimensions to match
    # scale_operator. We use ellipsis notation (...) to select all dimensions
    # and add two dimensions to the end.
    df = self.df[..., tf.newaxis, tf.newaxis]
    x = tf.sqrt(df) * self._square_scale_operator()
    d = tf.expand_dims(tf.matrix_diag_part(x), -1)
    v = tf.square(x) + tf.matmul(d, d, adjoint_b=True)
    return v
Author: asudomoeva, Project: probability, Lines: 9, Source: wishart.py
Example 14: testGrad
def testGrad(self):
    shapes = ((3, 3), (5, 3, 3))
    with self.test_session(use_gpu=self._use_gpu):
        for shape in shapes:
            x = tf.constant(np.random.rand(*shape), dtype=np.float32)
            y = tf.matrix_diag_part(x)
            error = tf.test.compute_gradient_error(x, x.get_shape().as_list(),
                                                   y, y.get_shape().as_list())
            self.assertLess(error, 1e-4)
Author: Nishant23, Project: tensorflow, Lines: 9, Source: diag_op_test.py
Example 15: testRectangularBatch
def testRectangularBatch(self):
    with self.test_session(use_gpu=self._use_gpu):
        v_batch = np.array([[1.0, 2.0],
                            [4.0, 5.0]])
        mat_batch = np.array(
            [[[1.0, 0.0, 0.0],
              [0.0, 2.0, 0.0]],
             [[4.0, 0.0, 0.0],
              [0.0, 5.0, 0.0]]])
        self.assertEqual(mat_batch.shape, (2, 2, 3))
        mat_batch_diag = tf.matrix_diag_part(mat_batch)
        self.assertEqual((2, 2), mat_batch_diag.get_shape())
        self.assertAllEqual(mat_batch_diag.eval(), v_batch)
Author: 821760408-sp, Project: tensorflow, Lines: 13, Source: diag_op_test.py
Example 16: _forward_log_det_jacobian
def _forward_log_det_jacobian(self, x):
    # CholeskyToInvCholesky.forward(X) is equivalent to:
    # 1) M = CholeskyOuterProduct.forward(X)
    # 2) N = invert(M)
    # 3) Y = CholeskyOuterProduct.inverse(N)
    #
    # For step 1,
    #   |Jac(outerprod(X))| = 2^p prod_{j=0}^{p-1} X[j,j]^{p-j}.
    # For step 2,
    #   |Jac(inverse(M))| = |M|^{-(p+1)} (because M is symmetric)
    #                     = |X|^{-2(p+1)} = (prod_{j=0}^{p-1} X[j,j])^{-2(p+1)}
    #   (see http://web.mit.edu/18.325/www/handouts/handout2.pdf sect 3.0.2).
    # For step 3,
    #   |Jac(Cholesky(N))| = |Jac(outerprod(Y))|^{-1}
    #                      = (2^p prod_{j=0}^{p-1} Y[j,j]^{p-j})^{-1}
    n = tf.cast(tf.shape(x)[-1], x.dtype)
    y = self._forward(x)
    return (
        (self._cholesky.forward_log_det_jacobian(x, event_ndims=2) -
         (n + 1.) * tf.reduce_sum(tf.log(tf.matrix_diag_part(x)), axis=-1)) -
        (self._cholesky.forward_log_det_jacobian(y, event_ndims=2) -
         (n + 1.) * tf.reduce_sum(tf.log(tf.matrix_diag_part(y)), axis=-1)))
Author: lewisKit, Project: probability, Lines: 22, Source: cholesky_to_inv_cholesky.py
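A hedged sketch of what the forward map computes, assuming a tensorflow_probability build that exposes this bijector as tfb.CholeskyToInvCholesky: given X = chol(M), forward returns chol(M^-1).

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors

M = np.array([[4.0, 2.0],
              [2.0, 3.0]], dtype=np.float32)
x = np.linalg.cholesky(M)              # X = chol(M)
bij = tfb.CholeskyToInvCholesky()
y = bij.forward(x)                     # Y = chol(inv(M)) = chol(inv(X @ X.T))
fldj = bij.forward_log_det_jacobian(x, event_ndims=2)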
Example 17: _build_likelihood
def _build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood. For a derivation of the terms in here, see the associated
    SGPR notebook.
    """
    num_inducing = len(self.feature)
    num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)
    output_dim = tf.cast(tf.shape(self.Y)[1], settings.float_type)

    err = self.Y - self.mean_function(self.X)
    Kdiag = self.kern.Kdiag(self.X)
    Kuf = self.feature.Kuf(self.kern, self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    L = tf.cholesky(Kuu)
    sigma = tf.sqrt(self.likelihood.variance)

    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    AAT = tf.matmul(A, A, transpose_b=True)
    B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma

    # Compute the log marginal bound
    bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
    bound += tf.negative(output_dim) * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
    bound -= 0.5 * num_data * output_dim * tf.log(self.likelihood.variance)
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / self.likelihood.variance
    bound += 0.5 * output_dim * tf.reduce_sum(tf.matrix_diag_part(AAT))

    return bound
Author: vincentadam87, Project: GPflow, Lines: 36, Source: sgpr.py
Example 18: fit
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return a History object.
    return
Author: qixiuai, Project: tensor2tensor, Lines: 17, Source: bayes.py
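The posterior in the comment above can be reproduced in a standalone NumPy sketch (hypothetical shapes: x is [N, M], y is [N]; noise_variance is assumed known):

import numpy as np

noise_variance = 0.1
x = np.random.randn(50, 3)
y = np.random.randn(50)

# precision = (1/noise_variance) x^T x + I; mean and covariance then follow
# the formulas quoted in the comment above.
precision = x.T @ x / noise_variance + np.eye(3)
mean = np.linalg.solve(precision, x.T @ y) / noise_variance
covariance = np.linalg.inv(precision)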
Example 19: zero_mean_covariance
def zero_mean_covariance(covariance, stability=0.0):
    '''Output covariance of ReLU for zero-mean Gaussian input.

    f(x) = max(x, 0).

    Args:
        covariance: Input covariance matrix (Size, Size).
        stability: For accurate results this should be zero;
            if used in training, use a value like 1e-4 for stability.

    Returns:
        Output covariance of ReLU for zero-mean Gaussian input (Size, Size).
    '''
    S = outer(tf.sqrt(tf.matrix_diag_part(covariance)))
    V = tf.clip_by_value(covariance / S, stability - 1.0, 1.0 - stability)
    Q = tf.acos(-V) * V + tf.sqrt(1.0 - (V**2.0)) - 1.0
    return S * Q * (1.0 / (2.0 * math.pi))
Author: ModarTensai, Project: network_moments, Lines: 18, Source: relu.py
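A usage sketch; the helper outer below is an assumed stand-in for the project's own outer-product utility, which is not shown in this snippet:

import math
import tensorflow as tf

def outer(v):
    # Hypothetical stand-in: outer product v v^T of a vector.
    return v[:, None] * v[None, :]

cov = tf.constant([[1.0, 0.5],
                   [0.5, 2.0]])
relu_cov = zero_mean_covariance(cov, stability=1e-4)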
Example 20: _assertions
def _assertions(self, x):
    if not self.validate_args:
        return []
    x_shape = tf.shape(x)
    is_matrix = tf.assert_rank_at_least(
        x, 2, message="Input must have rank at least 2.")
    is_square = tf.assert_equal(
        x_shape[-2], x_shape[-1], message="Input must be a square matrix.")
    diag_part_x = tf.matrix_diag_part(x)
    is_lower_triangular = tf.assert_equal(
        tf.matrix_band_part(x, 0, -1),  # Preserves triu, zeros rest.
        tf.matrix_diag(diag_part_x),
        message="Input must be lower triangular.")
    is_positive_diag = tf.assert_positive(
        diag_part_x, message="Input must have all positive diagonal entries.")
    return [is_matrix, is_square, is_lower_triangular, is_positive_diag]
Author: lewisKit, Project: probability, Lines: 19, Source: cholesky_to_inv_cholesky.py
Note: The tensorflow.matrix_diag_part examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from community-contributed projects, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.