This article collects typical usage examples of the tensorflow.cholesky function in Python. If you have been wondering what tf.cholesky does, how to call it, and what real code using it looks like, the hand-picked examples below should help.
Twenty code examples of the cholesky function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
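Before the collected examples, here is a minimal, self-contained sketch of what tf.cholesky computes, written against the TensorFlow 1.x API used throughout this article (in TensorFlow 2 the same op is tf.linalg.cholesky); the matrix A and right-hand side b are made up purely for illustration:

import numpy as np
import tensorflow as tf

# Build a small symmetric positive-definite matrix A = M M^T + eps * I.
M = np.random.randn(4, 4)
A = tf.constant(M.dot(M.T) + 1e-6 * np.eye(4), dtype=tf.float64)

# L is lower triangular with A = L L^T.
L = tf.cholesky(A)

# Typical downstream use: solve A x = b via two triangular solves.
b = tf.ones((4, 1), dtype=tf.float64)
x = tf.cholesky_solve(L, b)

with tf.Session() as sess:
    L_val, x_val = sess.run([L, x])
    print(L_val)
    print(x_val)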
Example 1: compute_upper_bound

def compute_upper_bound(self):
    num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)
    Kdiag = self.kern.Kdiag(self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kuf = self.feature.Kuf(self.kern, self.X)
    L = tf.cholesky(Kuu)
    LB = tf.cholesky(Kuu + self.likelihood.variance ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))
    LinvKuf = tf.matrix_triangular_solve(L, Kuf, lower=True)
    # Using the Trace bound, from Titsias' presentation
    c = tf.reduce_sum(Kdiag) - tf.reduce_sum(LinvKuf ** 2.0)
    # Kff = self.kern.K(self.X)
    # Qff = tf.matmul(Kuf, LinvKuf, transpose_a=True)
    # Alternative bound on max eigenval:
    # c = tf.reduce_max(tf.reduce_sum(tf.abs(Kff - Qff), 0))
    corrected_noise = self.likelihood.variance + c
    const = -0.5 * num_data * tf.log(2 * np.pi * self.likelihood.variance)
    logdet = tf.reduce_sum(tf.log(tf.diag_part(L))) - tf.reduce_sum(tf.log(tf.diag_part(LB)))
    LC = tf.cholesky(Kuu + corrected_noise ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))
    v = tf.matrix_triangular_solve(LC, corrected_noise ** -1.0 * tf.matmul(Kuf, self.Y), lower=True)
    quad = -0.5 * corrected_noise ** -1.0 * tf.reduce_sum(self.Y ** 2.0) + 0.5 * tf.reduce_sum(v ** 2.0)
    return const + logdet + quad

Author: vincentadam87, Project: GPflow, Lines of code: 28, Source file: sgpr.py
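For orientation, the quantity returned here matches the Titsias-style trace-corrected upper bound on the log marginal likelihood; this is my reading of the code above, not a statement from the original article:

\log p(y) \le -\tfrac{N}{2}\log 2\pi - \tfrac{1}{2}\log\lvert Q_{ff} + \sigma^2 I\rvert - \tfrac{1}{2}\, y^\top \big(Q_{ff} + (\sigma^2 + c)\, I\big)^{-1} y,
\qquad Q_{ff} = K_{fu} K_{uu}^{-1} K_{uf}, \qquad c = \operatorname{tr}(K_{ff} - Q_{ff}),

where const + logdet assemble the first two terms via the Cholesky factors L and LB, and quad assembles the third via LC.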
Example 2: build_predict

def build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. For a derivation of the terms in here, see the associated SGPR
    notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    err = self.Y - self.mean_function(self.X)
    Kuf = self.kern.K(self.Z, self.X)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    Kus = self.kern.K(self.Z, Xnew)
    sigma = tf.sqrt(self.likelihood.variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, tf.transpose(A)) + eye(num_inducing)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tf.transpose(tmp2), c)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) \
            - tf.matmul(tf.transpose(tmp1), tmp1)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.pack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var

Author: gbohner, Project: GPflow, Lines of code: 32, Source file: sgpr.py
Example 3: build_likelihood

def build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood. For a derivation of the terms in here, see the associated
    SGPR notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    num_data = tf.shape(self.Y)[0]
    output_dim = tf.shape(self.Y)[1]
    err = self.Y - self.mean_function(self.X)
    Kdiag = self.kern.Kdiag(self.X)
    Kuf = self.kern.K(self.Z, self.X)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    L = tf.cholesky(Kuu)
    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) * tf.sqrt(1. / self.likelihood.variance)
    AAT = tf.matmul(A, tf.transpose(A))
    B = AAT + eye(num_inducing)
    LB = tf.cholesky(B)
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, err), lower=True) * tf.sqrt(1. / self.likelihood.variance)
    # compute log marginal bound
    bound = -0.5 * tf.cast(num_data * output_dim, tf.float64) * np.log(2 * np.pi)
    bound += -tf.cast(output_dim, tf.float64) * tf.reduce_sum(tf.log(tf.user_ops.get_diag(LB)))
    bound += -0.5 * tf.cast(num_data * output_dim, tf.float64) * tf.log(self.likelihood.variance)
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * (tf.reduce_sum(Kdiag) / self.likelihood.variance - tf.reduce_sum(tf.user_ops.get_diag(AAT)))
    return bound

Author: agarbuno, Project: GPflow, Lines of code: 33, Source file: sgpr.py
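Written compactly, the bound assembled term by term above is the collapsed variational lower bound of Titsias (2009) for sparse GP regression; this is my summary of the standard result (per output column, with err = y − m(X) treated as the zero-mean target):

\mathcal{L} = \log \mathcal{N}\!\big(y \mid 0,\; Q_{ff} + \sigma^2 I\big) \;-\; \frac{1}{2\sigma^2}\,\operatorname{tr}\!\big(K_{ff} - Q_{ff}\big),
\qquad Q_{ff} = K_{fu} K_{uu}^{-1} K_{uf}.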
Example 4: testNonSquareMatrix

def testNonSquareMatrix(self):
    with self.assertRaises(ValueError):
        tf.cholesky(np.array([[1., 2., 3.], [3., 4., 5.]]))
    with self.assertRaises(ValueError):
        tf.cholesky(
            np.array([[[1., 2., 3.], [3., 4., 5.]],
                      [[1., 2., 3.], [3., 4., 5.]]]))

Author: 821760408-sp, Project: tensorflow, Lines of code: 7, Source file: cholesky_op_test.py
Example 5: build_predict

def build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. Note that this is very similar to the SGPR prediction, for which
    there are notes in the SGPR notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    psi0, psi1, psi2 = ke.build_psi_stats(self.Z, self.kern, self.X_mean, self.X_var)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    Kus = self.kern.K(self.Z, Xnew)
    sigma2 = self.likelihood.variance
    sigma = tf.sqrt(sigma2)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, tf.transpose(psi1), lower=True) / sigma
    tmp = tf.matrix_triangular_solve(L, psi2, lower=True)
    AAT = tf.matrix_triangular_solve(L, tf.transpose(tmp), lower=True) / sigma2
    B = AAT + eye(num_inducing)
    LB = tf.cholesky(B)
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, self.Y), lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tf.transpose(tmp2), c)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) \
            - tf.matmul(tf.transpose(tmp1), tmp1)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.pack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var

Author: blutooth, Project: dgp, Lines of code: 34, Source file: gplvm.py
Example 6: build_predict

def build_predict(self, Xnew, full_cov=False):
    err = self.Y
    Kuf = self.RBF(self.Z, self.X)
    Kuu = self.RBF(self.Z, self.Z) + eye(self.num_inducing) * 1e-6
    Kus = self.RBF(self.Z, Xnew)
    sigma = tf.sqrt(self.likelihood_variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, tf.transpose(A)) + eye(self.num_inducing)  # num_inducing taken from self, as above
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tf.transpose(tmp2), c)
    if full_cov:
        var = self.RBF(Xnew, Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) \
            - tf.matmul(tf.transpose(tmp1), tmp1)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.RBF(Xnew, Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.pack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean, var

Author: SebastianPopescu, Project: ConnectionistNetwork, Lines of code: 34, Source file: ConnectionistNetwork.py
Example 7: _build_predict

def _build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. For a derivation of the terms in here, see the associated SGPR
    notebook.
    """
    num_inducing = len(self.feature)
    err = self.Y - self.mean_function(self.X)
    Kuf = self.feature.Kuf(self.kern, self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kus = self.feature.Kuf(self.kern, Xnew)
    sigma = tf.sqrt(self.likelihood.variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tmp2, c, transpose_a=True)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
            - tf.matmul(tmp1, tmp1, transpose_a=True)
        shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.stack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var

Author: vincentadam87, Project: GPflow, Lines of code: 32, Source file: sgpr.py
Example 8: gauss_kl

def gauss_kl(min_q_mu, q_sq, K):
    q_mu = -1 * min_q_mu
    # q_sqrt = tf.cholesky(tf.squeeze(q_sqrt))
    # K is a variance... we sqrt later
    '''
    N=1
    Q=5
    q_mu=tf.random_normal([Q,1],dtype=tf.float64)
    q_var=tf.random_normal([Q,Q],dtype=tf.float64)
    q_var=q_var+tf.transpose(q_var [1,0])+1e+1*np.eye(Q)
    K=q_var
    q_sqrt=tf.cholesky(q_var)
    q_sqrt=tf.expand_dims(q_sqrt,-1)
    num_latent=1
    s=tf.Session()
    s.run(tf.initialize_all_variables())
    '''
    """
    Compute the KL divergence from
          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and the last dim of q_sqrt).
    q_sqrt = tf.cholesky(K)
    L = tf.cholesky(q_sq)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0], tf.float64)
    Lq = tf.batch_matrix_band_part(q_sqrt, -1, 0)
    # Log determinant of q covariance:
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
    LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    """
    V2 = tf.cholesky(K)
    V1 = tf.cholesky(q_sq)
    KL = h.Mul(tf.transpose(q_mu), tf.cholesky_solve(V2, q_mu))
    KL += tf.trace(tf.cholesky_solve(V2, q_sq))
    KL -= h.get_dim(K, 0)
    KL += tf.reduce_sum(2 * tf.log(tf.diag_part(V2)) - 2 * tf.log(tf.diag_part(V1)))
    return KL / 2

Author: blutooth, Project: dgp, Lines of code: 59, Source file: maxKL.py
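The live code path at the bottom of this function computes the standard closed-form KL divergence between multivariate Gaussians; stated explicitly (a standard identity, not text from the original article), with S = q_sq, mean μ = q_mu, and d the dimension:

KL\big[\mathcal{N}(\mu, S)\,\|\,\mathcal{N}(0, K)\big]
  = \tfrac{1}{2}\Big[\mu^\top K^{-1}\mu + \operatorname{tr}(K^{-1}S) - d + \log\lvert K\rvert - \log\lvert S\rvert\Big],

where the two log-determinants are read off the diagonals of the Cholesky factors V2 and V1.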
Example 9: log_det

def log_det(Z):
    # conditioned = condition(Z)
    # Symmetrise before factorising, to guard against numerical asymmetry.
    Z = (Z + tf.transpose(Z)) / 2
    chol = tf.cholesky(Z)
    # log|Z| = 2 * sum(log(diag(chol(Z))))
    logdet = 2 * tf.reduce_sum(tf.log(tf.diag_part(chol)))
    return logdet

Author: blutooth, Project: gp, Lines of code: 8, Source file: helper.py
Example 10: F_bound2_v2

def F_bound2_v2(y, S, Kmm, Knm, Kmnnm, Tr_Knn, sigma):
    # matrices to be used
    N = get_dim(y, 0)
    Kmm_chol = tf.cholesky(Kmm)
    Q_nn = tf.square(sigma) * np.eye(N) + Mul(Knm, tf.cholesky_solve(Kmm_chol, tf.transpose(Knm)))
    bound = -0.5 * (Tr_Knn - tf.trace(tf.cholesky_solve(Kmm_chol, Kmnnm))) / tf.square(sigma)
    bound += multivariate_normal(y, tf.zeros([N, 1], dtype=tf.float32), tf.cholesky(Q_nn))
    return bound

Author: blutooth, Project: gp, Lines of code: 8, Source file: helper.py
Example 11: natural_to_meanvarsqrt

def natural_to_meanvarsqrt(nat_1, nat_2):
    var_sqrt_inv = tf.cholesky(-2 * nat_2)
    var_sqrt = _inverse_lower_triangular(var_sqrt_inv)
    S = tf.matmul(var_sqrt, var_sqrt, transpose_a=True)
    mu = tf.matmul(S, nat_1)
    # We need the decomposition of S as L L^T, not as L^T L,
    # hence we need another cholesky.
    return mu, tf.cholesky(S)

Author: sanket-kamthe, Project: GPflow, Lines of code: 8, Source file: natgrad_optimizer.py
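As background (a standard identity, not a claim from the article): for a Gaussian \mathcal{N}(\mu, S) the natural parameters are \theta_1 = S^{-1}\mu and \theta_2 = -\tfrac{1}{2} S^{-1}, so the inverse map is

S = (-2\,\theta_2)^{-1}, \qquad \mu = S\,\theta_1,

which the function realises through the Cholesky factor of -2 * nat_2 and a triangular inverse.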
Example 12: multivariate_gaussian_log_density

def multivariate_gaussian_log_density(x, mu,
                                      Sigma=None, L=None,
                                      prec=None, L_prec=None):
    """
    Assume X is a single vector described by a multivariate Gaussian
    distribution with x ~ N(mu, Sigma).
    We accept parameterization in terms of the covariance matrix or
    its cholesky decomposition L (more efficient if available), or the
    precision matrix or its cholesky decomposition L_prec.
    The latter is useful when representing a Gaussian in its natural
    parameterization. Note that we still require the explicit mean mu
    (not the natural parameter prec*mu) since I'm too lazy to cover
    all the permutations of possible arguments (though this should be
    straightforward).
    """
    s = extract_shape(x)
    try:
        n, = s
    except:
        n, m = s
        assert(m == 1)
    if L is None and Sigma is not None:
        L = tf.cholesky(Sigma)
    if L_prec is None and prec is not None:
        L_prec = tf.cholesky(prec)
    if L is not None:
        neg_half_logdet = -tf.reduce_sum(tf.log(tf.diag_part(L)))
    else:
        assert(L_prec is not None)
        neg_half_logdet = tf.reduce_sum(tf.log(tf.diag_part(L_prec)))
    d = tf.reshape(x - mu, (n, 1))
    if L is not None:
        alpha = tf.matrix_triangular_solve(L, d, lower=True)
        exponential_part = tf.reduce_sum(tf.square(alpha))
    elif prec is not None:
        d = tf.reshape(d, (n, 1))
        exponential_part = tf.reduce_sum(d * tf.matmul(prec, d))
    else:
        assert(L_prec is not None)
        d = tf.reshape(d, (1, n))
        alpha = tf.matmul(d, L_prec)
        exponential_part = tf.reduce_sum(tf.square(alpha))
    n_log2pi = n * 1.83787706641  # log(2*pi) ~ 1.8379
    logp = -0.5 * n_log2pi
    logp += neg_half_logdet
    logp += -0.5 * exponential_part
    return logp

Author: BenJamesbabala, Project: bayesflow, Lines of code: 54, Source file: dists.py
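The expression assembled at the end is the familiar multivariate normal log-density (standard formula, stated here for reference):

\log \mathcal{N}(x \mid \mu, \Sigma)
  = -\tfrac{1}{2}\Big[n\log 2\pi + \log\lvert\Sigma\rvert + (x-\mu)^\top \Sigma^{-1}(x-\mu)\Big],

where, for \Sigma = L L^\top, the log-determinant is \log\lvert\Sigma\rvert = 2\sum_i \log L_{ii}, matching the neg_half_logdet term above.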
Example 13: Bound1

def Bound1(y, S, Kmm, Knm, Tr_Knn, sigma):
    # matrices to be used
    Kmm_chol = tf.cholesky(Kmm)
    sig_2 = tf.square(sigma)
    N = h.get_dim(y, 0)
    Q_nn = h.Mul(Knm, tf.cholesky_solve(Kmm_chol, tf.transpose(Knm)))
    Q_I_chol = tf.cholesky(sig_2 * np.eye(N) + Q_nn)
    bound = -0.5 * (Tr_Knn - Q_nn) / sig_2
    bound += h.multivariate_normal(y, tf.zeros([N, 1], dtype=tf.float32), Q_I_chol)
    bound -= 0.5 * tf.reduce_sum(S) / sig_2 + 0.1 * 0.5 * tf.reduce_sum(tf.log(S))
    return bound

Author: blutooth, Project: gp, Lines of code: 11, Source file: BayesianGPLVM.py
Example 14: _build_likelihood

def _build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R
    This method computes the variational lower bound on the likelihood,
    which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
    with
        q(f) = N(f | K alpha + mean, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    K_alpha = tf.matmul(K, self.q_alpha)
    f_mean = K_alpha + self.mean_function(self.X)
    # compute the variance for each of the outputs
    I = tf.tile(tf.expand_dims(tf.eye(self.num_data, dtype=settings.float_type), 0),
                [self.num_latent, 1, 1])
    A = I + tf.expand_dims(tf.transpose(self.q_lambda), 1) * \
        tf.expand_dims(tf.transpose(self.q_lambda), 2) * K
    L = tf.cholesky(A)
    Li = tf.matrix_triangular_solve(L, I)
    tmp = Li / tf.expand_dims(tf.transpose(self.q_lambda), 1)
    f_var = 1. / tf.square(self.q_lambda) - tf.transpose(tf.reduce_sum(tf.square(tmp), 1))
    # some statistics about A are used in the KL
    A_logdet = 2.0 * tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    trAi = tf.reduce_sum(tf.square(Li))
    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent +
                tf.reduce_sum(K_alpha * self.q_alpha))
    v_exp = self.likelihood.variational_expectations(f_mean, f_var, self.Y)
    return tf.reduce_sum(v_exp) - KL

Author: sanket-kamthe, Project: GPflow, Lines of code: 32, Source file: vgp.py
Example 15: initialize

def initialize(self, *args, **kwargs):
    # Store latent variables in a temporary attribute; MAP will
    # optimize `PointMass` random variables, which subsequently
    # optimizes mean parameters of the normal approximations.
    latent_vars_normal = self.latent_vars.copy()
    self.latent_vars = {z: PointMass(params=qz.loc)
                        for z, qz in six.iteritems(latent_vars_normal)}
    super(Laplace, self).initialize(*args, **kwargs)
    hessians = tf.hessians(self.loss, list(six.itervalues(self.latent_vars)))
    self.finalize_ops = []
    for z, hessian in zip(six.iterkeys(self.latent_vars), hessians):
        qz = latent_vars_normal[z]
        if isinstance(qz, (MultivariateNormalDiag, Normal)):
            scale_var = get_variables(qz.variance())[0]
            scale = 1.0 / tf.diag_part(hessian)
        else:  # qz is MultivariateNormalTriL
            scale_var = get_variables(qz.covariance())[0]
            scale = tf.matrix_inverse(tf.cholesky(hessian))
        self.finalize_ops.append(scale_var.assign(scale))
    self.latent_vars = latent_vars_normal.copy()
    del latent_vars_normal

Author: wujsAct, Project: edward, Lines of code: 25, Source file: laplace.py
Example 16: gp_predict_whitened

def gp_predict_whitened(Xnew, X, kern, V):
    """
    Given a whitened representation of the GP at the points X (V), produce the
    mean and variance of the GP at the points Xnew (F*).
    The GP has been centered (whitened) so that
        p(v) = N( 0, I)
        f = L v ,
    and so
        p(f) = N(0, LL^T) = N(0, K).
    We assume K independent GPs, represented by the columns of V. The GP conditional is:
        p(F*[:,i] | V[:,i]) = N (K_{*f} L^{-T} V[:,i], K_{**} - K_{*f} L^{-1} L^{-T} K_{f*})
    Xnew is a data matrix, size N* x D
    X is a data matrix, size N x D
    V is a matrix containing whitened GP values, size N x K
    See also:
        gaussian_gp_predict_whitened -- where there is no uncertainty in V
        gp_predict -- same, without the whitening
    """
    Kd = kern.Kdiag(Xnew)
    Kx = kern.K(X, Xnew)
    K = kern.K(X)
    L = tf.cholesky(K)
    A = tf.user_ops.triangular_solve(L, Kx, 'lower')
    fmean = tf.matmul(tf.transpose(A), V)
    fvar = Kd - tf.reduce_sum(tf.square(A), 0)
    return fmean, tf.expand_dims(fvar, 1) * tf.ones_like(V[0, :])

Author: beckdaniel, Project: GPflow, Lines of code: 34, Source file: conditionals.py
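To connect this to the usual (unwhitened) GP conditional mean K_{*f} K_{ff}^{-1} f, note that with K_{ff} = L L^\top and f = L v (my restatement of the standard whitening argument):

K_{*f} K_{ff}^{-1} f = K_{*f} L^{-\top} L^{-1} L v = K_{*f} L^{-\top} v,

which is exactly fmean = A^\top V in the code, since A = L^{-1} K_{f*}.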
Example 17: runFiniteDifferences

def runFiniteDifferences(self,
                         shapes,
                         dtypes=(tf.float32, tf.float64),
                         scalarTest=False):
    with self.test_session(use_gpu=False):
        for shape in shapes:
            for batch in False, True:
                for dtype in dtypes:
                    if not scalarTest:
                        x = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
                        tensor = tf.matmul(x, tf.transpose(x)) / shape[0]
                    else:
                        # This is designed to be a faster test for larger matrices.
                        x = tf.constant(np.random.randn(), dtype)
                        R = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
                        e = tf.mul(R, x)
                        tensor = tf.matmul(e, tf.transpose(e)) / shape[0]
                    # Inner-most matrices in tensor are positive definite.
                    if batch:
                        tensor = tf.tile(tf.expand_dims(tensor, 0), [4, 1, 1])
                    y = tf.cholesky(tensor)
                    if scalarTest:
                        y = tf.reduce_mean(y)
                    error = tf.test.compute_gradient_error(x, x._shape_as_list(), y,
                                                           y._shape_as_list())
                    tf.logging.info("error = %f", error)
                    if dtype == tf.float64:
                        self.assertLess(error, 1e-5)
                    else:
                        self.assertLess(error, 3e-3)

Author: 821760408-sp, Project: tensorflow, Lines of code: 31, Source file: cholesky_op_test.py
Example 18: _build_predict

def _build_predict(self, Xnew, full_cov=False):
    """
    The posterior variance of F is given by
        q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
    Here we project this to F*, the values of the GP at Xnew which is given
    by
        q(F*) = N ( F* | K_{*F} alpha + mean,
                    K_{**} - K_{*f}[K_{ff} + diag(lambda**-2)]^-1 K_{f*} )
    """
    # compute kernel things
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X)
    # predictive mean
    f_mean = tf.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)
    # predictive var
    A = K + tf.matrix_diag(tf.transpose(1. / tf.square(self.q_lambda)))
    L = tf.cholesky(A)
    Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
    LiKx = tf.matrix_triangular_solve(L, Kx_tiled)
    if full_cov:
        f_var = self.kern.K(Xnew) - tf.matmul(LiKx, LiKx, transpose_a=True)
    else:
        f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
    return f_mean, tf.transpose(f_var)

Author: sanket-kamthe, Project: GPflow, Lines of code: 27, Source file: vgp.py
Example 19: gauss_kl

def gauss_kl(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from
          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and the last dim of q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
        KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL

Author: davharris, Project: GPflow, Lines of code: 34, Source file: kullback_leiblers.py
Example 20: __init__

def __init__(self, mean, cov, d=None):
    mean = tf.convert_to_tensor(mean)
    cov = tf.convert_to_tensor(cov)
    try:
        d1, = util.extract_shape(mean)
        mean = tf.reshape(mean, (d1, 1))
    except:
        d1, k = util.extract_shape(mean)
        assert(k == 1)
    d2, _ = util.extract_shape(cov)
    assert(d1 == d2)
    if d is None:
        d = d1
    else:
        assert(d == d1)
    super(MVGaussianMeanCov, self).__init__(d=d)
    self._mean = mean
    self._cov = cov
    self._L_cov = tf.cholesky(cov)
    self._entropy = bf.dists.multivariate_gaussian_entropy(L=self._L_cov)
    L_prec_transpose = util.triangular_inv(self._L_cov)
    self._L_prec = tf.transpose(L_prec_transpose)
    self._prec = tf.matmul(self._L_prec, L_prec_transpose)
    self._prec_mean = tf.matmul(self._prec, self._mean)

Author: BenJamesbabala, Project: bayesflow, Lines of code: 31, Source file: gaussian_messages.py
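The precision computation at the end of this constructor relies on a simple identity (stated here for reference, not taken from the article): if \Sigma = L L^\top with L lower triangular, then

\Sigma^{-1} = L^{-\top} L^{-1},

so a single triangular inverse of the Cholesky factor yields both the precision matrix and a square-root factor of it, which the code caches as _prec and _L_prec.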
Note: the tensorflow.cholesky examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by their respective authors. Copyright remains with the original authors; for redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.