This article collects typical usage examples of the Python function tensorflow.matrix_triangular_solve. If you have been wondering what exactly matrix_triangular_solve does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
A total of 20 code examples of matrix_triangular_solve are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
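Before the collected examples, here is a minimal self-contained sketch of the op itself, written against the TensorFlow 1.x API used throughout this page (the matrix values below are made up purely for illustration). It solves the lower-triangular system L x = b by forward substitution and cross-checks the result against NumPy:

import numpy as np
import tensorflow as tf

# A lower-triangular system L x = b (illustrative values only).
L = tf.constant([[2., 0., 0.],
                 [3., 1., 0.],
                 [1., 4., 5.]])
b = tf.constant([[4.], [7.], [22.]])

# Forward substitution: returns x such that L @ x == b.
x = tf.matrix_triangular_solve(L, b, lower=True)

with tf.Session() as sess:
    x_val, L_val, b_val = sess.run([x, L, b])
    print(x_val)                          # [[2.], [1.], [3.2]]
    print(np.linalg.solve(L_val, b_val))  # same result from a dense solve

Setting lower=False treats the matrix as upper triangular instead, and adjoint=True solves with the (conjugate) transpose of the factor without materialising it.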
Example 1: build_predict
def build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew.
    """
    _, _, Luu, L, _, _, gamma = self.build_common_terms()
    Kus = self.kern.K(self.Z, Xnew)  # size M x Xnew
    w = tf.matrix_triangular_solve(Luu, Kus, lower=True)  # size M x Xnew
    tmp = tf.matrix_triangular_solve(tf.transpose(L), gamma, lower=False)
    mean = tf.matmul(tf.transpose(w), tmp) + self.mean_function(Xnew)
    intermediateA = tf.matrix_triangular_solve(L, w, lower=True)

    if full_cov:
        var = (
            self.kern.K(Xnew)
            - tf.matmul(tf.transpose(w), w)
            + tf.matmul(tf.transpose(intermediateA), intermediateA)
        )
        var = tf.tile(tf.expand_dims(var, 2), tf.pack([1, 1, tf.shape(self.Y)[1]]))
    else:
        var = (
            self.kern.Kdiag(Xnew)
            - tf.reduce_sum(tf.square(w), 0)
            + tf.reduce_sum(tf.square(intermediateA), 0)
        )  # size Xnew,
        var = tf.tile(tf.expand_dims(var, 1), tf.pack([1, tf.shape(self.Y)[1]]))
    return mean, var
Developer: GPflow, Project: GPflow, Lines: 28, Source: sgpr.py
Example 2: testNonSquareMatrix
def testNonSquareMatrix(self):
    # When the solve of a non-square matrix is attempted we should return
    # an error
    with self.test_session():
        with self.assertRaises(ValueError):
            matrix = tf.constant([[1., 2., 3.], [3., 4., 5.]])
            tf.matrix_triangular_solve(matrix, matrix)
Developer: hessenh, Project: Human-Activity-Recognition, Lines: 7, Source: matrix_triangular_solve_op_test.py
Example 3: compute_upper_bound
def compute_upper_bound(self):
    num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)

    Kdiag = self.kern.Kdiag(self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kuf = self.feature.Kuf(self.kern, self.X)

    L = tf.cholesky(Kuu)
    LB = tf.cholesky(Kuu + self.likelihood.variance ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))

    LinvKuf = tf.matrix_triangular_solve(L, Kuf, lower=True)
    # Using the Trace bound, from Titsias' presentation
    c = tf.reduce_sum(Kdiag) - tf.reduce_sum(LinvKuf ** 2.0)
    # Kff = self.kern.K(self.X)
    # Qff = tf.matmul(Kuf, LinvKuf, transpose_a=True)
    # Alternative bound on max eigenval:
    # c = tf.reduce_max(tf.reduce_sum(tf.abs(Kff - Qff), 0))
    corrected_noise = self.likelihood.variance + c

    const = -0.5 * num_data * tf.log(2 * np.pi * self.likelihood.variance)
    logdet = tf.reduce_sum(tf.log(tf.diag_part(L))) - tf.reduce_sum(tf.log(tf.diag_part(LB)))

    LC = tf.cholesky(Kuu + corrected_noise ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))
    v = tf.matrix_triangular_solve(LC, corrected_noise ** -1.0 * tf.matmul(Kuf, self.Y), lower=True)
    quad = -0.5 * corrected_noise ** -1.0 * tf.reduce_sum(self.Y ** 2.0) + 0.5 * tf.reduce_sum(v ** 2.0)

    return const + logdet + quad
Developer: vincentadam87, Project: GPflow, Lines: 28, Source: sgpr.py
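As a reading aid for the three terms returned above (a sketch read off the code, not quoted from Titsias' presentation): with \sigma^{2} the likelihood variance and c the trace slack computed from LinvKuf,

\mathrm{const} = -\tfrac{N}{2}\log(2\pi\sigma^{2}), \qquad
\mathrm{logdet} = \tfrac{1}{2}\log\frac{|K_{uu}|}{|K_{uu} + \sigma^{-2}K_{uf}K_{uf}^{\top}|}, \qquad
\mathrm{quad} = -\tfrac{1}{2(\sigma^{2}+c)}\,\|Y\|^{2} + \tfrac{1}{2}\|v\|^{2},

where c = \mathrm{tr}(K_{ff}) - \mathrm{tr}(K_{uu}^{-1}K_{uf}K_{uf}^{\top}) and v = L_{C}^{-1}\,(\sigma^{2}+c)^{-1}K_{uf}Y.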
Example 4: gauss_kl
def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from
          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)
    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Developer: GPflow, Project: GPflow, Lines: 30, Source: kullback_leiblers.py
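For reference, the terms accumulated in gauss_kl are the pieces of the closed-form Gaussian KL divergence (a standard identity, not taken from the source file). For a single latent column with q = N(\mu, S), S = L_q L_q^{\top}, and p = N(0, K), K = LL^{\top}:

KL(q \,\|\, p) = \tfrac{1}{2}\|L^{-1}\mu\|^{2} \;(\text{Mahalanobis})
\;+\; \tfrac{1}{2}\|L^{-1}L_q\|_{F}^{2} \;(\text{trace})
\;-\; \tfrac{D}{2} \;(\text{constant})
\;+\; \textstyle\sum_{i}\log L_{ii} \;(\text{prior log-det})
\;-\; \textstyle\sum_{i}\log (L_q)_{ii} \;(\text{log-det of } q).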
Example 5: build_predict
def build_predict(self, Xnew, full_cov=False):
    """
    Xnew is a data matrix, point at which we want to predict
    This method computes
        p(F* | Y)
    where F* are points on the GP at Xnew, Y are noisy observations at X.
    """
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X) + eye(self.num_data) * self.likelihood.variance
    L = tf.cholesky(K)
    A = tf.matrix_triangular_solve(L, Kx, lower=True)
    V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X))
    fmean = tf.matmul(tf.transpose(A), V) + self.mean_function(Xnew)
    if full_cov:
        fvar = self.kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
    else:
        fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
        fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, self.Y.shape[1]])
    return fmean, fvar
Developer: erenis, Project: GPflow, Lines: 25, Source: gpr.py
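The solves above implement the textbook GP regression posterior through one Cholesky factor L of K_{ff} + \sigma^{2}I (the standard result, stated here for orientation rather than taken from the file):

\mu_{*} = K_{*f}(K_{ff}+\sigma^{2}I)^{-1}(Y - m(X)) + m(X_{*}), \qquad
\Sigma_{*} = K_{**} - K_{*f}(K_{ff}+\sigma^{2}I)^{-1}K_{f*},

computed as A = L^{-1}K_{f*} and V = L^{-1}(Y - m(X)), so that \mu_{*} = A^{\top}V + m(X_{*}) and \Sigma_{*} = K_{**} - A^{\top}A.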
Example 6: build_predict
def build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. For a derivation of the terms in here, see the associated SGPR
    notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    err = self.Y - self.mean_function(self.X)
    Kuf = self.kern.K(self.Z, self.X)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    Kus = self.kern.K(self.Z, Xnew)
    sigma = tf.sqrt(self.likelihood.variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, tf.transpose(A)) + eye(num_inducing)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tf.transpose(tmp2), c)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) \
            - tf.matmul(tf.transpose(tmp1), tmp1)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.pack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var
Developer: gbohner, Project: GPflow, Lines: 32, Source: sgpr.py
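Unpacking the intermediates (a sketch of the algebra as it appears in the code, not quoted from the SGPR notebook the docstring refers to): with L = \mathrm{chol}(K_{uu}) and B = I + \sigma^{-2}L^{-1}K_{uf}K_{fu}L^{-\top},

\mathrm{mean} = \sigma^{-2}\,K_{su}L^{-\top}B^{-1}L^{-1}K_{uf}\,(Y - m(X)) + m(X_{*}), \qquad
\mathrm{var} = K_{ss} - K_{su}K_{uu}^{-1}K_{us} + K_{su}L^{-\top}B^{-1}L^{-1}K_{us},

where tmp1 = L^{-1}K_{us} and tmp2 = L_B^{-1}\,tmp1 supply the last two terms.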
Example 7: test_whiten
def test_whiten(self):
    """
    make sure that predicting using the whitened representation is the
    same as the non-whitened one.
    """
    with self.test_context() as sess:
        rng = np.random.RandomState(0)
        Xs, X, F, k, num_data, feed_dict = self.prepare()
        k.compile(session=sess)

        F_sqrt = tf.placeholder(settings.float_type, [num_data, 1])
        F_sqrt_data = rng.rand(num_data, 1)
        feed_dict[F_sqrt] = F_sqrt_data

        K = k.K(X)
        L = tf.cholesky(K)
        V = tf.matrix_triangular_solve(L, F, lower=True)
        V_sqrt = tf.matrix_triangular_solve(L, tf.diag(F_sqrt[:, 0]), lower=True)[None, :, :]

        Fstar_mean, Fstar_var = gpflow.conditionals.conditional(
            Xs, X, k, F, q_sqrt=F_sqrt)
        Fstar_w_mean, Fstar_w_var = gpflow.conditionals.conditional(
            Xs, X, k, V, q_sqrt=V_sqrt, white=True)

        mean_difference = sess.run(Fstar_w_mean - Fstar_mean, feed_dict=feed_dict)
        var_difference = sess.run(Fstar_w_var - Fstar_var, feed_dict=feed_dict)

        assert_allclose(mean_difference, 0, atol=4)
        assert_allclose(var_difference, 0, atol=4)
Developer: sanket-kamthe, Project: GPflow, Lines: 29, Source: test_conditionals.py
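The identity behind the whitened parametrisation being tested (a standard one, stated for context): if K = LL^{\top} and v \sim N(0, I), then f = Lv \sim N(0, K). Passing V = L^{-1}F and V_sqrt = L^{-1}\,\mathrm{diag}(F_{\mathrm{sqrt}}) together with white=True should therefore reproduce the non-whitened prediction, which is exactly what the two assert_allclose checks compare.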
Example 8: build_likelihood
def build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood. For a derivation of the terms in here, see the associated
    SGPR notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    num_data = tf.shape(self.Y)[0]
    output_dim = tf.shape(self.Y)[1]

    err = self.Y - self.mean_function(self.X)
    Kdiag = self.kern.Kdiag(self.X)
    Kuf = self.kern.K(self.Z, self.X)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    L = tf.cholesky(Kuu)

    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) * tf.sqrt(1. / self.likelihood.variance)
    AAT = tf.matmul(A, tf.transpose(A))
    B = AAT + eye(num_inducing)
    LB = tf.cholesky(B)
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, err), lower=True) * tf.sqrt(1. / self.likelihood.variance)

    # compute log marginal bound
    bound = -0.5 * tf.cast(num_data * output_dim, tf.float64) * np.log(2 * np.pi)
    bound += -tf.cast(output_dim, tf.float64) * tf.reduce_sum(tf.log(tf.user_ops.get_diag(LB)))
    bound += -0.5 * tf.cast(num_data * output_dim, tf.float64) * tf.log(self.likelihood.variance)
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * (tf.reduce_sum(Kdiag) / self.likelihood.variance - tf.reduce_sum(tf.user_ops.get_diag(AAT)))

    return bound
Developer: agarbuno, Project: GPflow, Lines: 33, Source: sgpr.py
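The quantity assembled here has the familiar collapsed form of the Titsias-style sparse GP bound (a reference formula under that assumption, which the surrounding SGPR code suggests; it is not quoted from the notebook):

\mathcal{L} = \log N\!\left(Y \,\middle|\, m(X),\; Q_{ff} + \sigma^{2}I\right) \;-\; \frac{1}{2\sigma^{2}}\,\mathrm{tr}\!\left(K_{ff} - Q_{ff}\right), \qquad Q_{ff} = K_{fu}K_{uu}^{-1}K_{uf},

with the last line of the code contributing the trace correction, since \mathrm{tr}(AA^{\top}) = \sigma^{-2}\,\mathrm{tr}(Q_{ff}).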
Example 9: gauss_kl_diag
def gauss_kl_diag(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from
          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and q_sqrt.
    q_mu is a matrix, each column contains a mean
    q_sqrt is a matrix, each column represents the diagonal of a square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(q_sqrt)))  # Log-det of q-cov
    L_inv = tf.matrix_triangular_solve(L, eye(tf.shape(L)[0]), lower=True)
    K_inv = tf.matrix_triangular_solve(tf.transpose(L), L_inv, lower=False)
    KL += 0.5 * tf.reduce_sum(tf.expand_dims(tf.diag_part(K_inv), 1)
                              * tf.square(q_sqrt))  # Trace term.
    return KL
Developer: blutooth, Project: dgp, Lines: 33, Source: kullback_leiblers.py
Example 10: build_predict
def build_predict(self, Xnew, full_cov=False):
    err = self.Y
    Kuf = self.RBF(self.Z, self.X)
    Kuu = self.RBF(self.Z, self.Z) + eye(self.num_inducing) * 1e-6
    Kus = self.RBF(self.Z, Xnew)
    sigma = tf.sqrt(self.likelihood_variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, tf.transpose(A)) + eye(self.num_inducing)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tf.transpose(tmp2), c)
    if full_cov:
        var = self.RBF(Xnew, Xnew) + tf.matmul(tf.transpose(tmp2), tmp2) \
            - tf.matmul(tf.transpose(tmp1), tmp1)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.RBF(Xnew, Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.pack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean, var
Developer: SebastianPopescu, Project: ConnectionistNetwork, Lines: 34, Source: ConnectionistNetwork.py
Example 11: gauss_kl
def gauss_kl(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from
          q(x) = N(q_mu, q_sqrt^2)
    to
          p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and the last dim of q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
        KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Developer: davharris, Project: GPflow, Lines: 34, Source: kullback_leiblers.py
Example 12: _build_predict
def _build_predict(self, Xnew, full_cov=False):
    """
    Compute the mean and variance of the latent function at some new points
    Xnew. For a derivation of the terms in here, see the associated SGPR
    notebook.
    """
    num_inducing = len(self.feature)
    err = self.Y - self.mean_function(self.X)
    Kuf = self.feature.Kuf(self.kern, self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kus = self.feature.Kuf(self.kern, Xnew)
    sigma = tf.sqrt(self.likelihood.variance)
    L = tf.cholesky(Kuu)
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
    tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
    mean = tf.matmul(tmp2, c, transpose_a=True)
    if full_cov:
        var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
            - tf.matmul(tmp1, tmp1, transpose_a=True)
        shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 2), shape)
    else:
        var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
            - tf.reduce_sum(tf.square(tmp1), 0)
        shape = tf.stack([1, tf.shape(self.Y)[1]])
        var = tf.tile(tf.expand_dims(var, 1), shape)
    return mean + self.mean_function(Xnew), var
Developer: vincentadam87, Project: GPflow, Lines: 32, Source: sgpr.py
Example 13: testNotInvertible
def testNotInvertible(self):
    # The input should be invertible.
    with self.test_session():
        with self.assertRaisesOpError("Input matrix is not invertible."):
            # The matrix has a zero on the diagonal.
            matrix = tf.constant([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
            tf.matrix_triangular_solve(matrix, matrix).eval()
Developer: hessenh, Project: Human-Activity-Recognition, Lines: 7, Source: matrix_triangular_solve_op_test.py
Example 14: testWrongDimensions
def testWrongDimensions(self):
    # The matrix and right-hand sides should have the same number of rows.
    with self.test_session():
        matrix = tf.constant([[1., 0.], [0., 1.]])
        rhs = tf.constant([[1., 0.]])
        with self.assertRaises(ValueError):
            tf.matrix_triangular_solve(matrix, rhs)
Developer: hessenh, Project: Human-Activity-Recognition, Lines: 8, Source: matrix_triangular_solve_op_test.py
Example 15: _expectation
def _expectation(p, kern1, feat1, kern2, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
        - Ka_{.,.}, Kb_{.,.} :: RBF kernels
    Ka and Kb as well as Z1 and Z2 can differ from each other, but this is supported
    only if the Gaussian p is Diagonal (p.cov NxD) and Ka, Kb have disjoint active_dims
    in which case the joint expectations simplify into a product of expectations

    :return: NxMxM
    """
    if kern1.on_separate_dims(kern2) and isinstance(p, DiagonalGaussian):  # no joint expectations required
        eKxz1 = expectation(p, (kern1, feat1))
        eKxz2 = expectation(p, (kern2, feat2))
        return eKxz1[:, :, None] * eKxz2[:, None, :]

    if feat1 != feat2 or kern1 != kern2:
        raise NotImplementedError("The expectation over two kernels has only an "
                                  "analytical implementation if both kernels are equal.")

    kern = kern1
    feat = feat1

    with params_as_tensors_for(kern), params_as_tensors_for(feat):
        # use only active dimensions
        Xcov = kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
        Z, Xmu = kern._slice(feat.Z, p.mu)

        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]

        squared_lengthscales = kern.lengthscales ** 2. if kern.ARD \
            else tf.zeros((D,), dtype=settings.tf_float) + kern.lengthscales ** 2.

        sqrt_det_L = tf.reduce_prod(0.5 * squared_lengthscales) ** 0.5
        C = tf.cholesky(0.5 * tf.matrix_diag(squared_lengthscales) + Xcov)  # NxDxD
        dets = sqrt_det_L / tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(C)), axis=1))  # N

        C_inv_mu = tf.matrix_triangular_solve(C, tf.expand_dims(Xmu, 2), lower=True)  # NxDx1
        C_inv_z = tf.matrix_triangular_solve(C,
                                             tf.tile(tf.expand_dims(tf.transpose(Z) / 2., 0), [N, 1, 1]),
                                             lower=True)  # NxDxM
        mu_CC_inv_mu = tf.expand_dims(tf.reduce_sum(tf.square(C_inv_mu), 1), 2)  # Nx1x1
        z_CC_inv_z = tf.reduce_sum(tf.square(C_inv_z), 1)  # NxM
        zm_CC_inv_zn = tf.matmul(C_inv_z, C_inv_z, transpose_a=True)  # NxMxM
        two_z_CC_inv_mu = 2 * tf.matmul(C_inv_z, C_inv_mu, transpose_a=True)[:, :, 0]  # NxM

        exponent_mahalanobis = mu_CC_inv_mu + tf.expand_dims(z_CC_inv_z, 1) + \
                               tf.expand_dims(z_CC_inv_z, 2) + 2 * zm_CC_inv_zn - \
                               tf.expand_dims(two_z_CC_inv_mu, 2) - tf.expand_dims(two_z_CC_inv_mu, 1)  # NxMxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxMxM

        # Compute sqrt(self.K(Z)) explicitly to prevent automatic gradient from
        # being NaN sometimes, see pull request #615
        kernel_sqrt = tf.exp(-0.25 * kern.square_dist(Z, None))
        return kern.variance ** 2 * kernel_sqrt * \
               tf.reshape(dets, [N, 1, 1]) * exponent_mahalanobis
Developer: vincentadam87, Project: GPflow, Lines: 57, Source: expectations.py
Example 16: build_likelihood
def build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R
    This method computes the variational lower bound on the likelihood, which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
    with
        q(f) = N(f | K alpha, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    f_mean = tf.matmul(K, self.q_alpha) + self.mean_function(self.X)

    # for each of the data-dimensions (columns of Y), find the diagonal of the
    # variance, and also relevant parts of the KL.
    f_var, A_logdet, trAi = [], tf.zeros((1,), tf.float64), tf.zeros((1,), tf.float64)
    for d in range(self.num_latent):
        b = self.q_lambda[:, d]
        B = tf.expand_dims(b, 1)
        A = eye(self.num_data) + K * B * tf.transpose(B)
        L = tf.cholesky(A)
        Li = tf.matrix_triangular_solve(L, eye(self.num_data), lower=True)
        LiBi = Li / b
        # full_sigma: return tf.diag(b**-2) - LiBi.T.dot(LiBi)
        f_var.append(1. / tf.square(b) - tf.reduce_sum(tf.square(LiBi), 0))
        A_logdet += 2 * tf.reduce_sum(tf.log(tf.user_ops.get_diag(L)))
        trAi += tf.reduce_sum(tf.square(Li))

    f_var = tf.transpose(tf.pack(f_var))

    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent + tf.reduce_sum(f_mean * self.q_alpha))

    return tf.reduce_sum(self.likelihood.variational_expectations(f_mean, f_var, self.Y)) - KL
Developer: agarbuno, Project: GPflow, Lines: 35, Source: vgp.py
Example 17: _build_predict
def _build_predict(self, Xnew, full_cov=False):
    """
    The posterior variance of F is given by
        q(f) = N(f | K alpha + mean, [K^-1 + diag(lambda**2)]^-1)
    Here we project this to F*, the values of the GP at Xnew which is given
    by
        q(F*) = N ( F* | K_{*F} alpha + mean, K_{**} - K_{*f}[K_{ff} +
                    diag(lambda**-2)]^-1 K_{f*} )
    """
    # compute kernel things
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X)

    # predictive mean
    f_mean = tf.matmul(Kx, self.q_alpha, transpose_a=True) + self.mean_function(Xnew)

    # predictive var
    A = K + tf.matrix_diag(tf.transpose(1. / tf.square(self.q_lambda)))
    L = tf.cholesky(A)
    Kx_tiled = tf.tile(tf.expand_dims(Kx, 0), [self.num_latent, 1, 1])
    LiKx = tf.matrix_triangular_solve(L, Kx_tiled)
    if full_cov:
        f_var = self.kern.K(Xnew) - tf.matmul(LiKx, LiKx, transpose_a=True)
    else:
        f_var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx), 1)
    return f_mean, tf.transpose(f_var)
Developer: sanket-kamthe, Project: GPflow, Lines: 27, Source: vgp.py
Example 18: multivariate_normal
def multivariate_normal(x, mu, L):
    """
    Computes the log-density of a multivariate normal.
    :param x  : Dx1 or DxN sample(s) for which we want the density
    :param mu : Dx1 or DxN mean(s) of the normal distribution
    :param L  : DxD Cholesky decomposition of the covariance matrix
    :return p : (1,) or (N,) vector of log densities for each of the N x's and/or mu's

    x and mu are either vectors or matrices. If both are vectors (N,1):
    p[0] = log pdf(x) where x ~ N(mu, LL^T)
    If at least one is a matrix, we assume independence over the *columns*:
    the number of rows must match the size of L. Broadcasting behaviour:
    p[n] = log pdf of:
    x[n] ~ N(mu, LL^T) or x ~ N(mu[n], LL^T) or x[n] ~ N(mu[n], LL^T)
    """
    if x.shape.ndims is None:
        warnings.warn('Shape of x must be 2D at computation.')
    elif x.shape.ndims != 2:
        raise ValueError('Shape of x must be 2D.')
    if mu.shape.ndims is None:
        warnings.warn('Shape of mu may be unknown or not 2D.')
    elif mu.shape.ndims != 2:
        raise ValueError('Shape of mu must be 2D.')

    d = x - mu
    alpha = tf.matrix_triangular_solve(L, d, lower=True)
    num_dims = tf.cast(tf.shape(d)[0], L.dtype)
    p = - 0.5 * tf.reduce_sum(tf.square(alpha), 0)
    p -= 0.5 * num_dims * np.log(2 * np.pi)
    p -= tf.reduce_sum(tf.log(tf.matrix_diag_part(L)))
    return p
Developer: vincentadam87, Project: GPflow, Lines: 31, Source: logdensities.py
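Written out, the three terms accumulated in p are the Gaussian log-density evaluated through the Cholesky factor (a standard identity, stated here for orientation):

\log N(x \mid \mu, LL^{\top}) = -\tfrac{1}{2}\,\|L^{-1}(x-\mu)\|^{2} \;-\; \tfrac{D}{2}\log(2\pi) \;-\; \sum_{i}\log L_{ii},

using (x-\mu)^{\top}(LL^{\top})^{-1}(x-\mu) = \|L^{-1}(x-\mu)\|^{2} and \log|LL^{\top}| = 2\sum_{i}\log L_{ii}.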
Example 19: _verifySolve
def _verifySolve(self, x, y, lower=True, adjoint=False, batch_dims=None, use_gpu=False):
    for np_type in [np.float32, np.float64]:
        a = x.astype(np_type)
        b = y.astype(np_type)
        # For numpy.solve we have to explicitly zero out the strictly
        # upper or lower triangle.
        if lower and a.size > 0:
            a_np = np.tril(a)
        elif a.size > 0:
            a_np = np.triu(a)
        else:
            a_np = a
        if adjoint:
            a_np = np.conj(np.transpose(a_np))

        if batch_dims is not None:
            a = np.tile(a, batch_dims + [1, 1])
            a_np = np.tile(a_np, batch_dims + [1, 1])
            b = np.tile(b, batch_dims + [1, 1])

        with self.test_session(use_gpu=use_gpu):
            tf_ans = tf.matrix_triangular_solve(a, b, lower=lower, adjoint=adjoint)
            out = tf_ans.eval()
        np_ans = np.linalg.solve(a_np, b)

        self.assertEqual(np_ans.shape, tf_ans.get_shape())
        self.assertEqual(np_ans.shape, out.shape)
        self.assertAllClose(np_ans, out)
Developer: ComeOnGetMe, Project: tensorflow, Lines: 27, Source: matrix_triangular_solve_op_test.py
Example 20: get_cholesky_solve_terms
def get_cholesky_solve_terms(Z, C=C):
    C_inv_z = tf.matrix_triangular_solve(
        C, tf.tile(tf.expand_dims(tf.transpose(Z), 0),
                   [N, 1, 1]), lower=True)  # [N, D, M]
    z_CC_inv_z = tf.reduce_sum(tf.square(C_inv_z), 1)  # [N, M]
    return C_inv_z, z_CC_inv_z
Developer: sanket-kamthe, Project: GPflow, Lines: 7, Source: expectations.py
Note: the tensorflow.matrix_triangular_solve examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.