This article collects typical usage examples of the Python function tensorflow.diag_part. If you have been wondering what diag_part does, how to call it, or what it looks like in real code, the hand-picked samples below should help.
20 code examples of diag_part are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
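Before the project-specific examples, here is a minimal, self-contained sketch of what tf.diag_part does: it extracts the main diagonal of a square matrix (the inverse operation of tf.diag). It is written against the TF 1.x API used throughout this page; in TensorFlow 2.x the equivalent function is tf.linalg.diag_part.

import numpy as np
import tensorflow as tf  # TF 1.x, matching the examples below

x = tf.constant(np.arange(9.0).reshape(3, 3))
d = tf.diag_part(x)  # picks out x[0, 0], x[1, 1], x[2, 2]

with tf.Session() as sess:
    print(sess.run(d))  # [0. 4. 8.]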
Example 1: build_likelihood
def build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood. For a derivation of the terms in here, see the associated
    SGPR notebook.
    """
    num_inducing = tf.shape(self.Z)[0]
    num_data = tf.shape(self.Y)[0]
    output_dim = tf.shape(self.Y)[1]

    err = self.Y - self.mean_function(self.X)
    Kdiag = self.kern.Kdiag(self.X)
    Kuf = self.kern.K(self.Z, self.X)
    Kuu = self.kern.K(self.Z) + eye(num_inducing) * 1e-6
    L = tf.cholesky(Kuu)

    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / \
        tf.sqrt(self.likelihood.variance)
    AAT = tf.matmul(A, tf.transpose(A))
    B = AAT + eye(num_inducing)
    LB = tf.cholesky(B)
    c = tf.matrix_triangular_solve(LB, tf.matmul(A, err), lower=True) / \
        tf.sqrt(self.likelihood.variance)

    # compute log marginal bound
    bound = -0.5 * tf.cast(num_data * output_dim, tf.float64) * np.log(2 * np.pi)
    bound += -tf.cast(output_dim, tf.float64) * tf.reduce_sum(tf.log(tf.diag_part(LB)))
    bound += -0.5 * tf.cast(num_data * output_dim, tf.float64) * tf.log(self.likelihood.variance)
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * (tf.reduce_sum(Kdiag) / self.likelihood.variance - tf.reduce_sum(tf.diag_part(AAT)))
    return bound
Developer: ShuaiW, Project: GPflow, Lines: 35, Source: sgpr.py
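A pattern that recurs in nearly every example on this page: for a positive-definite matrix $B$ with Cholesky factor $L_B$ (so $B = L_B L_B^\top$), the log-determinant collapses to a sum over the factor's diagonal,

\[
\log|B| = 2\sum_i \log (L_B)_{ii},
\]

which is exactly what tf.reduce_sum(tf.log(tf.diag_part(LB))) computes (here multiplied by output_dim, with the factor of 2 folded into the bound's coefficients).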
Example 2: gauss_kl
def gauss_kl(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and the last dim of q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
        KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Developer: davharris, Project: GPflow, Lines: 34, Source: kullback_leiblers.py
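For reference (not part of the GPflow source), the terms accumulated above are the standard closed form of the KL divergence between Gaussians: with q = N(μ, S), p = N(0, K) and d the dimension,

\[
\mathrm{KL}[q\,\|\,p] = \tfrac{1}{2}\Big(\mu^\top K^{-1}\mu + \operatorname{tr}(K^{-1}S) - d + \log|K| - \log|S|\Big),
\]

evaluated per column of q_mu / slice of q_sqrt and summed over the num_latent independent distributions. The Mahalanobis, prior log-det, constant, q log-det and trace terms map one-to-one onto the lines of the function.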
Example 3: gauss_kl_diag
def gauss_kl_diag(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and q_sqrt.
    q_mu is a matrix, each column contains a mean
    q_sqrt is a matrix, each column represents the diagonal of a square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(q_sqrt)))  # Log-det of q-cov
    L_inv = tf.matrix_triangular_solve(L, eye(tf.shape(L)[0]), lower=True)
    K_inv = tf.matrix_triangular_solve(tf.transpose(L), L_inv, lower=False)
    KL += 0.5 * tf.reduce_sum(tf.expand_dims(tf.diag_part(K_inv), 1)
                              * tf.square(q_sqrt))  # Trace term.
    return KL
Developer: blutooth, Project: dgp, Lines: 33, Source: kullback_leiblers.py
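The only change from the full-covariance version is the trace term; with a diagonal approximate covariance diag(s^2) it reduces (reference formula) to

\[
\operatorname{tr}\big(K^{-1}\operatorname{diag}(s^2)\big) = \sum_i (K^{-1})_{ii}\, s_i^2,
\]

which is why the code builds K_inv from two triangular solves and then only needs tf.diag_part(K_inv), multiplied elementwise by the squared columns of q_sqrt.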
Example 4: compute_upper_bound
def compute_upper_bound(self):
    num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)

    Kdiag = self.kern.Kdiag(self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    Kuf = self.feature.Kuf(self.kern, self.X)

    L = tf.cholesky(Kuu)
    LB = tf.cholesky(Kuu + self.likelihood.variance ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))

    LinvKuf = tf.matrix_triangular_solve(L, Kuf, lower=True)
    # Using the Trace bound, from Titsias' presentation
    c = tf.reduce_sum(Kdiag) - tf.reduce_sum(LinvKuf ** 2.0)
    # Kff = self.kern.K(self.X)
    # Qff = tf.matmul(Kuf, LinvKuf, transpose_a=True)
    # Alternative bound on max eigenval:
    # c = tf.reduce_max(tf.reduce_sum(tf.abs(Kff - Qff), 0))
    corrected_noise = self.likelihood.variance + c

    const = -0.5 * num_data * tf.log(2 * np.pi * self.likelihood.variance)
    logdet = tf.reduce_sum(tf.log(tf.diag_part(L))) - tf.reduce_sum(tf.log(tf.diag_part(LB)))

    LC = tf.cholesky(Kuu + corrected_noise ** -1.0 * tf.matmul(Kuf, Kuf, transpose_b=True))
    v = tf.matrix_triangular_solve(LC, corrected_noise ** -1.0 * tf.matmul(Kuf, self.Y), lower=True)
    quad = -0.5 * corrected_noise ** -1.0 * tf.reduce_sum(self.Y ** 2.0) + 0.5 * tf.reduce_sum(v ** 2.0)

    return const + logdet + quad
Developer: vincentadam87, Project: GPflow, Lines: 28, Source: sgpr.py
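One way to read the logdet term (my reading of the code, assuming K_fu = K_uf^T; not a statement from the GPflow docs): it equals ½ log|K_uu| − ½ log|K_uu + σ⁻² K_uf K_uf^T|, and by Sylvester's determinant identity

\[
\log\big|K_{uu} + \sigma^{-2}K_{uf}K_{uf}^\top\big| - \log|K_{uu}| = \log\big|I + \sigma^{-2} K_{fu}K_{uu}^{-1}K_{uf}\big|,
\]

so the two tf.diag_part sums over the Cholesky factors give −½ log|I + σ⁻² Q_ff| without ever forming an N×N matrix.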
Example 5: gauss_kl
def gauss_kl(min_q_mu, q_sq, K):
    q_mu = -1 * min_q_mu
    # q_sqrt = tf.cholesky(tf.squeeze(q_sqrt))
    # K is a variance... we sqrt later
    '''
    N=1
    Q=5
    q_mu=tf.random_normal([Q,1],dtype=tf.float64)
    q_var=tf.random_normal([Q,Q],dtype=tf.float64)
    q_var=q_var+tf.transpose(q_var [1,0])+1e+1*np.eye(Q)
    K=q_var
    q_sqrt=tf.cholesky(q_var)
    q_sqrt=tf.expand_dims(q_sqrt,-1)
    num_latent=1
    s=tf.Session()
    s.run(tf.initialize_all_variables())
    '''
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and the last dim of q_sqrt).

    q_sqrt = tf.cholesky(K)
    L = tf.cholesky(q_sq)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0], tf.float64)
    Lq = tf.batch_matrix_band_part(q_sqrt, -1, 0)
    # Log determinant of q covariance:
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
    LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    """
    V2 = tf.cholesky(K)
    V1 = tf.cholesky(q_sq)
    KL = h.Mul(tf.transpose(q_mu), tf.cholesky_solve(V2, q_mu))
    KL += tf.trace(tf.cholesky_solve(V2, q_sq))
    KL -= h.get_dim(K, 0)
    KL += tf.reduce_sum(2 * tf.log(tf.diag_part(V2)) - 2 * tf.log(tf.diag_part(V1)))
    return KL / 2
Developer: blutooth, Project: dgp, Lines: 59, Source: maxKL.py
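The last block is a direct transcription of the analytic KL with a full covariance S = q_sq rather than a square root: tf.cholesky_solve(V2, ·) applies K⁻¹, so the first two lines are the Mahalanobis and trace terms, h.get_dim(K, 0) subtracts the dimension, and the final line adds

\[
\log\frac{|K|}{|S|} = 2\sum_i \log (V_2)_{ii} - 2\sum_i \log (V_1)_{ii},
\]

before everything is halved on return (the same closed form quoted after Example 2).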
Example 6: log_det
def log_det(Z):
    # conditioned = condition(Z)
    Z = (Z + tf.transpose(Z)) / 2
    return 2 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(Z))))
    # NOTE: the lines below come after the return and are unreachable in the original source
    chol = tf.cholesky(Z)
    logdet = 2 * tf.reduce_sum(tf.log(tf.diag_part(chol)))
    return logdet
Developer: blutooth, Project: gp, Lines: 8, Source: helper.py
Example 7: multivariate_gaussian_log_density
def multivariate_gaussian_log_density(x, mu,
                                      Sigma=None, L=None,
                                      prec=None, L_prec=None):
    """
    Assume X is a single vector described by a multivariate Gaussian
    distribution with x ~ N(mu, Sigma).

    We accept parameterization in terms of the covariance matrix or
    its cholesky decomposition L (more efficient if available), or the
    precision matrix or its cholesky decomposition L_prec.
    The latter is useful when representing a Gaussian in its natural
    parameterization. Note that we still require the explicit mean mu
    (not the natural parameter prec*mu) since I'm too lazy to cover
    all the permutations of possible arguments (though this should be
    straightforward).
    """
    s = extract_shape(x)
    try:
        n, = s
    except:
        n, m = s
        assert(m == 1)

    if L is None and Sigma is not None:
        L = tf.cholesky(Sigma)
    if L_prec is None and prec is not None:
        L_prec = tf.cholesky(prec)

    if L is not None:
        neg_half_logdet = -tf.reduce_sum(tf.log(tf.diag_part(L)))
    else:
        assert(L_prec is not None)
        neg_half_logdet = tf.reduce_sum(tf.log(tf.diag_part(L_prec)))

    d = tf.reshape(x - mu, (n, 1))
    if L is not None:
        alpha = tf.matrix_triangular_solve(L, d, lower=True)
        exponential_part = tf.reduce_sum(tf.square(alpha))
    elif prec is not None:
        d = tf.reshape(d, (n, 1))
        exponential_part = tf.reduce_sum(d * tf.matmul(prec, d))
    else:
        assert(L_prec is not None)
        d = tf.reshape(d, (1, n))
        alpha = tf.matmul(d, L_prec)
        exponential_part = tf.reduce_sum(tf.square(alpha))

    n_log2pi = n * 1.83787706641
    logp = -0.5 * n_log2pi
    logp += neg_half_logdet
    logp += -0.5 * exponential_part
    return logp
Developer: BenJamesbabala, Project: bayesflow, Lines: 54, Source: dists.py
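For reference, the quantity being assembled is

\[
\log \mathcal{N}(x;\mu,\Sigma) = -\tfrac{n}{2}\log 2\pi - \tfrac{1}{2}\log|\Sigma| - \tfrac{1}{2}(x-\mu)^\top \Sigma^{-1}(x-\mu),
\]

where the constant 1.83787706641 is log(2π), and neg_half_logdet supplies −½ log|Σ| from the Cholesky diagonal (with the sign flipped when the precision factor L_prec is supplied instead, since log|Σ| = −log|Σ⁻¹|).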
Example 8: multivariate_gaussian_entropy
def multivariate_gaussian_entropy(Sigma=None, L=None, L_prec=None):
    if L is None and Sigma is not None:
        L = tf.cholesky(Sigma)

    if L is not None:
        half_logdet = tf.reduce_sum(tf.log(tf.diag_part(L)))
        n, _ = extract_shape(L)
    else:
        half_logdet = -tf.reduce_sum(tf.log(tf.diag_part(L_prec)))
        n, _ = extract_shape(L_prec)

    log_2pi = 1.83787706641
    entropy = .5 * n * (1 + log_2pi) + half_logdet
    return entropy
Developer: BenJamesbabala, Project: bayesflow, Lines: 15, Source: dists.py
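The corresponding closed form (reference only) is

\[
H\big[\mathcal{N}(\mu,\Sigma)\big] = \tfrac{n}{2}\big(1 + \log 2\pi\big) + \tfrac{1}{2}\log|\Sigma|,
\]

with half_logdet = Σᵢ log Lᵢᵢ = ½ log|Σ| taken from tf.diag_part of the Cholesky factor, and negated when the function is given the Cholesky factor of the precision matrix instead.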
Example 9: gauss_kl
def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, K)
    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean.
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance of q.
    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Developer: GPflow, Project: GPflow, Lines: 30, Source: kullback_leiblers.py
Example 10: gauss_kl_white
def gauss_kl_white(q_mu, q_sqrt, num_latent):
    """
    Compute the KL divergence from
        q(x) = N(q_mu, q_sqrt^2)
    to
        p(x) = N(0, I)
    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.
    q_mu is a matrix, each column contains a mean
    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
    matrix of the covariance.
    num_latent is an integer: the number of independent distributions (equal to
    the columns of q_mu and the last dim of q_sqrt).
    """
    KL = 0.5 * tf.reduce_sum(tf.square(q_mu))  # Mahalanobis term
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL -= 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        KL += 0.5 * tf.reduce_sum(tf.square(Lq))  # Trace term.
    return KL
Developer: blutooth, Project: dgp, Lines: 27, Source: kullback_leiblers.py
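With a whitened prior p = N(0, I), the general KL specialises (reference formula) to

\[
\mathrm{KL}[q\,\|\,p] = \tfrac{1}{2}\Big(\|\mu\|^2 + \operatorname{tr}(L_q L_q^\top) - d - \log|L_q L_q^\top|\Big),
\]

so no prior Cholesky or triangular solve is required: the Mahalanobis term is the squared mean, the trace term is the squared Frobenius norm of Lq, and the log-determinant again comes from tf.diag_part(Lq).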
Example 11: initialize
def initialize(self, *args, **kwargs):
    # Store latent variables in a temporary attribute; MAP will
    # optimize `PointMass` random variables, which subsequently
    # optimizes mean parameters of the normal approximations.
    latent_vars_normal = self.latent_vars.copy()
    self.latent_vars = {z: PointMass(params=qz.loc)
                        for z, qz in six.iteritems(latent_vars_normal)}

    super(Laplace, self).initialize(*args, **kwargs)

    hessians = tf.hessians(self.loss, list(six.itervalues(self.latent_vars)))
    self.finalize_ops = []
    for z, hessian in zip(six.iterkeys(self.latent_vars), hessians):
        qz = latent_vars_normal[z]
        if isinstance(qz, (MultivariateNormalDiag, Normal)):
            scale_var = get_variables(qz.variance())[0]
            scale = 1.0 / tf.diag_part(hessian)
        else:  # qz is MultivariateNormalTriL
            scale_var = get_variables(qz.covariance())[0]
            scale = tf.matrix_inverse(tf.cholesky(hessian))

        self.finalize_ops.append(scale_var.assign(scale))

    self.latent_vars = latent_vars_normal.copy()
    del latent_vars_normal
Developer: wujsAct, Project: edward, Lines: 25, Source: laplace.py
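What is being assigned here is the usual Laplace approximation (my summary of the code, not wording from the Edward docs): around the MAP estimate the posterior is approximated by a Gaussian whose covariance is the inverse Hessian of the negative log joint,

\[
\Sigma \;\approx\; H^{-1}, \qquad H = \nabla_z^2\big(-\log p(x, z)\big)\Big|_{z=z_{\mathrm{MAP}}},
\]

so a factorised (diagonal) approximation only needs 1.0 / tf.diag_part(hessian), while the full-covariance branch builds a triangular scale from the Cholesky factor of the Hessian.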
Example 12: diagPartOp
def diagPartOp(self, tensor, dtype, expected_ans, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
        tensor = tf.convert_to_tensor(tensor.astype(dtype))
        tf_ans_inv = tf.diag_part(tensor)
        inv_out = tf_ans_inv.eval()
        self.assertAllClose(inv_out, expected_ans)
        self.assertShapeEqual(expected_ans, tf_ans_inv)
Developer: Nishant23, Project: tensorflow, Lines: 7, Source: diag_op_test.py
Example 13: build_likelihood
def build_likelihood(self):
    """
    q_alpha, q_lambda are variational parameters, size N x R
    This method computes the variational lower bound on the likelihood, which is:
        E_{q(F)} [ \log p(Y|F) ] - KL[ q(F) || p(F)]
    with
        q(f) = N(f | K alpha, [K^-1 + diag(square(lambda))]^-1) .
    """
    K = self.kern.K(self.X)
    f_mean = tf.matmul(K, self.q_alpha) + self.mean_function(self.X)

    # for each of the data-dimensions (columns of Y), find the diagonal of the
    # variance, and also relevant parts of the KL.
    f_var, A_logdet, trAi = [], tf.zeros((1,), tf.float64), tf.zeros((1,), tf.float64)
    for d in range(self.num_latent):
        b = self.q_lambda[:, d]
        B = tf.expand_dims(b, 1)
        A = eye(self.num_data) + K * B * tf.transpose(B)
        L = tf.cholesky(A)
        Li = tf.matrix_triangular_solve(L, eye(self.num_data), lower=True)
        LiBi = Li / b
        # full_sigma: return tf.diag(b**-2) - LiBi.T.dot(LiBi)
        f_var.append(1. / tf.square(b) - tf.reduce_sum(tf.square(LiBi), 0))
        A_logdet += 2 * tf.reduce_sum(tf.log(tf.diag_part(L)))
        trAi += tf.reduce_sum(tf.square(Li))

    f_var = tf.transpose(tf.pack(f_var))

    KL = 0.5 * (A_logdet + trAi - self.num_data * self.num_latent + tf.reduce_sum(f_mean * self.q_alpha))

    return tf.reduce_sum(self.likelihood.variational_expectations(f_mean, f_var, self.Y)) - KL
Developer: ShuaiW, Project: GPflow, Lines: 35, Source: vgp.py
Example 14: logpdf
def logpdf(self, x, mean=None, cov=1):
    """Log of the probability density function.

    Parameters
    ----------
    x : tf.Tensor
        A 1-D or 2-D tensor.
    mean : tf.Tensor, optional
        A 1-D tensor. Defaults to zero mean.
    cov : tf.Tensor, optional
        A 1-D or 2-D tensor. Defaults to identity matrix.

    Returns
    -------
    tf.Tensor
        A tensor of one dimension less than the input.
    """
    x = tf.cast(x, dtype=tf.float32)
    x_shape = get_dims(x)
    if len(x_shape) == 1:
        d = x_shape[0]
    else:
        d = x_shape[1]

    if mean is None:
        r = x
    else:
        mean = tf.cast(mean, dtype=tf.float32)
        r = x - mean

    if cov is 1:
        L_inv = tf.diag(tf.ones([d]))
        det_cov = tf.constant(1.0)
    else:
        cov = tf.cast(cov, dtype=tf.float32)
        if len(cov.get_shape()) == 1:  # vector
            L_inv = tf.diag(1.0 / tf.sqrt(cov))
            det_cov = tf.reduce_prod(cov)
        else:  # matrix
            L = tf.cholesky(cov)
            L_inv = tf.matrix_inverse(L)
            det_cov = tf.pow(tf.reduce_prod(tf.diag_part(L)), 2)

    lps = -0.5 * d * tf.log(2 * np.pi) - 0.5 * tf.log(det_cov)
    if len(x_shape) == 1:  # vector
        r = tf.reshape(r, shape=(d, 1))
        inner = tf.matmul(L_inv, r)
        lps -= 0.5 * tf.matmul(inner, inner, transpose_a=True)
        return tf.squeeze(lps)
    else:  # matrix
        # TODO vectorize further
        out = []
        for r_vec in tf.unpack(r):
            r_vec = tf.reshape(r_vec, shape=(d, 1))
            inner = tf.matmul(L_inv, r_vec)
            out += [tf.squeeze(lps -
                               0.5 * tf.matmul(inner, inner, transpose_a=True))]
        return tf.pack(out)
Developer: TalkingData, Project: edward, Lines: 59, Source: distributions.py
Example 15: test
def test(self):
    for k in self.kernels:
        with k.tf_mode():
            k1 = k.Kdiag(self.X)
            k2 = tf.diag_part(k.K(self.X))
        k1, k2 = tf.Session().run([k1, k2],
                                  feed_dict={self.x_free: k.get_free_state(), self.X: self.X_data})
        self.failUnless(np.allclose(k1, k2))
Developer: blutooth, Project: dgp, Lines: 8, Source: test_kerns.py
Example 16: pred
def pred(X, X_m_1, mu, len_sc_1, noise_1):
    Kmm = h.tf_SE_K(X_m_1, X_m_1, len_sc_1, noise_1)
    Knm = h.tf_SE_K(X, X_m_1, len_sc_1, noise_1)
    posterior_mean = h.Mul(Knm, tf.matrix_solve(Kmm, mu))
    K_nn = h.tf_SE_K(X, X, len_sc_1, noise_1)
    full_cov = K_nn - h.Mul(Knm, tf.matrix_solve(Kmm, tf.transpose(Knm)))
    posterior_cov = tf.diag_part(full_cov)
    return posterior_mean, tf.reshape(posterior_cov, [N, 1]), full_cov
Developer: blutooth, Project: gp, Lines: 8, Source: dgp3.py
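These are the familiar Gaussian-process predictive equations (reference formulas; h.tf_SE_K is the project's squared-exponential kernel helper):

\[
\mu_* = K_{nm}K_{mm}^{-1}\mu, \qquad \Sigma_* = K_{nn} - K_{nm}K_{mm}^{-1}K_{mn},
\]

and since only per-point error bars are needed downstream, tf.diag_part(full_cov) extracts the marginal variances before they are reshaped into an [N, 1] column.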
Example 17: _compute_predictions
def _compute_predictions(self, init=None):
    """ Compute vanilla-RNN states and predictions. """
    with tf.variable_scope('states'):
        with tf.variable_scope("HMM"):
            with tf.variable_scope("transition"):
                skip_prob = tf.get_variable("skip", shape=[1], initializer=tf.constant_initializer(1e-1))
                # skip_prob = tf.Variable( np.array(1e-1, dtype=np.float32), name="skip")  # .astype(np.float32)
                self.W_trans = (1 - skip_prob) * get_transition_matrix().astype(np.float32) + skip_prob * np.eye(self.hidden_layer_size).astype(np.float32)
                # self.W_trans = tf.Variable( transition_with_skips,
                #                             name='W_trans', trainable=True)
                print("W_trans", self.W_trans.get_shape())
            with tf.variable_scope("emission"):
                "W_emit: [self.input_size, self.hidden_layer_size]"
                if self.emission_init is None:
                    self.W_emit = tf.get_variable("W_emit", shape=[self.hidden_layer_size, self.input_size],
                                                  initializer=tf.random_normal_initializer(0.0, 1e-6))
                else:
                    if not (self.emission_init.shape == (self.hidden_layer_size, self.input_size)):
                        print("self.emission_init.shape", self.emission_init.shape)
                        print("(self.hidden_layer_size, self.input_size)", (self.hidden_layer_size, self.input_size))
                        raise ValueError("wrong dimensions of `self.emission_init`")
                    self.W_emit = tf.Variable(self.emission_init.astype(np.float32), name="W_emit", trainable=False)
                self.W_emit_summary = tf.image_summary("W_emit", tf.reshape(self.W_emit, [1, self.hidden_layer_size, self.input_size, 1]))
                "idea: impose kernel similarity: maximize(W K W)"
                "[ self.hidden_layer_size, self.nt_in_pore ]"
                emission_in_pore_space = tf.matmul(self.map_hex_to_pore, self.W_emit)
                self.emission_similarity = tf.reduce_sum(tf.diag_part(tf.matmul(tf.transpose(emission_in_pore_space), (emission_in_pore_space))),
                                                         name="emission_w_similarity")
        if init is None:
            initial_state = tf.ones([self.hidden_layer_size],
                                    name='initial_state')
            initial_state = initial_state / self.hidden_layer_size
        else:
            initial_state = init
        # states = self._rnn_step_fw(initial_state[:,0], self.inputs[0,:])
        states = functional_ops.scan(self._rnn_step_fw, tf.identity(self.inputs),
                                     initializer=initial_state, name='states')
        states_fw_summary = tf.histogram_summary("states_fw", states)
        # states = states_fw
        # print("states:", states.get_shape())

    with tf.variable_scope('predictions'):
        # set some explicit initializer, orthogonal initialization
        "for now, keep identity mapping from hidden states to labels"
        "assume probability interpretation of values: should sum to one"
        W_pred = tf.Variable(np.eye(self.target_size, dtype=np.float32), name="W_pred", trainable=False)
        predictions = tf.matmul(states, W_pred, name='predictions')
        # predictions = states
        predictions_summary = tf.histogram_summary("predictions", predictions)
        # predictions = tf.nn.softmax(tf.matmul(states, W_pred), name='predictions'))
        # do predictions sum to one?

    return states, predictions
Developer: DSLituiev, Project: fast5-rnn, Lines: 57, Source: main.py
Example 18: predict2
def predict2():
    # predictions
    cov = h.Mul(K_mm_2, tf.matrix_inverse(K_mm_2 + K_mnnm_2 / tf.square(sigma_2)), K_mm_2)
    cov_chol = tf.cholesky(cov)
    mu = h.Mul(K_mm_2, tf.cholesky_solve(cov_chol, K_mn_2), Ytr) / tf.square(sigma_2)
    mean = h.Mul(K_nm_2, tf.matrix_solve(K_mm_1, mu))
    variance = K_nn_2 - h.Mul(K_nm_2, h.safe_chol(K_mm_2, tf.transpose(K_nm_2)))
    var_terms = 2 * tf.sqrt(tf.reshape(tf.diag_part(variance) + tf.square(sigma_2), [N, 1]))
    return mean, var_terms
Developer: blutooth, Project: gp, Lines: 9, Source: deepGP.py
Example 19: predict
def predict(K_mn, sigma, K_mm, K_nn):
    # predictions
    K_nm = tf.transpose(K_mn)
    Sig_Inv = 1e-1 * np.eye(M) + K_mm + K_mnnm_2 / tf.square(sigma)
    mu_post = h.Mul(tf.matrix_solve(Sig_Inv, K_mn), Ytr) / tf.square(sigma)
    mean = h.Mul(K_nm, mu_post)
    variance = K_nn - h.Mul(K_nm, h.safe_chol(K_mm, K_mn)) + h.Mul(K_nm, tf.matrix_solve(Sig_Inv, K_mn))
    var_terms = 2 * tf.sqrt(tf.reshape(tf.diag_part(variance) + tf.square(sigma), [N, 1]))
    return mean, var_terms
Developer: blutooth, Project: gp, Lines: 10, Source: dgp3.py
Example 20: diagOp
def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
        tf_ans = tf.diag(tf.convert_to_tensor(diag.astype(dtype)))
        out = tf_ans.eval()
        tf_ans_inv = tf.diag_part(expected_ans)
        inv_out = tf_ans_inv.eval()
        self.assertAllClose(out, expected_ans)
        self.assertAllClose(inv_out, diag)
        self.assertShapeEqual(expected_ans, tf_ans)
        self.assertShapeEqual(diag, tf_ans_inv)
Developer: Nishant23, Project: tensorflow, Lines: 10, Source: diag_op_test.py
Note: the tensorflow.diag_part examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not repost without permission.