This article collects typical usage examples of the tensorflow.square function in Python. If you are wondering what tf.square does, how to call it, or what real project code that uses it looks like, the curated examples below may help.
The following presents 20 code examples of the square function, sorted by popularity by default.
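Before the project examples, here is a minimal standalone sketch of what tf.square itself does (element-wise squaring). It assumes TensorFlow 2.x with eager execution, whereas most of the examples below use TensorFlow 1.x graph-mode APIs:

import tensorflow as tf  # assumes TF 2.x, eager execution

x = tf.constant([[1.0, -2.0], [3.0, 0.5]])
print(tf.square(x))  # element-wise square: [[1., 4.], [9., 0.25]]

# a very common pattern: mean squared error built from tf.square
y_true = tf.constant([1.0, 2.0, 3.0])
y_pred = tf.constant([1.5, 1.5, 3.0])
print(tf.reduce_mean(tf.square(y_pred - y_true)))  # 0.1666...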
Example 1: loss
def loss(self):
    # 1. The margin loss
    # [batch_size, 10, 1, 1]
    # max_l = max(0, m_plus-||v_c||)^2
    max_l = tf.square(tf.maximum(0., cfg.m_plus - self.v_length))
    # max_r = max(0, ||v_c||-m_minus)^2
    max_r = tf.square(tf.maximum(0., self.v_length - cfg.m_minus))
    assert max_l.get_shape() == [cfg.batch_size, 10, 1, 1]

    # reshape: [batch_size, 10, 1, 1] => [batch_size, 10]
    max_l = tf.reshape(max_l, shape=(cfg.batch_size, -1))
    max_r = tf.reshape(max_r, shape=(cfg.batch_size, -1))

    # calc T_c: [batch_size, 10]
    # T_c = Y, is my understanding correct? Try it.
    T_c = self.Y
    # [batch_size, 10], element-wise multiply
    L_c = T_c * max_l + cfg.lambda_val * (1 - T_c) * max_r

    self.margin_loss = tf.reduce_mean(tf.reduce_sum(L_c, axis=1))

    # 2. The reconstruction loss
    orgin = tf.reshape(self.X, shape=(cfg.batch_size, -1))
    squared = tf.square(self.decoded - orgin)
    self.reconstruction_err = tf.reduce_mean(squared)

    # 3. Total loss
    # The paper uses sum of squared error as the reconstruction error, but we
    # have used reduce_mean in `# 2. The reconstruction loss` to calculate
    # mean squared error. In order to keep in line with the paper, the
    # regularization scale should be 0.0005*784=0.392
    self.total_loss = self.margin_loss + cfg.regularization_scale * self.reconstruction_err
Author: wangjianyuweg | Project: CapsNet-Tensorflow | Lines: 33 | Source: capsNet.py
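For reference, the margin loss assembled in Example 1 (with T_c taken directly from the labels Y) is, per class c:

    L_c = T_c * max(0, m_plus - ||v_c||)^2 + lambda * (1 - T_c) * max(0, ||v_c|| - m_minus)^2

and the total margin loss is the batch mean of sum_c L_c. The 0.392 in the final comment is simply 0.0005 * 784: the paper scales a summed reconstruction error by 0.0005, while this code uses a per-pixel mean over 784 pixels instead.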
Example 2: multilinear_square_product
def multilinear_square_product(emb, tuples, l2=0):
    """
    Compute the square-product of real vectors at selected embeddings.

    This is the sum over all dimensions of the square of summed embedding vectors.

    :param emb: embedding matrix of size [n_emb, rank] containing float numbers
    :param tuples: tuple matrix of size [n_t, arity] containing integers
    :param l2: optional l2 regularization strength that is added to the score. If it is different from 0, the function
        returns a pair (pred, l2norm) where pred is the sample prediction and l2norm is the l2 norm of the selected
        embeddings
    :return: the multilinear square product between selected embeddings
        S[i] = sum_k ( sum_j E[I[i,k],j] )^2

    >>> emb = [[12., 0, 0], [0, 1, 0], [-1, 1, 1]]
    >>> idx = tf.Variable([[1,0,0],[1,1,0]])
    >>> g = multilinear_square_product(emb, idx)
    >>> print(tf_eval(g))
    [ 577.  148.]
    """
    emb_sel = tf.gather(emb, tuples)
    pred = tf.reduce_sum(tf.square(tf.reduce_sum(emb_sel, 1)), 1)

    if l2 == 0:  # unregularized prediction ==> returns only the predictions
        return pred
    else:  # l2 regularization of the selected embeddings
        reg = l2 * tf.reduce_sum(tf.square(emb_sel))
        return pred, reg
Author: Peratham | Project: factorix | Lines: 26 | Source: scoring.py
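To see where the doctest values in Example 2 come from, here is the same computation done by hand with NumPy (a sketch reusing the emb and idx values from the docstring):

import numpy as np

emb = np.array([[12., 0, 0], [0, 1, 0], [-1, 1, 1]])
idx = np.array([[1, 0, 0], [1, 1, 0]])

summed = emb[idx].sum(axis=1)      # row 0: [24, 1, 0]; row 1: [12, 2, 0]
print((summed ** 2).sum(axis=1))   # [577. 148.], matching the doctest output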
Example 3: standard_reg
def standard_reg():
    reg = tf.constant(0.0, dtype=tf.float32)
    reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW1']))
    #reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW2']))
    reg = reg + regressor_w_weight_reg * tf.reduce_mean(tf.square(net_params['sRW']))
    return reg
Author: rbharath | Project: deepchem | Lines: 7 | Source: bondvolution.py
Example 4: triplet_loss
def triplet_loss(y_true, y_pred, alpha = 0.2):
    """
    Implementation of the triplet loss as defined by the formula.

    Arguments:
    y_true -- true labels, required when you define a loss in Keras; not used in this function.
    y_pred -- python list containing three objects:
        anchor -- the encodings for the anchor images, of shape (None, 128)
        positive -- the encodings for the positive images, of shape (None, 128)
        negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    ### START CODE HERE ### (≈ 4 lines)
    # Step 1: Compute the (encoding) distance between the anchor and the positive; sum over axis=-1
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
    # Step 2: Compute the (encoding) distance between the anchor and the negative; sum over axis=-1
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.))
    ### END CODE HERE ###

    return loss
Author: sunlinyu1993 | Project: Machine-Learning-Toolbox | Lines: 29 | Source: triplet_loss.py
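A minimal call sketch for Example 4, using small random tensors in place of real encodings (the data and shapes are only illustrative; it assumes TensorFlow 2.x eager execution so the result can be printed directly):

import numpy as np
import tensorflow as tf

rng = np.random.RandomState(0)
anchor   = tf.constant(rng.randn(4, 128), dtype=tf.float32)
positive = tf.constant(anchor.numpy() + 0.1 * rng.randn(4, 128), dtype=tf.float32)  # close to the anchors
negative = tf.constant(rng.randn(4, 128), dtype=tf.float32)                         # unrelated encodings

loss = triplet_loss(y_true=None, y_pred=[anchor, positive, negative], alpha=0.2)
print(float(loss))  # typically near zero here, since positives are close and negatives are far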
Example 5: e_step
def e_step(o_mean, o_stdv, o_activations, votes):
    """The E-Step in EM Routing.

    :param o_mean: (24, 6, 6, 1, 32, 16)
    :param o_stdv: (24, 6, 6, 1, 32, 16)
    :param o_activations: (24, 6, 6, 1, 32, 1)
    :param votes: (24, 6, 6, 288, 32, 16)
    :return: rr
    """
    o_p_unit0 = - tf.reduce_sum(
        tf.square(votes - o_mean) / (2 * tf.square(o_stdv)), axis=-1, keep_dims=True
    )

    o_p_unit2 = - tf.reduce_sum(
        tf.log(o_stdv + epsilon), axis=-1, keep_dims=True
    )

    # o_p is the probability density of the h-th component of the vote from i to j
    # (24, 6, 6, 1, 32, 16)
    o_p = o_p_unit0 + o_p_unit2

    # rr: (24, 6, 6, 288, 32, 1)
    zz = tf.log(o_activations + epsilon) + o_p
    rr = tf.nn.softmax(
        zz, dim=len(zz.get_shape().as_list())-2
    )

    return rr
Author: lzqkean | Project: deep_learning | Lines: 30 | Source: core.py
Example 6: _ssim_helper
def _ssim_helper(var_x, var_y, max_val, kernel, compensation=1.0):
    """
    Helper function for computing SSIM.

    SSIM estimates covariances with weighted sums. The default parameters
    use a biased estimate of the covariance:
    Suppose `reducer` is a weighted sum, then the mean estimators are
        mu_x = sum_i w_i x_i,
        mu_y = sum_i w_i y_i,
    where the w_i's are the weighted-sum weights, and the covariance estimator is
        cov_{xy} = sum_i w_i (x_i - mu_x) (y_i - mu_y)
    with the assumption sum_i w_i = 1. This covariance estimator is biased, since
        E[cov_{xy}] = (1 - sum_i w_i ^ 2) Cov(X, Y).
    For an SSIM measure with unbiased covariance estimators, pass
    (1 - sum_i w_i ^ 2) as the `compensation` argument.

    Arguments:
        var_x: First set of images.
        var_y: Second set of images.
        max_val: The dynamic range (i.e., the difference between the maximum
            possible allowed value and the minimum allowed value).
        kernel: Weighted-sum kernel used by the local `reducer` helper below.
            For a non-convolutional version this is usually tf.reduce_mean(x, [1, 2]);
            for the convolutional version it is usually tf.nn.avg_pool or
            tf.nn.conv2d with a weighted-sum kernel.
        compensation: Compensation factor. See above.

    Returns:
        A pair containing the luminance measure and the contrast-structure measure.
    """
    def reducer(var_x, kernel):
        shape = tf.shape(var_x)
        var_x = tf.reshape(var_x, shape=tf.concat([[-1], shape[-3:]], 0))
        var_y = tf.nn.depthwise_conv2d(var_x, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return tf.reshape(var_y, tf.concat([shape[:-3], tf.shape(var_y)[1:]], 0))

    _ssim_k1 = 0.01
    _ssim_k2 = 0.03
    c_1 = (_ssim_k1 * max_val) ** 2
    c_2 = (_ssim_k2 * max_val) ** 2

    # SSIM luminance measure is
    # (2 * mu_x * mu_y + c_1) / (mu_x ** 2 + mu_y ** 2 + c_1).
    mean0 = reducer(var_x, kernel)
    mean1 = reducer(var_y, kernel)
    num0 = mean0 * mean1 * 2.0
    den0 = tf.square(mean0) + tf.square(mean1)
    luminance = (num0 + c_1) / (den0 + c_1)

    # SSIM contrast-structure measure is
    # (2 * cov_{xy} + c_2) / (cov_{xx} + cov_{yy} + c_2).
    # Note that `reducer` is a weighted sum with weights w_i, \sum_i w_i = 1, so
    #   cov_{xy} = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
    num1 = reducer(var_x * var_y, kernel) * 2.0
    den1 = reducer(tf.square(var_x) + tf.square(var_y), kernel)
    c_2 *= compensation
    c_s = (num1 - num0 + c_2) / (den1 - den0 + c_2)

    # SSIM score is the product of the luminance and contrast-structure measures.
    return luminance, c_s
Author: stonezuohui | Project: faceswap | Lines: 60 | Source: losses.py
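The compensation argument described in the docstring of Example 6 has a familiar special case: for a uniform N-point window the weights are w_i = 1/N, so 1 - sum_i w_i^2 = 1 - 1/N = (N - 1)/N, and dividing the biased covariance estimate by this factor is exactly Bessel's correction for an unbiased sample covariance.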
Example 7: build_psi_stats_rbf_plus_linear
def build_psi_stats_rbf_plus_linear(Z, kern, mu, S):
    # TODO: make sure the active dimensions are overlapping completely

    # use only active dimensions
    mu, S = kern._slice(mu, S)  # only use the active dimensions.
    Z, _ = kern._slice(Z, None)

    psi0_lin, psi1_lin, psi2_lin = build_psi_stats_linear(Z, kern.linear, mu, S)
    psi0_rbf, psi1_rbf, psi2_rbf = build_psi_stats_rbf(Z, kern.rbf, mu, S)
    psi0, psi1, psi2 = psi0_lin + psi0_rbf, psi1_lin + psi1_rbf, psi2_lin + psi2_rbf

    # extra terms for the 'interaction' of linear and rbf
    l2 = tf.square(kern.rbf.lengthscales)
    A = tf.expand_dims(1./S + 1./l2, 1)  # N x 1 x Q
    m = (tf.expand_dims(mu/S, 1) + tf.expand_dims(Z/l2, 0)) / A  # N x M x Q
    mTAZ = tf.reduce_sum(tf.expand_dims(m * kern.linear.variance, 1) *
                         tf.expand_dims(tf.expand_dims(Z, 0), 0), 3)  # N x M x M
    Z2 = tf.reduce_sum(tf.square(Z) / l2, 1)  # M,
    mu2 = tf.reduce_sum(tf.square(mu) / S, 1)  # N
    mAm = tf.reduce_sum(tf.square(m) * A, 2)  # N x M
    exp_term = tf.exp(-(tf.reshape(Z2, (1, -1)) + tf.reshape(mu2, (-1, 1)) - mAm) / 2.)  # N x M
    psi2_extra = tf.reduce_sum(kern.rbf.variance *
                               tf.expand_dims(exp_term, 2) *
                               tf.expand_dims(tf.expand_dims(tf.reduce_prod(S, 1), 1), 2) *
                               tf.expand_dims(tf.reduce_prod(A, 2), 1) *
                               mTAZ, 0)

    psi2 = psi2 + psi2_extra + tf.transpose(psi2_extra)
    return psi0, psi1, psi2
Author: blutooth | Project: dgp | Lines: 29 | Source: kernel_expectations.py
Example 8: gaussian
def gaussian(y, mu_k, sigma_k):
    y = tf.reshape(y, [batchDim, 1, L_out])
    norm = tf.reduce_sum(tf.square(y - mu_k), axis=2)  # sums over the L dimensions -> we get shape (N, K) again
    phi_k = -tf.div(norm, 2*tf.square(sigma_k))
    phi_k = tf.exp(phi_k)
    phi_k = tf.divide(phi_k, sigma_k)
    return phi_k
Author: LucNaterop | Project: cmc-rnn | Lines: 7 | Source: ffmdn_2d.py
Example 9: gauss_kl
def gauss_kl(q_mu, q_sqrt, K):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)

    to

        p(x) = N(0, K)

    We assume multiple independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    num_latent = tf.cast(tf.shape(q_sqrt)[2], float_type)
    KL += num_latent * 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.reduce_prod(tf.shape(q_sqrt)[1:]), float_type)  # constant term
    Lq = tf.matrix_band_part(tf.transpose(q_sqrt, (2, 0, 1)), -1, 0)  # force lower triangle
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.matrix_diag_part(Lq))))  # logdet
    L_tiled = tf.tile(tf.expand_dims(L, 0), tf.pack([tf.shape(Lq)[0], 1, 1]))
    LiLq = tf.matrix_triangular_solve(L_tiled, Lq, lower=True)
    KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Author: GPflow | Project: GPflow | Lines: 30 | Source: kullback_leiblers.py
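For reference, the divergence assembled term by term in Example 9 (and in the per-column variant of Example 11 below) is the standard Gaussian closed form, with d the dimension of each column of q_mu and Sigma = q_sqrt * q_sqrt^T:

    KL( N(mu, Sigma) || N(0, K) ) = 0.5 * [ mu^T K^-1 mu + trace(K^-1 Sigma) - d + log|K| - log|Sigma| ]

The Mahalanobis, trace, constant, and two log-determinant lines in the code correspond one-to-one to these terms.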
Example 10: build_rmsprop_optimizer
def build_rmsprop_optimizer(self, learning_rate, rmsprop_decay, rmsprop_constant, gradient_clip, version):
    with tf.name_scope('rmsprop'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)

        grads_and_vars = optimizer.compute_gradients(self.loss)
        grads = [gv[0] for gv in grads_and_vars]
        params = [gv[1] for gv in grads_and_vars]

        if gradient_clip > 0:
            # clip_by_global_norm returns (clipped_list, global_norm); keep only the clipped gradients
            grads, _ = tf.clip_by_global_norm(grads, gradient_clip)

        if version == 'rmsprop':
            return optimizer.apply_gradients(zip(grads, params))
        elif version == 'graves_rmsprop':
            square_grads = [tf.square(grad) for grad in grads]

            avg_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
            avg_square_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]

            update_avg_grads = [grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * grad_pair[1]))
                                for grad_pair in zip(avg_grads, grads)]
            update_avg_square_grads = [grad_pair[0].assign((rmsprop_decay * grad_pair[0]) + ((1 - rmsprop_decay) * tf.square(grad_pair[1])))
                                       for grad_pair in zip(avg_square_grads, grads)]
            avg_grad_updates = update_avg_grads + update_avg_square_grads

            rms = [tf.sqrt(avg_grad_pair[1] - tf.square(avg_grad_pair[0]) + rmsprop_constant)
                   for avg_grad_pair in zip(avg_grads, avg_square_grads)]

            rms_updates = [grad_rms_pair[0] / grad_rms_pair[1] for grad_rms_pair in zip(grads, rms)]
            train = optimizer.apply_gradients(zip(rms_updates, params))

            return tf.group(train, tf.group(*avg_grad_updates))
Author: hihell | Project: deep_rl_ale | Lines: 34 | Source: parallel_q_network.py
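The 'graves_rmsprop' branch of Example 10 keeps running averages of both the gradient and its square. Summarizing what the code computes (decay rho, gradient g, small constant eps):

    m <- rho * m + (1 - rho) * g
    n <- rho * n + (1 - rho) * g^2
    update = g / sqrt(n - m^2 + eps)

so the denominator estimates the gradient's standard deviation (n - m^2 is a running variance estimate) rather than a plain root-mean-square.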
Example 11: gauss_kl
def gauss_kl(q_mu, q_sqrt, K, num_latent):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)

    to

        p(x) = N(0, K)

    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance of q.

    K is a positive definite matrix: the covariance of p.

    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and the last dim of q_sqrt).
    """
    L = tf.cholesky(K)
    alpha = tf.matrix_triangular_solve(L, q_mu, lower=True)
    KL = 0.5 * tf.reduce_sum(tf.square(alpha))  # Mahalanobis term.
    KL += num_latent * 0.5 * tf.reduce_sum(
        tf.log(tf.square(tf.diag_part(L))))  # Prior log-det term.
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL += -0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        LiLq = tf.matrix_triangular_solve(L, Lq, lower=True)
        KL += 0.5 * tf.reduce_sum(tf.square(LiLq))  # Trace term
    return KL
Author: davharris | Project: GPflow | Lines: 34 | Source: kullback_leiblers.py
Example 12: build_loss
def build_loss(self, error_clip, num_actions, double_dqn):
    ''' build loss graph '''
    with tf.name_scope("loss"):
        predictions = tf.reduce_sum(tf.mul(self.gpu_q_layer, self.actions), 1)

        max_action_values = None
        if double_dqn:  # Double Q-Learning:
            max_actions = tf.to_int32(tf.argmax(self.gpu_q_layer, 1))
            # tf.gather doesn't support multidimensional indexing yet, so we flatten output activations for indexing
            indices = tf.range(0, tf.size(max_actions) * num_actions, num_actions) + max_actions
            max_action_values = tf.gather(tf.reshape(self.target_q_layer, shape=[-1]), indices)
        else:
            max_action_values = tf.reduce_max(self.target_q_layer, 1)

        targets = tf.stop_gradient(self.rewards + (self.discount_factor * max_action_values * self.terminals))

        difference = tf.abs(predictions - targets)

        if error_clip >= 0:
            quadratic_part = tf.clip_by_value(difference, 0.0, error_clip)
            linear_part = difference - quadratic_part
            errors = (0.5 * tf.square(quadratic_part)) + (error_clip * linear_part)
        else:
            errors = (0.5 * tf.square(difference))

        return tf.reduce_sum(errors)
Author: hihell | Project: deep_rl_ale | Lines: 27 | Source: parallel_q_network.py
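The error-clipping branch in Example 12 is a Huber-style loss: quadratic for small errors, linear beyond error_clip. A quick numeric sketch in plain Python (the helper name and the error_clip value of 1.0 are just for illustration):

def clipped_error(diff, error_clip=1.0):
    quadratic_part = min(diff, error_clip)   # same role as tf.clip_by_value above; diff is already non-negative
    linear_part = diff - quadratic_part
    return 0.5 * quadratic_part ** 2 + error_clip * linear_part

print(clipped_error(0.5), clipped_error(3.0))   # 0.125 (pure quadratic) and 2.5 (quadratic cap plus linear tail)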
Example 13: __graph__
def __graph__():
    """Building the inference graph"""

    with tf.name_scope('input'):
        # [BATCH_SIZE, NUM_FEATURES]
        x_input = tf.placeholder(dtype=tf.float32, shape=[None, self.num_features], name='x_input')

        # [BATCH_SIZE]
        y_input = tf.placeholder(dtype=tf.uint8, shape=[None], name='y_input')

        # [BATCH_SIZE, NUM_CLASSES]
        y_onehot = tf.one_hot(indices=y_input, depth=self.num_classes, on_value=1, off_value=-1,
                              name='y_onehot')

        learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')

    with tf.name_scope('training_ops'):
        with tf.name_scope('weights'):
            weight = tf.get_variable(name='weights',
                                     initializer=tf.random_normal([self.num_features, self.num_classes],
                                                                  stddev=0.01))
            self.variable_summaries(weight)
        with tf.name_scope('biases'):
            bias = tf.get_variable(name='biases', initializer=tf.constant([0.1], shape=[self.num_classes]))
            self.variable_summaries(bias)
        with tf.name_scope('Wx_plus_b'):
            output = tf.matmul(x_input, weight) + bias
            tf.summary.histogram('pre-activations', output)

    with tf.name_scope('svm'):
        regularization = tf.reduce_mean(tf.square(weight))
        hinge_loss = tf.reduce_mean(tf.square(tf.maximum(tf.zeros([self.batch_size, self.num_classes]),
                                                         1 - tf.cast(y_onehot, tf.float32) * output)))
        with tf.name_scope('loss'):
            loss = regularization + self.svm_c * hinge_loss
        tf.summary.scalar('loss', loss)

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

    with tf.name_scope('accuracy'):
        predicted_class = tf.sign(output)
        predicted_class = tf.identity(predicted_class, name='prediction')
        with tf.name_scope('correct_prediction'):
            correct = tf.equal(tf.argmax(predicted_class, 1), tf.argmax(y_onehot, 1))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        tf.summary.scalar('accuracy', accuracy)

    merged = tf.summary.merge_all()

    self.x_input = x_input
    self.y_input = y_input
    self.y_onehot = y_onehot
    self.learning_rate = learning_rate
    self.loss = loss
    self.optimizer = optimizer
    self.output = output
    self.predicted_class = predicted_class
    self.accuracy = accuracy
    self.merged = merged
Author: TaihuLight | Project: wisconsin-breast-cancer | Lines: 60 | Source: svm.py
Example 14: _build_loss
def _build_loss(self):
    with tf.variable_scope("loss"):
        # Compute y_j = r_j * discount*best_qvalue
        self.tf_discount = tf.constant(self.discount)
        self.qtarget = tf.add(self.pl_rewards, tf.mul(1.0 - self.pl_terminals, tf.mul(self.tf_discount, self.pl_qtargets)))

        # Select Q-values for given actions
        self.actions_one_hot = tf.one_hot(self.pl_actions, self.num_actions, 1.0, 0.0)
        self.qvalue_pred = tf.reduce_sum(tf.mul(self.qvalues, self.actions_one_hot), reduction_indices=1)

        # Difference between target and predicted Q-network output
        self.delta = tf.sub(self.qtarget, self.qvalue_pred)

        if self.clip_delta > 0:
            # Perform clipping of the error term, default clipping is to (-1, +1) range
            self.quadratic_part = tf.minimum(tf.abs(self.delta), tf.constant(self.clip_delta))
            self.linear_part = tf.sub(tf.abs(self.delta), self.quadratic_part)
            self.delta_square = tf.mul(tf.constant(0.5), tf.square(self.quadratic_part)) + (self.clip_delta * self.linear_part)
            #self.delta_clipped = tf.clip_by_value(self.delta, -1.0*self.clip_delta, self.clip_delta)
            #self.delta_square = tf.square(self.delta_clipped)
        else:
            # No error clipping
            self.delta_square = tf.square(self.delta)

        # Actual loss
        if self.batch_accumulator == "sum":
            self.loss = tf.reduce_sum(self.delta_square)
        else:
            self.loss = tf.reduce_mean(self.delta_square)

        # Running average of the loss for TensorBoard
        self.loss_moving_avg = tf.train.ExponentialMovingAverage(decay=0.999)
        self.loss_moving_avg_op = self.loss_moving_avg.apply([self.loss])
Author: tomrunia | Project: DeepReinforcementLearning-Atari | Lines: 35 | Source: qnetwork.py
Example 15: batchnormalize
def batchnormalize(X, eps=1e-8, g=None, b=None):
    if X.get_shape().ndims == 4:
        mean = tf.reduce_mean(X, [0, 1, 2])
        std = tf.reduce_mean(tf.square(X - mean), [0, 1, 2])
        X = (X - mean) / tf.sqrt(std + eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1, 1, 1, -1])
            b = tf.reshape(b, [1, 1, 1, -1])
            X = X*g + b

    elif X.get_shape().ndims == 2:
        mean = tf.reduce_mean(X, 0)
        std = tf.reduce_mean(tf.square(X - mean), 0)
        X = (X - mean) / tf.sqrt(std + eps)  # std

        if g is not None and b is not None:
            g = tf.reshape(g, [1, -1])
            b = tf.reshape(b, [1, -1])
            X = X*g + b

    else:
        raise NotImplementedError

    return X
Author: yihui-he | Project: GAN-MNIST | Lines: 25 | Source: model.py
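Example 15 is a hand-rolled batch normalization. Per feature (channel for 4-D inputs, column for 2-D inputs) it computes

    x_hat = (x - mean) / sqrt(var + eps),   then   y = g * x_hat + b   (when g and b are given)

where mean and var are the batch statistics; note that the variable named std actually holds the batch variance, and the square root is applied in the normalization line.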
Example 16: cross_entropy
def cross_entropy(u, label_u, alpha=0.5, normed=False):
    label_ip = tf.cast(
        tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
    s = tf.clip_by_value(label_ip, 0.0, 1.0)

    # compute balance param
    # s_t \in {-1, 1}
    s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
    sum_1 = tf.reduce_sum(s)
    sum_all = tf.reduce_sum(tf.abs(s_t))
    balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                           tf.multiply(tf.div(sum_all, sum_1), s))

    if normed:
        # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ip_1 = tf.matmul(u, tf.transpose(u))

        def reduce_shaper(t):
            return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])

        mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                  reduce_shaper(tf.square(u)), transpose_b=True))
        ip = tf.div(ip_1, mod_1)
    else:
        ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)

    ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
    return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Author: AllenMao | Project: DeepHash | Lines: 27 | Source: dhn.py
Example 17: drawGraph
def drawGraph(self, n_row, n_latent, n_col):
    with tf.name_scope('matDecomp'):
        self._p = tf.placeholder(tf.float32, shape=[None, n_col])
        self._c = tf.placeholder(tf.float32, shape=[None, n_col])
        self._lambda = tf.placeholder(tf.float32)
        self._index = tf.placeholder(tf.float32, shape=[None, n_row])
        self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
        self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
        self._h = tf.matmul(tf.matmul(self._index, self._A), self._B)

        weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
        self._weighted_loss = weighted_loss
        l2_A = tf.reduce_sum(tf.square(self._A))
        l2_B = tf.reduce_sum(tf.square(self._B))
        n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
        l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
        reg_term = tf.mul(self._lambda, l2)
        self._loss = tf.add(weighted_loss, reg_term)

        self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
        one = tf.constant(1, tf.float32)
        pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
        cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
        self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

        self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
        tf.scalar_summary('training_weighted_loss_l2', self._loss)
        tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
        merged = tf.merge_all_summaries()
Author: cning | Project: ehc | Lines: 29 | Source: model.py
Example 18: gauss_kl_white_diag
def gauss_kl_white_diag(q_mu, q_sqrt, num_latent):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)

    to

        p(x) = N(0, I)

    We assume num_latent independent distributions, given by the columns of
    q_mu and q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a matrix, each column represents the diagonal of a square-root
        matrix of the covariance.

    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and q_sqrt).
    """
    KL = 0.5 * tf.reduce_sum(tf.square(q_mu))  # Mahalanobis term
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    KL += -0.5 * tf.reduce_sum(tf.log(tf.square(q_sqrt)))  # Log-det of q-cov
    KL += 0.5 * tf.reduce_sum(tf.square(q_sqrt))  # Trace term
    return KL
Author: blutooth | Project: dgp | Lines: 25 | Source: kullback_leiblers.py
Example 19: build_graph
def build_graph(self, image_pos):
    image_pos = image_pos / 128.0 - 1

    z = tf.random_normal([self.batch, self.zdim], name='z_train')
    z = tf.placeholder_with_default(z, [None, self.zdim], name='z')

    with argscope([Conv2D, Conv2DTranspose, FullyConnected],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            image_gen = self.generator(z)
        tf.summary.image('generated-samples', image_gen, max_outputs=30)

        alpha = tf.random_uniform(shape=[self.batch, 1, 1, 1],
                                  minval=0., maxval=1., name='alpha')
        interp = image_pos + alpha * (image_gen - image_pos)

        with tf.variable_scope('discrim'):
            vecpos = self.discriminator(image_pos)
            vecneg = self.discriminator(image_gen)
            vec_interp = self.discriminator(interp)

    # the Wasserstein-GAN losses
    self.d_loss = tf.reduce_mean(vecneg - vecpos, name='d_loss')
    self.g_loss = tf.negative(tf.reduce_mean(vecneg), name='g_loss')

    # the gradient penalty loss
    gradients = tf.gradients(vec_interp, [interp])[0]
    gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
    gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
    gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
    add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

    self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)
    self.collect_variables()
Author: quanlzheng | Project: tensorpack | Lines: 35 | Source: Improved-WGAN.py
Example 20: gauss_kl_white
def gauss_kl_white(q_mu, q_sqrt, num_latent):
    """
    Compute the KL divergence from

        q(x) = N(q_mu, q_sqrt^2)

    to

        p(x) = N(0, I)

    We assume num_latent independent distributions, given by the columns of
    q_mu and the last dimension of q_sqrt.

    q_mu is a matrix, each column contains a mean.

    q_sqrt is a 3D tensor, each matrix within is a lower triangular square-root
        matrix of the covariance.

    num_latent is an integer: the number of independent distributions (equal to
        the columns of q_mu and the last dim of q_sqrt).
    """
    KL = 0.5 * tf.reduce_sum(tf.square(q_mu))  # Mahalanobis term
    KL += -0.5 * tf.cast(tf.shape(q_sqrt)[0] * num_latent, tf.float64)
    for d in range(num_latent):
        Lq = tf.batch_matrix_band_part(q_sqrt[:, :, d], -1, 0)
        # Log determinant of q covariance:
        KL -= 0.5 * tf.reduce_sum(tf.log(tf.square(tf.diag_part(Lq))))
        KL += 0.5 * tf.reduce_sum(tf.square(Lq))  # Trace term.
    return KL
Author: blutooth | Project: dgp | Lines: 27 | Source: kullback_leiblers.py
Note: the tensorflow.square examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects contributed by their original authors, and copyright remains with those authors; please refer to each project's license before distributing or reusing the code. Do not reproduce this article without permission.