This article collects typical usage examples of the tensorflow.negative function in Python. If you have been wondering what exactly the negative function does, how to use it, and what real-world usage looks like, the curated examples below may help.
Below are 20 code examples of the negative function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
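Before the project-level examples, here is a minimal sketch of the function itself (this assumes TensorFlow 1.x, where graph mode and tf.Session are the default; in 2.x use tf.compat.v1 or eager execution):

import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0])
y = tf.negative(x)  # element-wise negation; equivalent to -x or tf.math.negative(x)

with tf.Session() as sess:
    print(sess.run(y))  # [-1.  2. -3.]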
Example 1: get_top_nearest_neigbors
def get_top_nearest_neigbors(num_generated, nearneig, real_features_hdf5, gen_features_hdf5, maximum=False, random_select=False, save_path=None):
    real_img_hdf5 = real_features_hdf5.replace('_features_', '_images_')
    gen_img_hdf5 = gen_features_hdf5.replace('_features_', '_images_')
    real_features_file = h5py.File(real_features_hdf5, 'r')
    gen_features_file = h5py.File(gen_features_hdf5, 'r')
    real_img_file = h5py.File(real_img_hdf5, 'r')
    gen_img_file = h5py.File(gen_img_hdf5, 'r')
    real_features = real_features_file['features']
    gen_features = gen_features_file['features']
    real_img = real_img_file['images']
    gen_img = gen_img_file['images']
    with tf.Session() as sess:
        real_features = tf.constant(np.array(real_features), dtype=tf.float32)
        gen_features = tf.constant(np.array(gen_features), dtype=tf.float32)
        # Get nearest neighbors for all generated images.
        gen_real_distances = tf.sqrt(tf.abs(euclidean_distance(gen_features, real_features)))
        neg = tf.negative(gen_real_distances)
        neg_s_distances, s_indices = tf.math.top_k(input=neg, k=nearneig, sorted=True)
        s_distances = tf.negative(neg_s_distances)
        # Get the top smallest distances between generated and real images.
        neg_s_distances1, s_indices1 = tf.math.top_k(input=neg, k=1, sorted=True)
        neg_s_distances1 = tf.transpose(neg_s_distances1)
        if not random_select:
            if maximum:
                neg_s_distances1 = tf.negative(neg_s_distances1)
            neg_s_distances1, s_indices1 = tf.math.top_k(input=neg_s_distances1, k=num_generated, sorted=True)
            s_indices1 = tf.transpose(s_indices1)
            s_indices1 = s_indices1.eval()
        else:
            lin = list(range(int(gen_real_distances.shape[0])))
            random.shuffle(lin)
            s_indices1 = np.zeros((num_generated, 1), dtype=np.int8)
            s_indices1[:, 0] = lin[:num_generated]
        s_indices = s_indices.eval()
        s_distances = s_distances.eval()
        # For the images with the smallest distances, show nearest neighbors.
        height, width, channels = real_img.shape[1:]
        neighbors = dict()
        grid = np.zeros((num_generated*height, (nearneig+1)*width, channels))
        for i, ind in enumerate(s_indices1):
            ind = ind[0]
            total = gen_img[ind]
            neighbors[ind] = list()
            for j in range(nearneig):
                neighbors[ind].append((s_indices[ind, j], s_distances[ind, j]))
                real = real_img[s_indices[ind, j]]/255.
                total = np.concatenate([total, real], axis=1)
            grid[i*height:(i+1)*height, :, :] = total
        plt.imshow(grid)
        if save_path is not None:
            plt.imsave(save_path, grid)
        return neighbors
Developer: AdalbertoCq | Project: Pathology-GAN | Lines: 60 | Source: tools.py
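The negation in this example is a workaround: TensorFlow has no "bottom_k" op, so the k smallest distances are obtained by taking top_k of the negated distances and negating the result back. A minimal standalone sketch of the trick:

import tensorflow as tf

dist = tf.constant([[4.0, 1.0, 3.0, 2.0]])              # distances for one query row
neg_vals, idx = tf.math.top_k(tf.negative(dist), k=2)   # largest of -dist == smallest of dist
smallest = tf.negative(neg_vals)                        # undo the negation

with tf.Session() as sess:
    print(sess.run([smallest, idx]))  # [[1. 2.]] and [[1 3]]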
Example 2: logG
def logG(x, y, theta):
    fv = tff(theta, y)
    gv = tfg(theta, y)
    mu = tf.add(y, tf.multiply(fv, gl.h))
    pr = tf.subtract(x, mu)
    pr2 = tf.square(pr)
    gv2 = tf.square(gv)
    my2 = tf.constant(2.0, dtype=gl.myftype)
    mypi = tf.constant(np.pi, dtype=gl.myftype)
    lgp1 = tf.negative(tf.divide(tf.log(tf.multiply(my2*mypi*gl.h, gv2)), my2))
    lgp2 = tf.negative(tf.divide(pr2, tf.multiply(my2*gl.h, gv2)))
    lg = tf.add(lgp1, lgp2)
    return lg
Developer: hbhat4000 | Project: sdeinference | Lines: 13 | Source: tfdtqem2.py
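Reading the body off line by line, lgp1 + lgp2 is the log of a Gaussian transition density. Assuming tff and tfg return the drift f and diffusion g of an SDE and gl.h is the Euler–Maruyama time step h (plausible for this DTQ-style code, but an assumption here), the returned value is:

$$
\log G(x \mid y, \theta) = -\tfrac{1}{2}\log\!\big(2\pi h\, g^2(\theta, y)\big) \;-\; \frac{\big(x - y - f(\theta, y)\, h\big)^2}{2\, h\, g^2(\theta, y)}
$$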
Example 3: __init__
def __init__(self, product_size, embedding_size, batch_size):
    self.batch_size = batch_size
    self.graph = tf.Graph()
    with self.graph.as_default():
        self.train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
        self.train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
        with tf.device('/cpu:0'):
            embeddings = tf.Variable(tf.random_uniform([product_size, embedding_size], -1.0, 1.0))
            embed = tf.nn.embedding_lookup(embeddings, self.train_inputs)
            output_embed = tf.nn.embedding_lookup(embeddings, self.train_labels)
        weights = tf.Variable(tf.random_normal([embedding_size, embedding_size]))
        bias = tf.Variable(tf.random_normal([embedding_size]))
        output_layer = tf.matmul(embed, weights) + bias
        self.loss = tf.reduce_sum(tf.abs(tf.add(output_layer, tf.negative(output_embed))), reduction_indices=1)
        self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0).minimize(self.loss)
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        self.normalized_embeddings = embeddings / norm
        self.init = tf.initialize_all_variables()
Developer: yaoyaowd | Project: tensorflow_demo | Lines: 25 | Source: nn_knn.py
Example 4: test_all
def test_all(self):
    with self.test_context() as session:
        models = self.prepare()
        likelihoods = []
        for m in models:
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(m, maxiter=300)
            neg_obj = tf.negative(m.objective)
            likelihoods.append(session.run(neg_obj).squeeze())
        assert_allclose(likelihoods, likelihoods[0], rtol=1e-6)
        variances, lengthscales = [], []
        for m in models:
            if hasattr(m.kern, 'rbf'):
                variances.append(m.kern.rbf.variance.read_value())
                lengthscales.append(m.kern.rbf.lengthscales.read_value())
            else:
                variances.append(m.kern.variance.read_value())
                lengthscales.append(m.kern.lengthscales.read_value())
        variances, lengthscales = np.array(variances), np.array(lengthscales)
        assert_allclose(variances, variances[0], 1e-5)
        assert_allclose(lengthscales, lengthscales.mean(), 1e-4)
        mu0, var0 = models[0].predict_y(self.Xtest)
        for i, m in enumerate(models[1:]):
            mu, var = m.predict_y(self.Xtest)
            assert_allclose(mu, mu0, 1e-3)
            assert_allclose(var, var0, 1e-4)
Developer: sanket-kamthe | Project: GPflow | Lines: 26 | Source: test_method_equivalence.py
Example 5: build_graph
def build_graph(self, image_pos):
    image_pos = image_pos / 128.0 - 1
    z = tf.random_normal([self.batch, self.zdim], name='z_train')
    z = tf.placeholder_with_default(z, [None, self.zdim], name='z')
    with argscope([Conv2D, Conv2DTranspose, FullyConnected],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            image_gen = self.generator(z)
        tf.summary.image('generated-samples', image_gen, max_outputs=30)
        alpha = tf.random_uniform(shape=[self.batch, 1, 1, 1],
                                  minval=0., maxval=1., name='alpha')
        interp = image_pos + alpha * (image_gen - image_pos)
        with tf.variable_scope('discrim'):
            vecpos = self.discriminator(image_pos)
            vecneg = self.discriminator(image_gen)
            vec_interp = self.discriminator(interp)
    # the Wasserstein-GAN losses
    self.d_loss = tf.reduce_mean(vecneg - vecpos, name='d_loss')
    self.g_loss = tf.negative(tf.reduce_mean(vecneg), name='g_loss')
    # the gradient penalty loss
    gradients = tf.gradients(vec_interp, [interp])[0]
    gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
    gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
    gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
    add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)
    self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)
    self.collect_variables()
Developer: quanlzheng | Project: tensorpack | Lines: 35 | Source: Improved-WGAN.py
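For reference, these losses match the improved WGAN (WGAN-GP) objective. Writing x for real images, x̃ = G(z) for generated ones, and x̂ for the random interpolates, the code computes:

$$
L_D = \mathbb{E}\big[D(\tilde{x})\big] - \mathbb{E}\big[D(x)\big] + 10\, \mathbb{E}\Big[\big(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\big)^2\Big],
\qquad
L_G = -\,\mathbb{E}\big[D(\tilde{x})\big]
$$

tf.negative supplies the sign flip in L_G: the generator maximizes the critic score by minimizing its negative.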
Example 6: cross_entropy_loss
def cross_entropy_loss(y, yhat):
    """
    Compute the cross entropy loss in tensorflow.
    The loss should be summed over the current minibatch.

    y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
    of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat
    should be of dtype tf.float32.

    The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful.
    (Many solutions are possible, so you may not need to use all of these
    functions.)

    Note: You are NOT allowed to use the tensorflow built-in cross-entropy
    functions.

    Args:
        y: tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
        yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
            probability distribution and should sum to 1.
    Returns:
        out: tf.Tensor with shape (1,) (scalar output). You need to construct
            this tensor in the problem.
    """
    ### YOUR CODE HERE
    l_yhat = tf.log(yhat)                          # log yhat
    product = tf.multiply(tf.to_float(y), l_yhat)  # multiply element-wise
    out = tf.negative(tf.reduce_sum(product))      # negate the sum to get a scalar
    ### END YOUR CODE
    return out
Developer: ziyaochen | Project: CS224n | Lines: 31 | Source: q1_softmax.py
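The three lines between the YOUR CODE markers implement the summed cross entropy, with tf.negative providing the leading minus sign:

$$
\mathrm{CE}(y, \hat{y}) = -\sum_{i=1}^{n_{\text{samples}}} \sum_{c=1}^{n_{\text{classes}}} y_{ic} \,\log \hat{y}_{ic}
$$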
Example 7: gabor
def gabor(n_values=32, sigma=1.0, mean=0.0):
    x = tf.linspace(-3.0, 3.0, n_values)
    z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) / (2.0 * tf.pow(sigma, 2.0)))) *
         (1.0 / (sigma * tf.sqrt(2.0 * 3.145))))
    gauss_kernel = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
    x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    y = tf.reshape(tf.ones_like(x), [1, n_values])
    gabor_kernel = tf.multiply(tf.matmul(x, y), gauss_kernel)
    return gabor_kernel
Developer: stonecoder19 | Project: machine_learning | Lines: 8 | Source: basics_tensor.py
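A possible way to materialize and inspect the kernel — a sketch, assuming matplotlib is installed and the function above is in scope (the ops it creates land in the default graph of the active session):

import matplotlib.pyplot as plt

with tf.Session() as sess:
    kernel = sess.run(gabor(n_values=32))  # evaluate the 32x32 Gabor kernel
plt.imshow(kernel, cmap='gray')
plt.show()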
Example 8: build_graph
def build_graph(self, graph):
    self.xtr = tf.placeholder(dtype=tf.float32, shape=[None, 784])
    self.xte = tf.placeholder(dtype=tf.float32, shape=[784])  # one vector, compared against every row of self.xtr
    self.distance = tf.reduce_sum(tf.abs(tf.add(self.xtr, tf.negative(self.xte))), reduction_indices=1)
    self.pred = tf.argmin(self.distance, 0)
    self.global_step_t = tf.Variable(0, trainable=False, name='global_step_t')
    return graph
Developer: jamescfli | Project: PythonTest | Lines: 9 | Source: make_nearest_neighbour_model.py
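Since tf.add(a, tf.negative(b)) is just an element-wise a - b, the distance line can be written with plain operators. A minimal equivalent sketch (axis= is the modern spelling of reduction_indices=):

import tensorflow as tf

xtr = tf.placeholder(dtype=tf.float32, shape=[None, 784])
xte = tf.placeholder(dtype=tf.float32, shape=[784])
# L1 (Manhattan) distance from the test vector to every training row.
distance = tf.reduce_sum(tf.abs(xtr - xte), axis=1)
pred = tf.argmin(distance, 0)  # index of the nearest neighbor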
Example 9: integrandmat
def integrandmat(inx, iny, th):
    my2 = tf.constant(2.0, gl.myftype)
    tfmu = tf.add(iny, tf.multiply(tff(theta=th, x=iny), gl.h))
    tfsig = tf.multiply(tf.sqrt(gl.h), tfg(theta=th, x=iny))
    tfc0 = tf.reciprocal(tf.multiply(tf.sqrt(tf.multiply(my2, tf.constant(np.pi, dtype=gl.myftype))), tfsig))
    tfnumer = tf.negative(tf.square(tf.subtract(inx, tfmu)))
    tfdenom = tf.multiply(my2, tf.square(tfsig))
    tfprop = tf.multiply(tfc0, tf.exp(tf.divide(tfnumer, tfdenom)))
    return tfprop
Developer: hbhat4000 | Project: sdeinference | Lines: 9 | Source: tfdtqem2.py
Example 10: calculate_loss
def calculate_loss(self, predictions, labels, **unused_params):
    with tf.name_scope("loss_xent"):
        epsilon = 10e-6
        alpha = FLAGS.alpha
        float_labels = tf.cast(labels, tf.float32)
        cross_entropy_loss = 2*(alpha*float_labels * tf.log(predictions + epsilon) +
                                (1-alpha)*(1 - float_labels) * tf.log(1 - predictions + epsilon))
        cross_entropy_loss = tf.negative(cross_entropy_loss)
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1))
Developer: lvaleriu | Project: Youtube-8M-WILLOW | Lines: 10 | Source: losses.py
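Written out, with p the predictions, y the labels, α the class-balance weight, and ε the numerical-stability constant, the returned loss is a class-weighted binary cross entropy, negated via tf.negative, summed over classes, and averaged over the batch:

$$
L = \frac{1}{n} \sum_{i=1}^{n} \sum_{c} -2\Big[\alpha\, y_{ic} \log(p_{ic} + \epsilon) + (1-\alpha)(1-y_{ic}) \log(1 - p_{ic} + \epsilon)\Big]
$$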
Example 11: find_top_nearest_neigbors
def find_top_nearest_neigbors(generated_list, nearneig, real_features_hdf5, gen_features_hdf5, maximum=False, save_path=None):
    real_img_hdf5 = real_features_hdf5.replace('_features_', '_images_')
    gen_img_hdf5 = gen_features_hdf5.replace('_features_', '_images_')
    real_features_file = h5py.File(real_features_hdf5, 'r')
    gen_features_file = h5py.File(gen_features_hdf5, 'r')
    real_img_file = h5py.File(real_img_hdf5, 'r')
    gen_img_file = h5py.File(gen_img_hdf5, 'r')
    real_features = real_features_file['features']
    gen_features = gen_features_file['features']
    real_img = real_img_file['images']
    gen_img = gen_img_file['images']
    with tf.Session() as sess:
        real_features = tf.constant(np.array(real_features), dtype=tf.float32)
        gen_features = tf.constant(np.array(gen_features), dtype=tf.float32)
        # Get nearest neighbors for all generated images.
        gen_real_distances = tf.sqrt(tf.abs(euclidean_distance(gen_features, real_features)))
        neg = tf.negative(gen_real_distances)
        neg_s_distances, s_indices = tf.math.top_k(input=neg, k=nearneig, sorted=True)
        s_distances = tf.negative(neg_s_distances)
        s_indices = s_indices.eval()
        s_distances = s_distances.eval()
        # For the images with the smallest distances, show nearest neighbors.
        height, width, channels = real_img.shape[1:]
        neighbors = dict()
        grid = np.zeros((len(generated_list)*height, (nearneig+1)*width, channels))
        for i, ind in enumerate(generated_list):
            total = gen_img[ind]
            neighbors[ind] = list()
            for j in range(nearneig):
                neighbors[ind].append((s_indices[ind, j], s_distances[ind, j]))
                real = real_img[s_indices[ind, j]]/255.
                total = np.concatenate([total, real], axis=1)
            grid[i*height:(i+1)*height, :, :] = total
        plt.imshow(grid)
        if save_path is not None:
            plt.imsave(save_path, grid)
        return neighbors
Developer: AdalbertoCq | Project: Pathology-GAN | Lines: 42 | Source: tools.py
Example 12: create_network
def create_network(self):
    networks = {}
    with tf.variable_scope('q_net'):
        # Input parameters
        x = networks['x'] = tf.placeholder(tf.float32,
                shape=[None, self.states], name='states')
        u = networks['u'] = tf.placeholder(tf.float32,
                shape=[None, self.actions], name='actions')
        # Hidden layers
        init = 1./self.hidden_nodes/self.actions
        hid = tf.concat([x, u], axis=1)
        hid = fully_connected(hid, self.hidden_nodes,
                weights_initializer=tf.random_normal_initializer(init, init/5),
                biases_initializer=tf.random_normal_initializer(init, init/5),
                activation_fn=tf.tanh)
        for i in range(self.hidden_layers-1):
            hid = fully_connected(hid, self.hidden_nodes,
                    weights_initializer=tf.random_normal_initializer(init, init/5),
                    biases_initializer=tf.random_normal_initializer(init, init/5),
                    activation_fn=tf.nn.relu)
        # Output parameters
        pos_layer = fully_connected(hid, 1,
                weights_initializer=tf.random_normal_initializer(1./self.actions, 0.1),
                biases_initializer=tf.random_normal_initializer(1./self.actions, 0.1))
        neg_layer = tf.negative(fully_connected(hid, 1,
                weights_initializer=tf.random_normal_initializer(1./self.actions, 0.1),
                biases_initializer=tf.random_normal_initializer(1./self.actions, 0.1)))
        Q = networks['Q'] = pos_layer + neg_layer
        # Describe loss functions.
        y_ = networks['y_'] = tf.placeholder(tf.float32, [None, 1], name='y_i')
        # Tensor outputs to calculate y_i values
        networks['reward'] = tf.placeholder(tf.float32, [None, 1], name='reward')
        networks['y_calc'] = tf.add(networks['reward'], tf.multiply(Q, self.gamma))
        networks['mse'] = tf.reduce_mean(tf.squared_difference(y_, Q), name='mse')
        networks['cross_entropy'] = -tf.reduce_sum(y_ * tf.log(Q), name='cross_entropy')
        networks['optimize'] = tf.train.AdamOptimizer(
                learning_rate=self.alpha).minimize(networks['mse'])
    self.tensors = networks
    return
Developer: dtimm | Project: mlnd-openai-gym | Lines: 54 | Source: QLAgent.py
Example 13: _GradientReversalGrad
def _GradientReversalGrad(_, grad):
    """The gradients for `gradient_reversal`.

    Args:
        _: The `gradient_reversal` `Operation` that we are differentiating,
            which we can use to find the inputs and outputs of the original op.
        grad: Gradient with respect to the output of the `gradient_reversal` op.

    Returns:
        Gradient with respect to the input of `gradient_reversal`, which is
        simply the negative of the input gradient.
    """
    return tf.negative(grad)
Developer: 812864539 | Project: models | Lines: 14 | Source: grl_op_grads.py
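For context, the forward op here is wired up elsewhere in the module. A self-contained way to get the same behavior without a custom op is graph-mode gradient overriding; a sketch under that assumption (the registered name GradRevIdentity is arbitrary, and this is not the actual grl_ops implementation):

import tensorflow as tf

@tf.RegisterGradient("GradRevIdentity")
def _grad_rev_identity(op, grad):
    return tf.negative(grad)  # flip the gradient sign on the backward pass

def gradient_reversal(x):
    # Forward pass: identity. Backward pass: gradient multiplied by -1.
    g = tf.get_default_graph()
    with g.gradient_override_map({"Identity": "GradRevIdentity"}):
        return tf.identity(x)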
Example 14: k_nearest_neighbor_tf_part
def k_nearest_neighbor_tf_part(x, y, k):
    x_samples = tf.shape(x)[0]
    y_samples = tf.shape(y)[0]
    xx_d = euclidean_distance(x, x)
    yy_d = euclidean_distance(y, y)
    xy_d = euclidean_distance(x, y)
    labels = tf.concat([tf.ones((x_samples, 1)), tf.zeros((y_samples, 1))], axis=0)
    x_dist = tf.concat([xx_d, xy_d], axis=-1)
    y_dist = tf.concat([tf.transpose(xy_d), yy_d], axis=-1)
    total_dist = tf.concat([x_dist, y_dist], axis=0)
    '''
    Layout of total_dist (e.g. 100 x samples and 200 y samples):

    x1x1   x1x2   ... x1x100   | x1y1   x1y2   ... x1y200
    ...                        | ...
    x100x1 x100x2 ... x100x100 | x100y1 x100y2 ... x100y200
    ________________________________________________________
    y1x1   y1x2   ... y1x100   | y1y1   y1y2   ... y1y200
    ...                        | ...
    y200x1 y200x2 ... y200x100 | y200y1 y200y2 ... y200y200

    Diagonal entries of this tensor are the distances of each vector
    with itself.
    '''
    total_dist = tf.sqrt(tf.abs(total_dist))
    inf_eye = tf.eye(tf.shape(total_dist)[0])*1e+7  # mask out self-distances on the diagonal
    # All elements are positive now and there is no "smallest-k" function,
    # so negate and use top_k to find the k smallest distances.
    all_dist = tf.math.add(inf_eye, total_dist)
    neg_all_dist = tf.negative(all_dist)
    values, indices = tf.math.top_k(input=neg_all_dist, k=k, sorted=True)
    values = tf.negative(values)
    return indices, labels
Developer: AdalbertoCq | Project: Pathology-GAN | Lines: 36 | Source: k_nearest_neighbor.py
Example 15: GMM_M_Step
def GMM_M_Step(X, Gama, ClusterNo, name='GMM_Statistics', **kwargs):
    D, h, s = tf.split(X, [1, 1, 1], axis=3)
    WXd = tf.multiply(Gama, tf.tile(D, [1, 1, 1, ClusterNo]))
    WXa = tf.multiply(Gama, tf.tile(h, [1, 1, 1, ClusterNo]))
    WXb = tf.multiply(Gama, tf.tile(s, [1, 1, 1, ClusterNo]))
    S = tf.reduce_sum(tf.reduce_sum(Gama, axis=1), axis=1)
    S = tf.add(S, tf.contrib.keras.backend.epsilon())
    S = tf.reshape(S, [1, ClusterNo])
    M_d = tf.div(tf.reduce_sum(tf.reduce_sum(WXd, axis=1), axis=1), S)
    M_a = tf.div(tf.reduce_sum(tf.reduce_sum(WXa, axis=1), axis=1), S)
    M_b = tf.div(tf.reduce_sum(tf.reduce_sum(WXb, axis=1), axis=1), S)
    Mu = tf.split(tf.concat([M_d, M_a, M_b], axis=0), ClusterNo, 1)
    Norm_d = tf.squared_difference(D, tf.reshape(M_d, [1, ClusterNo]))
    Norm_h = tf.squared_difference(h, tf.reshape(M_a, [1, ClusterNo]))
    Norm_s = tf.squared_difference(s, tf.reshape(M_b, [1, ClusterNo]))
    WSd = tf.multiply(Gama, Norm_d)
    WSh = tf.multiply(Gama, Norm_h)
    WSs = tf.multiply(Gama, Norm_s)
    S_d = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSd, axis=1), axis=1), S))
    S_h = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSh, axis=1), axis=1), S))
    S_s = tf.sqrt(tf.div(tf.reduce_sum(tf.reduce_sum(WSs, axis=1), axis=1), S))
    Std = tf.split(tf.concat([S_d, S_h, S_s], axis=0), ClusterNo, 1)
    dist = list()
    for k in range(0, ClusterNo):
        dist.append(tf.contrib.distributions.MultivariateNormalDiag(tf.reshape(Mu[k], [1, 3]), tf.reshape(Std[k], [1, 3])))
    PI = tf.split(Gama, ClusterNo, axis=3)
    Prob0 = list()
    for k in range(0, ClusterNo):
        Prob0.append(tf.multiply(tf.squeeze(dist[k].prob(X)), tf.squeeze(PI[k])))
    Prob = tf.convert_to_tensor(Prob0, dtype=tf.float32)
    Prob = tf.minimum(tf.add(tf.reduce_sum(Prob, axis=0), tf.contrib.keras.backend.epsilon()), tf.constant(1.0, tf.float32))
    Log_Prob = tf.negative(tf.log(Prob))
    Log_Likelihood = tf.reduce_mean(Log_Prob)
    return Log_Likelihood, Mu, Std
Developer: FarhadZanjani | Project: Histopathology-Stain-Color-Normalization | Lines: 47 | Source: GMM_M_Step.py
Example 16: testGradientReversalOp
def testGradientReversalOp(self):
    with tf.Graph().as_default():
        with self.test_session():
            # Test that in forward prop, gradient reversal op acts as the
            # identity operation.
            examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
            output = grl_ops.gradient_reversal(examples)
            expected_output = examples
            self.assertAllEqual(output.eval(), expected_output.eval())
            # Test that shape inference works as expected.
            self.assertAllEqual(output.get_shape(), expected_output.get_shape())
            # Test that in backward prop, gradient reversal op multiplies
            # gradients by -1.
            examples = tf.constant([[1.0]])
            w = tf.get_variable(name='w', shape=[1, 1])
            b = tf.get_variable(name='b', shape=[1])
            init_op = tf.global_variables_initializer()
            init_op.run()
            features = tf.nn.xw_plus_b(examples, w, b)
            # Construct two outputs: features layer passes directly to output1, but
            # features layer passes through a gradient reversal layer before
            # reaching output2.
            output1 = features
            output2 = grl_ops.gradient_reversal(features)
            gold = tf.constant([1.0])
            loss1 = gold - output1
            loss2 = gold - output2
            opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
            grads_and_vars_1 = opt.compute_gradients(loss1,
                                                     tf.trainable_variables())
            grads_and_vars_2 = opt.compute_gradients(loss2,
                                                     tf.trainable_variables())
            self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
            for i in range(len(grads_and_vars_1)):
                g1 = grads_and_vars_1[i][0]
                g2 = grads_and_vars_2[i][0]
                # Verify that gradients of loss1 are the negative of gradients of
                # loss2.
                self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
Developer: 812864539 | Project: models | Lines: 41 | Source: grl_ops_test.py
Example 17: qfun
def qfun(theta, allout, init, final):
    q = []
    # first term in the summation (j=1 case)
    part1 = logG(gl.grid, init, theta)
    q.append(tf.reduce_sum(tf.multiply(part1, allout[1][:, 0]))*gl.k)
    # last term in the summation (j=F case)
    part2 = logG(final, gl.grid, theta)
    q.append(tf.reduce_sum(tf.multiply(part2, allout[2][:, 0]))*gl.k)
    # all intermediate terms
    part3 = logG(gl.gridx, gl.gridy, theta)
    # for j in range(gl.numsteps-2):
    #     q.append(tf.tensordot(part3, allout[3][j,:], axes=[[0,1],[0,1]])*gl.k*gl.k)
    # test = tf.add_n(test)
    q.append(tf.reduce_sum(tf.multiply(tf.expand_dims(part3, 0), allout[3]))*gl.k*gl.k)
    qout = tf.negative(tf.add_n(q))
    return qout
Developer: hbhat4000 | Project: sdeinference | Lines: 22 | Source: tfdtqem2.py
Example 18: gauss
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
                                (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
Developer: gojira | Project: CADL | Lines: 24 | Source: utils.py
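The tensor z evaluates the one-dimensional Gaussian density at ksize evenly spaced points, with tf.negative forming the negated exponent and 3.1415 standing in for π:

$$
z(x) = \frac{1}{\sigma\sqrt{2\pi}}\, \exp\!\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)
$$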
Example 19: _word_dropout
def _word_dropout(words, input_keep_prob):
    """Drops words with probability 1 - input_keep_prob.

    Args:
        words: a list of lemmas from the paths.
        input_keep_prob: the probability to keep the word.

    Returns:
        The revised list where some of the words are <UNK>ed.
    """
    # Create the mask: (-1) to drop, 1 to keep
    prob = tf.random_uniform(tf.shape(words), 0, 1)
    condition = tf.less(prob, (1 - input_keep_prob))
    mask = tf.where(condition,
                    tf.negative(tf.ones_like(words)), tf.ones_like(words))
    # We need to keep zeros (<PAD>), and change other numbers to 1 (<UNK>)
    # if their mask is -1. First, we multiply the mask and the words.
    # Zeros will stay zeros, and words to drop will become negative.
    # Then, we change negative values to 1.
    masked_words = tf.multiply(mask, words)
    condition = tf.less(masked_words, 0)
    dropped_words = tf.where(condition, tf.ones_like(words), words)
    return dropped_words
Developer: waterson | Project: models | Lines: 24 | Source: path_model.py
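A quick demo of the behavior, assuming the function above is in scope and the project's (assumed) id conventions of 0 for <PAD> and 1 for <UNK>:

import tensorflow as tf

words = tf.constant([0, 5, 7, 2, 0])  # hypothetical word ids
dropped = _word_dropout(words, input_keep_prob=0.5)

with tf.Session() as sess:
    print(sess.run(dropped))  # e.g. [0 1 7 1 0]: dropped words become <UNK>, <PAD> stays 0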
Example 20: _build_likelihood
def _build_likelihood(self):
    """
    Construct a tensorflow function to compute the bound on the marginal
    likelihood. For a derivation of the terms in here, see the associated
    SGPR notebook.
    """
    num_inducing = len(self.feature)
    num_data = tf.cast(tf.shape(self.Y)[0], settings.float_type)
    output_dim = tf.cast(tf.shape(self.Y)[1], settings.float_type)
    err = self.Y - self.mean_function(self.X)
    Kdiag = self.kern.Kdiag(self.X)
    Kuf = self.feature.Kuf(self.kern, self.X)
    Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
    L = tf.cholesky(Kuu)
    sigma = tf.sqrt(self.likelihood.variance)
    # Compute intermediate matrices
    A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
    AAT = tf.matmul(A, A, transpose_b=True)
    B = AAT + tf.eye(num_inducing, dtype=settings.float_type)
    LB = tf.cholesky(B)
    Aerr = tf.matmul(A, err)
    c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
    # compute log marginal bound
    bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
    bound += tf.negative(output_dim) * tf.reduce_sum(tf.log(tf.matrix_diag_part(LB)))
    bound -= 0.5 * num_data * output_dim * tf.log(self.likelihood.variance)
    bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
    bound += 0.5 * tf.reduce_sum(tf.square(c))
    bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / self.likelihood.variance
    bound += 0.5 * output_dim * tf.reduce_sum(tf.matrix_diag_part(AAT))
    return bound
Developer: vincentadam87 | Project: GPflow | Lines: 36 | Source: sgpr.py
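Term by term, the returned quantity reads off as the SGPR evidence lower bound (tf.negative supplying the minus sign on the log-determinant term). With n = num_data, d = output_dim, e = err, and c, A, L_B as computed above:

$$
\mathcal{L} = -\frac{nd}{2}\log 2\pi \;-\; d\sum_i \log [L_B]_{ii} \;-\; \frac{nd}{2}\log\sigma^2 \;-\; \frac{\lVert e \rVert^2}{2\sigma^2} \;+\; \frac{\lVert c \rVert^2}{2} \;-\; \frac{d}{2\sigma^2}\sum_i [K_{ff}]_{ii} \;+\; \frac{d}{2}\,\mathrm{tr}\!\left(AA^{\top}\right)
$$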
Note: The tensorflow.negative examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; consult the corresponding project's License before redistributing or using the code. Do not reproduce without permission.