This article collects typical usage examples of the tensorflow.subtract function in Python. If you are wondering how the subtract function is used in practice, how it works, or where to find real examples of it, the hand-picked code examples below may help.
Shown below are 20 code examples of the subtract function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
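Before the project-level examples, here is a minimal, self-contained sketch of what tf.subtract itself does (element-wise subtraction with NumPy-style broadcasting). The tensors and values are illustrative only and are not taken from any of the projects listed below:

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([0.5, 1.0])                # broadcast across the rows of a
diff = tf.subtract(a, b, name="diff")      # equivalent to the overloaded a - b

with tf.Session() as sess:                 # TF1-style graph execution, as in the examples below
    print(sess.run(diff))                  # [[0.5 1. ]
                                           #  [2.5 3. ]]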
Example 1: killRegions
def killRegions(anchors, image_attr, axis=-1):
    """ Prune the anchors so that only those entirely within the image remain.
    This function is the RPN-training analog of clipRegions, just more murderous.
    Output:
        The anchors that survive the slaughter, along with their indices
    """
    with tf.device("/cpu:0"):
        # Assumes input of shape (numBaseAnchors, feature_h, feature_w, 4),
        # or previously as above but then flattened to (-1, 4)
        anchors = tf.reshape(anchors, [-1, 4], name="flattened_anchors")
        x1, y1, x2, y2 = tf.unstack(anchors, num=4, axis=axis)

        zero = tf.constant([0.])
        max_x = [tf.subtract(image_attr[1] * image_attr[2], tf.constant([1.]),
                             name="murder_img_w")]
        max_y = [tf.subtract(image_attr[0] * image_attr[2], tf.constant([1.]),
                             name="murder_img_h")]

        x1_valid = x1 >= zero
        x2_valid = x2 <= max_x
        y1_valid = y1 >= zero
        y2_valid = y2 <= max_y

        # Combine the per-coordinate boolean masks with tf.logical_and;
        # the Python `and` operator cannot be applied to tensors.
        anchor_valid = tf.logical_and(tf.logical_and(x1_valid, x2_valid),
                                      tf.logical_and(y1_valid, y2_valid))

        valid_indices = tf.where(anchor_valid, name="surviving_indices")
        return tf.gather_nd(anchors, valid_indices, name="surviving_anchors"), valid_indices
Author: PentaHiggs, Project: fantastic-pancakes, Lines: 31, Source: rpn.py
Example 2: logits_to_log_prob
def logits_to_log_prob(logits):
    """Computes log probabilities using a numerically stable trick.

    This uses two numerical stability tricks:
      1) softmax(x) = softmax(x - c) where c is a constant applied to all
         arguments. If we set c = max(x) then the softmax is more numerically
         stable.
      2) log softmax(x) is not numerically stable, but we can stabilize it
         by using the identity log softmax(x) = x - log sum exp(x).

    Args:
        logits: Tensor of arbitrary shape whose last dimension contains logits.

    Returns:
        A tensor of the same shape as the input, but with corresponding log
        probabilities.
    """
    with tf.variable_scope('log_probabilities'):
        reduction_indices = len(logits.shape.as_list()) - 1
        max_logits = tf.reduce_max(
            logits, reduction_indices=reduction_indices, keep_dims=True)
        safe_logits = tf.subtract(logits, max_logits)
        sum_exp = tf.reduce_sum(
            tf.exp(safe_logits),
            reduction_indices=reduction_indices,
            keep_dims=True)
        log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
    return log_probs
Author: 812864539, Project: models, Lines: 29, Source: utils.py
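As a quick sanity check (a sketch of my own, not part of the original project), the hand-rolled function above should agree with tf.nn.log_softmax, which applies the same x - logsumexp(x) identity along the last axis:

import numpy as np
import tensorflow as tf

logits = tf.constant(np.random.randn(4, 10).astype(np.float32))
manual = logits_to_log_prob(logits)        # the function from Example 2 above
builtin = tf.nn.log_softmax(logits)        # operates on the last axis by default

with tf.Session() as sess:
    m, b = sess.run([manual, builtin])
    print(np.allclose(m, b, atol=1e-5))    # expected: True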
Example 3: attention_mechanism_parallel
def attention_mechanism_parallel(self, c_full, m, q, i):
    """ Parallel implementation of the gate function given a list of candidate sentences, a query, and the previous memory.
    Input:
        c_full: candidate facts. shape: [batch_size, story_length, hidden_size]
        m: previous memory. shape: [batch_size, hidden_size]
        q: question. shape: [batch_size, hidden_size]
    Output: a scalar score (per batch element). shape: [batch_size, story_length]
    """
    q = tf.expand_dims(q, axis=1)  # [batch_size, 1, hidden_size]
    m = tf.expand_dims(m, axis=1)  # [batch_size, 1, hidden_size]

    # 1. define a large feature vector that captures a variety of similarities between input, memory and question vector: z(c, m, q)
    c_q_elementwise = tf.multiply(c_full, q)            # [batch_size, story_length, hidden_size]
    c_m_elementwise = tf.multiply(c_full, m)            # [batch_size, story_length, hidden_size]
    c_q_minus = tf.abs(tf.subtract(c_full, q))          # [batch_size, story_length, hidden_size]
    c_m_minus = tf.abs(tf.subtract(c_full, m))          # [batch_size, story_length, hidden_size]
    # c^T W q
    c_w_q = self.x1Wx2_parallel(c_full, q, "c_w_q" + str(i))  # [batch_size, story_length, hidden_size]
    # c^T W m
    c_w_m = self.x1Wx2_parallel(c_full, m, "c_w_m" + str(i))  # [batch_size, story_length, hidden_size]
    q_tile = tf.tile(q, [1, self.story_length, 1])      # [batch_size, story_length, hidden_size]
    m_tile = tf.tile(m, [1, self.story_length, 1])      # [batch_size, story_length, hidden_size]
    z = tf.concat([c_full, m_tile, q_tile, c_q_elementwise, c_m_elementwise,
                   c_q_minus, c_m_minus, c_w_q, c_w_m], 2)  # [batch_size, story_length, hidden_size*9]

    # 2. two-layer feed-forward network
    g = tf.layers.dense(z, self.hidden_size * 3, activation=tf.nn.tanh)  # [batch_size, story_length, hidden_size*3]
    g = tf.layers.dense(g, 1, activation=tf.nn.sigmoid)                  # [batch_size, story_length, 1]
    g = tf.squeeze(g, axis=2)                                            # [batch_size, story_length]
    return g
Author: AmjadHisham, Project: text_classification, Lines: 28, Source: a8_dynamic_memory_network.py
Example 4: __build
def __build(self):
    self.__init_global_epoch()
    self.__init_global_step()
    self.__init_input()

    with tf.name_scope('Preprocessing'):
        red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
        preprocessed_input = tf.concat([
            tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
            tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
            tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
        ], 3)
    x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
    conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                   stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                   batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                   activation=tf.nn.relu, padding='VALID')
    padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
    max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
    stage2 = self.__stage(max_pool, stage=2, repeat=3)
    stage3 = self.__stage(stage2, stage=3, repeat=7)
    stage4 = self.__stage(stage3, stage=4, repeat=3)
    global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')
    logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                kernel_size=(1, 1),
                                l2_strength=self.args.l2_strength,
                                bias=self.args.bias,
                                is_training=self.is_training)
    self.logits = flatten(logits_unflattened)
    self.__init_output()
Author: soxueren, Project: ShuffleNet-tensorflow, Lines: 32, Source: model.py
Example 5: getLoss
def getLoss(trueCosSim, falseCosSim, margin):
    zero = tf.fill(tf.shape(trueCosSim), 0.0)
    tfMargin = tf.fill(tf.shape(trueCosSim), margin)
    with tf.name_scope("loss"):
        losses = tf.maximum(zero, tf.subtract(tfMargin, tf.subtract(trueCosSim, falseCosSim)))
        loss = tf.reduce_sum(losses)
    return loss
Author: sjqzhang, Project: QA, Lines: 7, Source: qaLSTMNet.py
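Viewed per example, this is a margin ranking (hinge) loss: each pair contributes max(0, margin - (trueCosSim - falseCosSim)), so a pair where the true answer already beats the false one by at least the margin contributes nothing. A tiny illustrative run (made-up similarity values, assuming getLoss as defined above):

import tensorflow as tf

true_sim = tf.constant([0.9, 0.4])
false_sim = tf.constant([0.2, 0.5])
loss = getLoss(true_sim, false_sim, 0.3)   # margin = 0.3

with tf.Session() as sess:
    # per-pair terms: max(0, 0.3 - 0.7) = 0.0 and max(0, 0.3 - (-0.1)) = 0.4
    print(sess.run(loss))                  # 0.4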
Example 6: r2_op
def r2_op(predictions, targets):
    """ r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data)  # Apply some ops
        y_true = placeholder(shape=[None, 10])  # Labels
        stderr_op = r2_op(y_pred, y_true)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.

    Returns:
        `Float`. The standard error.
    """
    with tf.name_scope('StandardError'):
        a = tf.reduce_sum(tf.square(tf.subtract(targets, predictions)))
        b = tf.reduce_sum(tf.square(tf.subtract(targets, tf.reduce_mean(targets))))
        return tf.subtract(1.0, tf.divide(a, b))
Author: tflearn, Project: tflearn, Lines: 28, Source: metrics.py
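Despite the "standard error" wording in the docstring, the quantity computed is the coefficient of determination, R² = 1 - SS_res / SS_tot. A small hand-checkable run (illustrative values, assuming r2_op as defined above):

import tensorflow as tf

y_pred = tf.constant([2.0, 4.0, 6.0])
y_true = tf.constant([1.0, 4.0, 7.0])
r2 = r2_op(y_pred, y_true)

with tf.Session() as sess:
    # SS_res = 1 + 0 + 1 = 2, SS_tot = 9 + 0 + 9 = 18, so R^2 = 1 - 2/18
    print(sess.run(r2))                    # ~0.8889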
Example 7: tf_image_processing
def tf_image_processing(tf_images, basenet, crop_size, distort=False, hp_filter=False):
    if len(tf_images.shape) == 3:
        tf_images = tf.expand_dims(tf_images, -1)
    if basenet == 'sketchanet':
        mean_value = 250.42
        tf_images = tf.subtract(tf_images, mean_value)
        if distort:
            print("Distorting photos")
            FLAGS.crop_size = crop_size
            FLAGS.dist_chn_size = 1
            tf_images = data_augmentation(tf_images)
        else:
            tf_images = tf.image.resize_images(tf_images, (crop_size, crop_size))
    elif basenet in ['inceptionv1', 'inceptionv3', 'gen_cnn']:
        tf_images = tf.divide(tf_images, 255.0)
        tf_images = tf.subtract(tf_images, 0.5)
        tf_images = tf.multiply(tf_images, 2.0)
        if int(tf_images.shape[-1]) != 3:
            tf_images = tf.concat([tf_images, tf_images, tf_images], axis=-1)
        if distort:
            print("Distorting photos")
            FLAGS.crop_size = crop_size
            FLAGS.dist_chn_size = 3
            tf_images = data_augmentation(tf_images)
            # Display the training images in the visualizer.
            # tf.image_summary('input_images', input_images)
        else:
            tf_images = tf.image.resize_images(tf_images, (crop_size, crop_size))
    if hp_filter:
        tf_images = tf_high_pass_filter(tf_images)
    return tf_images
Author: seindlut, Project: deep_p2s, Lines: 33, Source: tf_data_work.py
Example 8: triplet_loss
def triplet_loss(y_true, y_pred, alpha=0.2):
    """
    Implementation of the triplet loss as defined by formula

    Arguments:
    y_true -- true labels, required when you define a loss in Keras; you don't need it in this function.
    y_pred -- python list containing three objects:
            anchor -- the encodings for the anchor images, of shape (None, 128)
            positive -- the encodings for the positive images, of shape (None, 128)
            negative -- the encodings for the negative images, of shape (None, 128)

    Returns:
    loss -- real number, value of the loss
    """
    anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]

    ### START CODE HERE ### (≈ 4 lines)
    # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)))
    # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)))
    # Step 3: subtract the two previous distances and add alpha.
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.
    loss = tf.reduce_sum(tf.maximum(basic_loss, 0.))
    ### END CODE HERE ###

    return loss
Author: sunlinyu1993, Project: Machine-Learning-Toolbox, Lines: 29, Source: triplet_loss.py
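The formula the docstring refers to, but does not spell out, is the standard FaceNet-style triplet objective; written out (notation mine, not part of the original exercise):

\mathcal{J} = \sum_{i=1}^{m} \max\!\left( \lVert f(a^{(i)}) - f(p^{(i)}) \rVert_2^2 \;-\; \lVert f(a^{(i)}) - f(n^{(i)}) \rVert_2^2 \;+\; \alpha,\; 0 \right)

Note that tf.reduce_sum is called above without an axis argument, so the squared differences are summed over every axis (batch included) before the max; a per-example variant of the same loss would pass axis=-1, as the inline comments suggest.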
Example 9: addLayer
def addLayer(self, n, activation_function='tanh', include_bias=False, sd=0.35, dropout=0, normalization=None, weights=None):
    """
    :Description:
        Adds a layer to the network, including a weight tensor and an activation tensor.

    :Input parameters:
        activation_function: type of activation function to be applied to each unit (string)
        include_bias:   if true, a column of ones will be added to the weights (boolean)
        sd:             standard deviation of the zero-mean gaussian from which the weights will be drawn (float)
        dropout:        the chance with which each weight will be set to zero for a given training step (float)
        normalization:  the type of normalization imposed on the layer activations. Can be
                        a) 'softmax' for softmax normalization
                        b) 'Shift' for de-meaning
                        c) 'ShiftScale' for de-meaning and standard deviation normalization
        weights:        if provided, will be used as weights of the layer instead of drawing from a gaussian (tensor)
    """

    """ initialize weights and use them to calculate layer activations """
    if weights:  # if weights are provided, use those
        weights = tf.mul(tf.ones(weights.shape), weights)
        activations = tf.matmul(self.data, weights) if not self.weights else tf.matmul(self.Activations[-1], weights)
    elif not self.Weights:  # else if first layer
        weights = tf.Variable(tf.random_normal([self.data.get_shape()[1].value, n], stddev=sd))
        weights = tf.concat(1, [weights, tf.ones([weights.get_shape()[0], 1])]) if include_bias else weights
        activations = tf.matmul(self.data, weights)
    else:  # for every other layer
        weights = tf.Variable(tf.random_normal([self.Activations[-1].get_shape()[-1].value, n], stddev=sd))
        weights = tf.concat(1, [weights, tf.ones([weights.get_shape()[0], 1])]) if include_bias else weights
        activations = tf.matmul(self.Activations[-1], weights)

    self.Weights.append(weights)
    self.Activations.append(self.applyActivation(activations, activation_function))  # apply activation function on raw activations

    """ add dropout and/or normalization """
    if dropout:
        self.Activations.append(tf.nn.dropout(self.Activations[-1], dropout))
    if normalization == 'softmax':       # for softmax normalization
        self.Activations.append(tf.nn.softmax(self.Activations[-1]))
    elif normalization == 'Shift':       # for de-meaning
        self.Activations[-1] = tf.subtract(self.Activations[-1], tf.reduce_mean(self.Activations[-1]))
    elif normalization == 'ShiftScale':  # for de-meaning and rescaling by variance
        mu = tf.reduce_mean(self.Activations[-1])
        diff = tf.subtract(self.Activations[-1], mu)
        self.Activations[-1] = tf.div(diff, tf.reduce_sum(tf.mul(diff, diff)))
Author: CogSciUOS, Project: Conceptors, Lines: 60, Source: DeepNN.py
Example 10: __init__
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
    print(self.embedded_words1)

    # Create a convolution + maxpool layer for each filter size
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Author: kumamandala, Project: deep-siamese-text-similarity-1, Lines: 34, Source: siamese_network_semantic.py
Example 11: cosineface_losses
def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels: the input labels, the shape should be e.g. (batch_size, 1)
    :param s: scalar value, default is 30
    :param out_num: output class num
    :param m: the margin value, default is 0.4
    :return: the final calculated output; this output is fed into tf.nn.softmax directly
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')
        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')
        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
        return output
Author: xy694942097, Project: InsightFace_TF, Lines: 26, Source: face_losses.py
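In other words, the mask / inv_mask arithmetic assembles the Large Margin Cosine Loss (CosFace) logits: with both embeddings and class weights L2-normalised, the logit fed to softmax is (notation mine):

\text{output}_{ij} =
\begin{cases}
  s\,(\cos\theta_{ij} - m), & j = y_i,\\
  s\,\cos\theta_{ij}, & j \neq y_i,
\end{cases}
\qquad \cos\theta_{ij} = \hat{w}_j^{\top}\hat{x}_i .

The margin m is subtracted only from the ground-truth class, and the scale s restores enough dynamic range for the softmax after normalisation.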
Example 12: auto_encoder
def auto_encoder(x_1, x_2, x_mask_1, x_mask_2, y, dropout, opt):
    x_1_emb, W_emb = embedding(x_1, opt)  # batch L emb
    x_2_emb = tf.nn.embedding_lookup(W_emb, x_2)

    x_1_emb = tf.nn.dropout(x_1_emb, dropout)  # batch L emb
    x_2_emb = tf.nn.dropout(x_2_emb, dropout)  # batch L emb

    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    x_1_emb = layers.fully_connected(tf.squeeze(x_1_emb), num_outputs=opt.embed_size, biases_initializer=biasInit, activation_fn=tf.nn.relu, scope='trans', reuse=None)  # batch L emb
    x_2_emb = layers.fully_connected(tf.squeeze(x_2_emb), num_outputs=opt.embed_size, biases_initializer=biasInit, activation_fn=tf.nn.relu, scope='trans', reuse=True)

    x_1_emb = tf.expand_dims(x_1_emb, 3)  # batch L emb 1
    x_2_emb = tf.expand_dims(x_2_emb, 3)

    if opt.encoder == 'aver':
        H_enc_1 = aver_emb_encoder(x_1_emb, x_mask_1)
        H_enc_2 = aver_emb_encoder(x_2_emb, x_mask_2)
    elif opt.encoder == 'max':
        H_enc_1 = max_emb_encoder(x_1_emb, x_mask_1, opt)
        H_enc_2 = max_emb_encoder(x_2_emb, x_mask_2, opt)
    elif opt.encoder == 'concat':
        H_enc_1 = concat_emb_encoder(x_1_emb, x_mask_1, opt)
        H_enc_2 = concat_emb_encoder(x_2_emb, x_mask_2, opt)

    # discriminative loss term
    if opt.combine_enc == 'mult':
        H_enc = tf.multiply(H_enc_1, H_enc_2)  # batch * n_gan
    if opt.combine_enc == 'concat':
        H_enc = tf.concat([H_enc_1, H_enc_2], 1)
    if opt.combine_enc == 'sub':
        H_enc = tf.subtract(H_enc_1, H_enc_2)
    if opt.combine_enc == 'mix':
        H_1 = tf.multiply(H_enc_1, H_enc_2)
        H_2 = tf.concat([H_enc_1, H_enc_2], 1)
        H_3 = tf.subtract(H_enc_1, H_enc_2)
        H_enc = tf.concat([H_1, H_2, H_3], 1)

    # calculate the accuracy
    logits = discriminator_2layer(H_enc, opt, dropout, prefix='classify_', num_outputs=opt.category, is_reuse=None)
    prob = tf.nn.softmax(logits)
    correct_prediction = tf.equal(tf.argmax(prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    train_op = layers.optimize_loss(
        loss,
        framework.get_global_step(),
        optimizer='Adam',
        # variables=d_vars,
        learning_rate=opt.lr)

    return accuracy, loss, train_op, W_emb
Author: niurouli, Project: SWEM, Lines: 58, Source: eval_snli_emb.py
Example 13: single_image_TV
def single_image_TV(patch, patch_size):
    result = tf.Variable(tf.zeros([1, patch_size - 1, 3]))
    slice_result = tf.assign(result, patch[0:1, 1:, 0:3])
    for iter in range(1, patch_size - 1):
        temp = tf.assign(result, tf.add(tf.subtract(patch[iter:iter + 1, 1:, 0:3], patch[iter:iter + 1, 0:-1, 0:3]),
                                        tf.subtract(patch[iter:iter + 1, 0:-1, 0:3], patch[iter + 1:iter + 2, 0:-1, 0:3])))
        slice_result = tf.concat([slice_result, temp], 0)
    return slice_result
Author: liuaishan, Project: AdvPGAN, Lines: 9, Source: utils.py
Example 14: __init__
def __init__(self, config, is_training=True):
    self.batch_size = tf.Variable(0, dtype=tf.int32, trainable=False)
    num_step = config.num_step
    embed_dim = config.embed_dim
    self.input_data_s1 = tf.placeholder(tf.float64, [None, num_step, embed_dim])
    self.input_data_s2 = tf.placeholder(tf.float64, [None, num_step, embed_dim])
    self.target = tf.placeholder(tf.float64, [None])
    self.mask_s1 = tf.placeholder(tf.float64, [None, num_step])
    self.mask_s2 = tf.placeholder(tf.float64, [None, num_step])
    self.hidden_neural_size = config.hidden_neural_size
    self.new_batch_size = tf.placeholder(tf.int32, shape=[], name="new_batch_size")
    self._batch_size_update = tf.assign(self.batch_size, self.new_batch_size)

    with tf.name_scope('lstm_output_layer'):
        self.cell_outputs1 = self.singleRNN(x=self.input_data_s1, scope='side1', cell='lstm', reuse=None)
        self.cell_outputs2 = self.singleRNN(x=self.input_data_s2, scope='side1', cell='lstm', reuse=True)

    with tf.name_scope('Sentence_Layer'):
        # self.mask_s1_sum = tf.reduce_sum(self.mask_s1, axis=0)
        # self.mask_s2_sum = tf.reduce_sum(self.mask_s2, axis=0)
        # self.mask_s1_sum1 = tf.reduce_sum(self.mask_s1, axis=1)
        # self.mask_s2_sum1 = tf.reduce_sum(self.mask_s2, axis=1)
        self.sent1 = tf.reduce_sum(self.cell_outputs1 * self.mask_s1[:, :, None], axis=1)
        self.sent2 = tf.reduce_sum(self.cell_outputs2 * self.mask_s2[:, :, None], axis=1)

    with tf.name_scope('loss'):
        diff = tf.abs(tf.subtract(self.sent1, self.sent2), name='err_l1')
        diff = tf.reduce_sum(diff, axis=1)
        self.sim = tf.clip_by_value(tf.exp(-1.0 * diff), 1e-7, 1.0 - 1e-7)
        self.loss = tf.square(tf.subtract(self.sim, tf.clip_by_value((self.target - 1.0) / 4.0, 1e-7, 1.0 - 1e-7)))

    with tf.name_scope('cost'):
        self.cost = tf.reduce_mean(self.loss)
        self.truecost = tf.reduce_mean(tf.square(tf.subtract(self.sim * 4.0 + 1.0, self.target)))

    if not is_training:
        return

    self.globle_step = tf.Variable(0, name="globle_step", trainable=False, dtype=tf.float64)
    self.lr = tf.Variable(0.0, trainable=False, dtype=tf.float64)
    tvars = tf.trainable_variables()
    grads = tf.gradients(self.cost, tvars)
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=self.lr, epsilon=1e-6)

    with tf.name_scope('train'):
        self.train_op = optimizer.apply_gradients(zip(grads, tvars))

    self.new_lr = tf.placeholder(tf.float64, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self.lr, self.new_lr)
Author: jx00109, Project: dependency-based-siamese-lstm, Lines: 54, Source: lstmRNN.py
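The loss block follows the Manhattan-LSTM recipe: the L1 distance between the two sentence vectors is squashed through exp(-x) into (0, 1] and regressed against the gold relatedness score, which the (target - 1)/4 term rescales from a [1, 5] range (SICK-style) to [0, 1]. In symbols (notation mine):

\text{sim} = \exp\!\big(-\lVert s_1 - s_2 \rVert_1\big),
\qquad
\text{loss} = \Big(\text{sim} - \tfrac{\text{target}-1}{4}\Big)^2 ,

with truecost reporting the mean squared error after mapping sim back to the original scale via 4·sim + 1.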
Example 15: tf_2d_normal
def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
    """Returns result of eq # 24 and 25 of http://arxiv.org/abs/1308.0850."""
    norm1 = tf.subtract(x1, mu1)
    norm2 = tf.subtract(x2, mu2)
    s1s2 = tf.multiply(s1, s2)
    z = (tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) -
         2 * tf.div(tf.multiply(rho, tf.multiply(norm1, norm2)), s1s2))
    neg_rho = 1 - tf.square(rho)
    result = tf.exp(tf.div(-z, 2 * neg_rho))
    denom = 2 * np.pi * tf.multiply(s1s2, tf.sqrt(neg_rho))
    result = tf.div(result, denom)
    return result
Author: memo, Project: magenta, Lines: 12, Source: model.py
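For reference, equations 24-25 of Graves (2013), which this function evaluates term by term, are the bivariate Gaussian density:

\mathcal{N}(x \mid \mu, \sigma, \rho) =
\frac{1}{2\pi\,\sigma_1\sigma_2\sqrt{1-\rho^2}}
\exp\!\left(\frac{-Z}{2\,(1-\rho^2)}\right),
\qquad
Z = \frac{(x_1-\mu_1)^2}{\sigma_1^2} + \frac{(x_2-\mu_2)^2}{\sigma_2^2}
  - \frac{2\rho\,(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2} .

The two tf.subtract calls compute the centred coordinates x1 - mu1 and x2 - mu2 that appear throughout Z.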
Example 16: _loss
def _loss(self, predictions):
    with tf.name_scope("loss"):
        # if training then crop center of y; else, padding was applied
        slice_amt = (np.sum(self.filter_sizes) - len(self.filter_sizes)) / 2
        slice_y = self.y_norm[:, slice_amt:-slice_amt, slice_amt:-slice_amt]
        _y = tf.cond(self.is_training, lambda: slice_y, lambda: self.y_norm)
        tf.subtract(predictions, _y)  # note: the result of this op is not used below
        err = tf.square(predictions - _y)
        err_filled = utils.fill_na(err, 0)
        finite_count = tf.reduce_sum(tf.cast(tf.is_finite(err), tf.float32))
        mse = tf.reduce_sum(err_filled) / finite_count
        return mse
Author: liaoyongqiang, Project: srcnn-tensorflow-1, Lines: 12, Source: srcnn.py
Example 17: triplet_loss
def triplet_loss(y_true, y_pred):
    import tensorflow as tf
    anchor = y_pred[:, 0]
    positive = y_pred[:, 1]
    negative = y_pred[:, 2]
    # anchor, positive, negative = y_pred
    alpha = 0.2
    pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)))
    neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)))
    basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
    loss = tf.maximum(tf.reduce_mean(basic_loss), 0.0)
    return loss
Author: subramgo, Project: DeepLearning, Lines: 12, Source: 01_train_inception.py
Example 18: loss_with_step
def loss_with_step(self):
    margin = 5.0
    labels_t = self.y_
    labels_f = tf.subtract(1.0, self.y_, name="1-yi")  # labels_ = !labels
    eucd2 = tf.pow(tf.subtract(self.o1, self.o2), 2)
    eucd2 = tf.reduce_sum(eucd2, 1)
    eucd = tf.sqrt(eucd2 + 1e-6, name="eucd")
    C = tf.constant(margin, name="C")
    pos = tf.multiply(labels_t, eucd, name="y_x_eucd")
    neg = tf.multiply(labels_f, tf.maximum(0.0, tf.subtract(C, eucd)), name="Ny_C-eucd")
    losses = tf.add(pos, neg, name="losses")
    loss = tf.reduce_mean(losses, name="loss")
    return loss
Author: JapneetSingh, Project: ml-course1, Lines: 13, Source: inference.py
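This is a contrastive loss in the spirit of Hadsell, Chopra and LeCun (2006), although this variant uses the plain (unsquared) Euclidean distance in both terms. With y the similarity label, d the pairwise distance and m = 5 the margin (notation mine):

L = \frac{1}{N}\sum_{i=1}^{N}\Big[\, y_i\, d_i \;+\; (1-y_i)\,\max(0,\; m - d_i) \,\Big],
\qquad d_i = \lVert o_1^{(i)} - o_2^{(i)} \rVert_2 .

Similar pairs (y = 1) are pulled together, while dissimilar pairs (y = 0) are only penalised while they sit inside the margin.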
Example 19: __init__
def __init__(self, _margin, sequence_length, batch_size,
             vocab_size, embedding_size,
             filter_sizes, num_filters, l2_reg_lambda=0.0):
    self.L, self.B, self.V, self.E, self.FS, self.NF = sequence_length, batch_size, \
        vocab_size, embedding_size, filter_sizes, num_filters
    # user question; character vectors are obtained with embedding_lookup
    self.q = tf.placeholder(tf.int32, [self.B, self.L], name="q")
    # positive (matching) candidate question
    self.qp = tf.placeholder(tf.int32, [self.B, self.L], name="qp")
    # negative candidate question
    self.qn = tf.placeholder(tf.int32, [self.B, self.L], name="qn")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
    l2_loss = tf.constant(0.0)

    # Embedding layer
    with tf.device('/cpu:0'), tf.name_scope("embedding"):
        W = tf.get_variable(
            initializer=tf.random_uniform([self.V, self.E], -1.0, 1.0),
            name='We')
        self.qe = tf.nn.embedding_lookup(W, self.q)
        self.qpe = tf.nn.embedding_lookup(W, self.qp)
        self.qne = tf.nn.embedding_lookup(W, self.qn)
    self.qe = tf.expand_dims(self.qe, -1)
    self.qpe = tf.expand_dims(self.qpe, -1)
    self.qne = tf.expand_dims(self.qne, -1)

    with tf.variable_scope('shared-conv') as scope:
        self.qe = self.conv(self.qe)
        scope.reuse_variables()
        # tf.get_variable_scope().reuse_variables()
        self.qpe = self.conv(self.qpe)
        scope.reuse_variables()
        # tf.get_variable_scope().reuse_variables()
        self.qne = self.conv(self.qne)

    self.cos_q_qp = self.cosine(self.qe, self.qpe)
    self.cos_q_qn = self.cosine(self.qe, self.qne)

    zero = tf.constant(0, shape=[self.B], dtype=tf.float32)
    margin = tf.constant(_margin, shape=[self.B], dtype=tf.float32)

    with tf.name_scope("loss"):
        self.losses = tf.maximum(zero, tf.subtract(margin, tf.subtract(self.cos_q_qp, self.cos_q_qn)))
        self.loss = tf.reduce_sum(self.losses) + l2_reg_lambda * l2_loss
        print('loss ', self.loss)

    # Accuracy
    with tf.name_scope("accuracy"):
        self.correct = tf.equal(zero, self.losses)
        self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")

    for v in tf.trainable_variables():
        print(v)
Author: white127, Project: insuranceQA-cnn-lstm, Lines: 51, Source: insqa_cnn.py
Example 20: simple_margin_loss
def simple_margin_loss(anchor, positive, negative, alpha):
    '''Calculate the simple margin loss following the paper 'Sampling Matters in Deep Embedding Learning'.
    Args:
        alpha: not the triplet-loss margin; this is the alpha of the margin-based loss and needs tuning by experiment
        belta: (beta) default 1.1
    '''
    belta = 1.1
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1))
        neg_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1))
        basic_loss = tf.maximum(alpha + pos_dist - belta, 0.0) + tf.maximum(alpha - neg_dist + belta, 0.0)
        # basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
        loss = tf.reduce_mean(basic_loss)
    return loss
Author: Joker316701882, Project: Additive-Margin-Softmax, Lines: 14, Source: facenet.py
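For reference, the margin-based loss from "Sampling Matters in Deep Embedding Learning" that this sketch follows keeps positives within beta - alpha of the anchor and pushes negatives beyond beta + alpha; per triplet (notation mine, with belta playing the role of beta, fixed here at 1.1):

\ell = \max\big(0,\; \alpha + D_{ap} - \beta\big) \;+\; \max\big(0,\; \alpha - D_{an} + \beta\big),
\qquad D_{xy} = \lVert f(x) - f(y) \rVert_2 ,

averaged over the batch.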
Note: The tensorflow.subtract examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the License of the corresponding project; do not reproduce without permission.