This article collects typical usage examples of the tensorflow.divide function in Python. If you have been wondering what tf.divide does, how to call it, or what real-world uses look like, the curated code examples below should help.
Twenty code examples of the divide function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
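Before diving into the examples, here is a minimal, self-contained sketch of what tf.divide does (TensorFlow 1.x graph style, to match the snippets below; the values are invented for illustration): it performs element-wise division with NumPy-style broadcasting.

import tensorflow as tf

a = tf.constant([6.0, 9.0, 12.0])
b = tf.constant(3.0)
quotient = tf.divide(a, b)  # element-wise division with broadcasting -> [2.0, 3.0, 4.0]

with tf.Session() as sess:
    print(sess.run(quotient))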
Example 1: add_dyprune
def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # hyperpara C rate
    prune_mask = tf.Variable(tf.ones_like(weights), name=weights.name[:-2] + 'mask', trainable=False)

    # calculate mask
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights), prune_mask)), tf.reduce_sum(prune_mask))
    var = tf.multiply(weights, prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean) * tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var, tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)
    t1_lower = (mean + var * crate) * 0.25  # hyperpara a
    t1_upper = (mean + var * crate) * 0.45  # hyperpara b

    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1, indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1, indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Author: Ewenwan | Project: Project | Lines: 25 | Source: densenetfinalDNS.py
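If you only want the masked-statistics pattern from Example 1 in isolation, here is a small standalone sketch (illustrative only; the weight values and the mask are invented): the mean and standard deviation are taken over the unpruned entries by dividing masked sums by the mask count.

import tensorflow as tf

weights = tf.constant([[0.2, -1.5], [0.7, 0.0]])
mask = tf.constant([[1.0, 1.0], [1.0, 0.0]])  # 1 = kept, 0 = pruned

kept = tf.reduce_sum(mask)
mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights), mask)), kept)
var = tf.divide(tf.reduce_sum(tf.square(tf.multiply(weights, mask))) - tf.square(mean) * kept, kept)
std = tf.sqrt(var)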
Example 2: compute_nats_and_bits_per_dim
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
    """Computes negative ELBO, which is an upper bound on the negative likelihood.

    Args:
        data_dim: int-like indicating data dimensionality.
        latent_dim: int-like indicating latent dimensionality.
        average_reconstruction: Scalar Tensor indicating the reconstruction cost
            averaged over all data dimensions and any data batches.
        average_prior: Scalar Tensor indicating the negative log-prior probability
            averaged over all latent dimensions and any data batches.

    Returns:
        Tuple of scalar Tensors, representing the nats and bits per data dimension
        (e.g., subpixels) respectively.
    """
    with tf.name_scope(None, default_name="compute_nats_per_dim"):
        data_dim = tf.cast(data_dim, average_reconstruction.dtype)
        latent_dim = tf.cast(latent_dim, average_prior.dtype)
        negative_log_likelihood = data_dim * average_reconstruction
        negative_log_prior = latent_dim * average_prior
        negative_elbo = negative_log_likelihood + negative_log_prior
        nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
        bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
        return nats_per_dim, bits_per_dim
Author: qixiuai | Project: tensor2tensor | Lines: 27 | Source: latent_layers.py
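A hedged usage sketch for Example 2: the call below is an assumption about how a VAE-style model might use this helper, and avg_recon / avg_prior stand in for scalar Tensors produced elsewhere. The final tf.divide is simply the unit conversion bits = nats / ln 2.

nats_per_dim, bits_per_dim = compute_nats_and_bits_per_dim(
    data_dim=3 * 64 * 64,              # e.g. RGB subpixels per image
    latent_dim=256,
    average_reconstruction=avg_recon,  # scalar Tensor: reconstruction term
    average_prior=avg_prior)           # scalar Tensor: prior / KL term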
Example 3: init_training_graph
def init_training_graph(self):
    with tf.name_scope('Evaluation'):
        logits = self.last
        prob_b = tf.squeeze(logits, squeeze_dims=[1, 2])
        self.predictions = tf.argmax(prob_b, axis=1)

    with tf.name_scope('Loss'):
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=prob_b,
            labels=tf.cast(self.train_labels_node, tf.int32),
            name="entropy"))
        tf.summary.scalar("entropy", self.loss)

    with tf.name_scope('Accuracy'):
        LabelInt = tf.cast(self.train_labels_node, tf.int64)
        CorrectPrediction = tf.equal(self.predictions, LabelInt)
        self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
        tf.summary.scalar("accuracy", self.accuracy)

    with tf.name_scope('Prediction'):
        self.TP = tf.count_nonzero(self.predictions * LabelInt)
        self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
        self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
        self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

    with tf.name_scope('Precision'):
        self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
        tf.summary.scalar('Precision', self.precision)

    with tf.name_scope('Recall'):
        self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
        tf.summary.scalar('Recall', self.recall)

    with tf.name_scope('F1'):
        num = tf.multiply(self.precision, self.recall)
        dem = tf.add(self.precision, self.recall)
        self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
        tf.summary.scalar('F1', self.F1)

    with tf.name_scope('MeanAccuracy'):
        Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
        self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision), 2)

    # self.batch = tf.Variable(0, name="batch_iterator")

    self.train_prediction = tf.nn.softmax(logits)
    self.test_prediction = tf.nn.softmax(logits)

    tf.global_variables_initializer().run()
    print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 59 | Source: vgg16.py
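The tf.divide calls in Example 3 assemble the usual confusion-matrix metrics; writing TP, TN, FP, FN for true/false positives/negatives, they compute:

Precision = TP / (TP + FP)
Recall    = TP / (TP + FN)
F1        = 2 * Precision * Recall / (Precision + Recall)
MeanAcc   = (Precision + TN / (TN + FN)) / 2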
Example 4: add_tensorboard
def add_tensorboard(self, session, tensorboard_dir, tb_run_name=None, timeline_enabled=False):
    """
    Add the tensorboard operations to the acoustic RNN
    This method will add ops to feed tensorboard
        self.train_summaries_op : will produce the summary for a training step
        self.test_summaries_op : will produce the summary for a test step
        self.summary_writer_op : will write the summary to disk
    Parameters
    ----------
    :param session: the tensorflow session
    :param tensorboard_dir: path to tensorboard directory
    :param tb_run_name: directory name for the tensorboard files inside tensorboard_dir, if None a default dir
                        will be created
    :param timeline_enabled: enable the output of a trace file for timeline visualization
    """
    self.tensorboard_dir = tensorboard_dir
    self.timeline_enabled = timeline_enabled

    # Define GraphKeys for TensorBoard
    graphkey_training = tf.GraphKeys()
    graphkey_test = tf.GraphKeys()

    # Learning rate
    tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])

    # Loss
    with tf.name_scope('Mean_loss'):
        mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)
        tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])
        tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])

    # Accuracy
    with tf.name_scope('Accuracy_-_Error_Rate'):
        mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)
        tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])
        tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])

    # Hidden state
    with tf.name_scope('RNN_internal_state'):
        for idx, state_variable in enumerate(self.rnn_tuple_state):
            tf.summary.histogram('Training_layer-{0}_cell_state'.format(idx), state_variable[0],
                                 collections=[graphkey_training])
            tf.summary.histogram('Test_layer-{0}_cell_state'.format(idx), state_variable[0],
                                 collections=[graphkey_test])
            tf.summary.histogram('Training_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                 collections=[graphkey_training])
            tf.summary.histogram('Test_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                 collections=[graphkey_test])

    self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)
    self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)

    if tb_run_name is None:
        run_name = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
    else:
        run_name = tb_run_name

    self.summary_writer_op = tf.summary.FileWriter(tensorboard_dir + '/' + run_name + '/', graph=session.graph)
Author: inikdom | Project: rnn-speech | Lines: 57 | Source: AcousticModel.py
Example 5: tf_fastfood_transform
def tf_fastfood_transform(in_x, dd, DD, use_get=False, use_C=False):
    '''Transform from d to D. Pads as necessary.

    For now: assume dd and DD are known in python.'''

    # Tensor d and D
    #assert_D_big = tf.assert_greater_equal(DD, dd, message='d cannot be larger than D')
    #with tf.control_dependencies([assert_D_big]):
    #    ll = tf.cast(tf.round(tf.log(tf.to_float(DD)) / np.log(2)), 'int32')
    #    LL = tf.pow(2, ll)

    # Python d and D
    assert isinstance(dd, int), 'd should be int'
    assert isinstance(DD, int), 'D should be int'
    assert DD >= dd, 'd cannot be larger than D'
    assert dd > 0, 'd and D must be positive'
    ll = int(np.ceil(np.log(DD) / np.log(2)))
    LL = 2 ** ll

    # Make vars
    init_BB = tf.to_float(tf.random_uniform((LL,), 0, 2, dtype='int32')) * 2 - 1
    init_Pi = tf.random_shuffle(tf.range(LL))
    init_GG = tf.random_normal((LL,))
    init_divisor = lambda GG: tf.sqrt(LL * tf.reduce_sum(tf.pow(GG.initialized_value(), 2)))

    if use_get:
        BB = tf.get_variable('B', initializer=init_BB, trainable=False)
        Pi = tf.get_variable('Pi', initializer=init_Pi, trainable=False)
        GG = tf.get_variable('G', initializer=init_GG, trainable=False)
        divisor = tf.get_variable('divisor', initializer=init_divisor(GG), trainable=False)
    else:
        BB = tf.Variable(init_BB, name='B', trainable=False)
        Pi = tf.Variable(init_Pi, name='Pi', trainable=False)
        GG = tf.Variable(init_GG, name='G', trainable=False)
        divisor = tf.Variable(init_divisor(GG), name='divisor', trainable=False)

    fastfood_vars = [BB, Pi, GG, divisor]

    # Implement transform
    dd_pad = tf.pad(in_x, [[0, LL - dd]])
    mul_1 = tf.multiply(BB, dd_pad)
    if use_C:
        mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='c', normalize=True)
    else:
        mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='two', normalize=False)
    mul_3 = tf.gather(mul_2, Pi)
    mul_4 = tf.multiply(mul_3, GG)
    if use_C:
        mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='c', normalize=True)
        print('\nWARNING: check normalization on this next line more carefully\n')
        ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL / ll))
    else:
        mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='two', normalize=False)
        ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL))

    return fastfood_vars, ret
Author: niurouli | Project: SWEM | Lines: 56 | Source: rproj_layers_util.py
Example 6: logG
def logG(x, y, theta):
    fv = tff(theta, y)
    gv = tfg(theta, y)
    mu = tf.add(y, tf.multiply(fv, gl.h))
    pr = tf.subtract(x, mu)
    pr2 = tf.square(pr)
    gv2 = tf.square(gv)
    my2 = tf.constant(2.0, dtype=gl.myftype)
    mypi = tf.constant(np.pi, dtype=gl.myftype)
    lgp1 = tf.negative(tf.divide(tf.log(tf.multiply(my2 * mypi * gl.h, gv2)), my2))
    lgp2 = tf.negative(tf.divide(pr2, tf.multiply(my2 * gl.h, gv2)))
    lg = tf.add(lgp1, lgp2)
    return lg
Author: hbhat4000 | Project: sdeinference | Lines: 13 | Source: tfdtqem2.py
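In the notation of Example 6 (assuming tff and tfg return the drift f and diffusion g of an SDE and gl.h is the time step h), the two tf.divide calls build the log-density of the Gaussian Euler-Maruyama transition:

log G(x | y) = -log(2 * pi * h * g(y)^2) / 2 - (x - (y + f(y) * h))^2 / (2 * h * g(y)^2)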
Example 7: init_training_graph
def init_training_graph(self):
    with tf.name_scope('Evaluation'):
        self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1, 1, 1, 1], scope_name="logits/")
        self.predictions = tf.argmax(self.logits, axis=3)

    with tf.name_scope('Loss'):
        self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits,
            labels=tf.squeeze(tf.cast(self.train_labels_node, tf.int32), squeeze_dims=[3]),
            name="entropy"))
        tf.summary.scalar("entropy", self.loss)

    with tf.name_scope('Accuracy'):
        LabelInt = tf.squeeze(tf.cast(self.train_labels_node, tf.int64), squeeze_dims=[3])
        CorrectPrediction = tf.equal(self.predictions, LabelInt)
        self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
        tf.summary.scalar("accuracy", self.accuracy)

    with tf.name_scope('ClassPrediction'):
        flat_LabelInt = tf.reshape(LabelInt, [-1])
        flat_predictions = tf.reshape(self.predictions, [-1])
        self.cm = tf.confusion_matrix(flat_LabelInt, flat_predictions, self.NUM_LABELS)
        flatten_confusion_matrix = tf.reshape(self.cm, [-1])
        total = tf.reduce_sum(self.cm)
        for i in range(self.NUM_LABELS):
            name = "Label_{}".format(i)
            TP, TN, FP, FN = GetCMInfo_TF(self.cm, i, self.NUM_LABELS)
            precision = tf.divide(TP, tf.add(TP, FP))
            recall = tf.divide(TP, tf.add(TP, FN))
            num = tf.multiply(precision, recall)
            dem = tf.add(precision, recall)
            F1 = tf.scalar_mul(2, tf.divide(num, dem))
            Nprecision = tf.divide(TN, tf.add(TN, FN))
            MeanAcc = tf.divide(tf.add(precision, Nprecision), 2)
            tf.summary.scalar(name + '_Precision', precision)
            tf.summary.scalar(name + '_Recall', recall)
            tf.summary.scalar(name + '_F1', F1)
            tf.summary.scalar(name + '_Performance', MeanAcc)
        confusion_image = tf.reshape(tf.cast(self.cm, tf.float32),
                                     [1, self.NUM_LABELS, self.NUM_LABELS, 1])
        tf.summary.image('confusion', confusion_image)

    self.train_prediction = tf.nn.softmax(self.logits)
    self.test_prediction = self.train_prediction

    tf.global_variables_initializer().run()
    print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 51 | Source: UNetMultiClass_v2.py
Example 8: adloss
def adloss(self, x, xt, y, global_step):
    with tf.variable_scope('reuse_inference') as scope:
        scope.reuse_variables()
        self.inference(x, training=True)
        source_feature = self.feature
        scope.reuse_variables()
        self.inference(xt, training=True)
        target_feature = self.feature
        target_pred = self.output
    with tf.variable_scope('reuse') as scope:
        source_logits, _ = D(source_feature)
        scope.reuse_variables()
        target_logits, _ = D(target_feature)

    self.source_feature = source_feature
    self.target_feature = target_feature
    self.concat_feature = tf.concat([source_feature, target_feature], 0)

    source_result = tf.argmax(y, 1)
    target_result = tf.argmax(target_pred, 1)
    ones = tf.ones_like(source_feature)
    current_source_count = tf.unsorted_segment_sum(ones, source_result, self.num_classes)
    current_target_count = tf.unsorted_segment_sum(ones, target_result, self.num_classes)

    current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
    current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))

    current_source_centroid = tf.divide(
        tf.unsorted_segment_sum(data=source_feature, segment_ids=source_result, num_segments=self.num_classes),
        current_positive_source_count)
    current_target_centroid = tf.divide(
        tf.unsorted_segment_sum(data=target_feature, segment_ids=target_result, num_segments=self.num_classes),
        current_positive_target_count)

    decay = tf.constant(0.3)
    self.decay = decay

    target_centroid = decay * current_target_centroid + (1. - decay) * self.target_moving_centroid
    source_centroid = decay * current_source_centroid + (1. - decay) * self.source_moving_centroid

    self.Semanticloss = protoloss(source_centroid, target_centroid)
    tf.summary.scalar('semanticloss', self.Semanticloss)

    D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=target_logits, labels=tf.ones_like(target_logits)))
    D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=source_logits, labels=tf.zeros_like(source_logits)))

    self.D_loss = D_real_loss + D_fake_loss
    self.G_loss = -self.D_loss
    tf.summary.scalar('G_loss', self.G_loss)
    tf.summary.scalar('JSD', self.G_loss / 2 + math.log(2))

    self.G_loss = 0.1 * self.G_loss
    self.D_loss = 0.1 * self.D_loss
    return self.G_loss, self.D_loss, source_centroid, target_centroid
Author: slowbull | Project: Moving-Semantic-Transfer-Network | Lines: 48 | Source: mstnmodel.py
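The centroid step of Example 8, isolated as a minimal sketch (batch size, feature width and class count are invented): per-class feature sums from tf.unsorted_segment_sum are divided by per-class counts, clamped to at least one so that empty classes do not cause a division by zero.

import tensorflow as tf

num_classes = 10
features = tf.random_normal([32, 128])
labels = tf.random_uniform([32], minval=0, maxval=num_classes, dtype=tf.int32)

counts = tf.unsorted_segment_sum(tf.ones_like(features), labels, num_classes)
counts = tf.maximum(counts, tf.ones_like(counts))  # avoid dividing by zero for empty classes
centroids = tf.divide(tf.unsorted_segment_sum(features, labels, num_classes), counts)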
Example 9: read_tensor_from_image_file
def read_tensor_from_image_file(file_name):
    input_name = "file_reader"
    output_name = "normalized"
    width = input_size
    height = input_size
    num_channels = 3
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(file_reader, channels=3,
                                           name='png_reader')
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                      name='gif_reader'))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
    else:
        image_reader = tf.image.decode_jpeg(file_reader, channels=3,
                                            name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    # resized = tf.image.resize_bilinear(dims_expander, [input_size, input_size])
    normalized = tf.divide(tf.subtract(dims_expander, [input_mean]), [input_std])
    patches = tf.extract_image_patches(normalized,
                                       ksizes=[1, patch_height, patch_width, 1],
                                       strides=[1, patch_height / 4, patch_width / 4, 1],
                                       rates=[1, 1, 1, 1],
                                       padding="VALID")
    patches_shape = tf.shape(patches)
    patches = tf.reshape(patches, [-1, patch_height, patch_width, num_channels])
    patches = tf.image.resize_images(patches, [height, width])
    patches = tf.reshape(patches, [-1, height, width, num_channels])
    sess = tf.Session()
    return sess.run([patches, patches_shape])
Author: jembezmamy | Project: away-pigeons | Lines: 33 | Source: classifier.py
Example 10: __init__
def __init__(self, n_inputs, n_rules, learning_rate=1e-2):
    self.n = n_inputs
    self.m = n_rules
    self.inputs = tf.placeholder(tf.float32, shape=(None, n_inputs))  # Input
    self.targets = tf.placeholder(tf.float32, shape=None)  # Desired output
    mu = tf.get_variable("mu", [n_rules * n_inputs],
                         initializer=tf.random_normal_initializer(0, 1))  # Means of Gaussian MFS
    sigma = tf.get_variable("sigma", [n_rules * n_inputs],
                            initializer=tf.random_normal_initializer(0, 1))  # Standard deviations of Gaussian MFS
    y = tf.get_variable("y", [1, n_rules], initializer=tf.random_normal_initializer(0, 1))  # Sequent centers
    self.params = tf.trainable_variables()

    self.rul = tf.reduce_prod(
        tf.reshape(tf.exp(-0.5 * tf.square(tf.subtract(tf.tile(self.inputs, (1, n_rules)), mu)) / tf.square(sigma)),
                   (-1, n_rules, n_inputs)), axis=2)  # Rule activations

    # Fuzzy base expansion function:
    num = tf.reduce_sum(tf.multiply(self.rul, y), axis=1)
    den = tf.clip_by_value(tf.reduce_sum(self.rul, axis=1), 1e-12, 1e12)
    self.out = tf.divide(num, den)

    self.loss = tf.losses.huber_loss(self.targets, self.out)  # Loss function computation
    # Other loss functions for regression, uncomment to try them:
    # loss = tf.sqrt(tf.losses.mean_squared_error(target, out))
    # loss = tf.losses.absolute_difference(target, out)
    self.optimize = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)  # Optimization step
    # Other optimizers, uncomment to try them:
    # self.optimize = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.loss)
    # self.optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.loss)
    self.init_variables = tf.global_variables_initializer()  # Variable initializer
Author: tiagoCuervo | Project: TensorANFIS | Lines: 30 | Source: anfis.py
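The tf.divide in Example 10 is the normalized weighted average used in Sugeno-style fuzzy systems: with rule activations w_i and consequent centers y_i, the output is

out = sum_i(w_i * y_i) / sum_i(w_i),   with   w_i = prod_j exp(-(x_j - mu_ij)^2 / (2 * sigma_ij^2))

and the denominator is clipped to keep the division well defined when all activations are tiny.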
Example 11: _make_activity_op
def _make_activity_op(self, input_tensor):
    """ Creates the op for calculating the activity of a SOM
    :param input_tensor: A tensor to calculate the activity of. Must be of shape `[batch_size, dim]` where `dim` is
        the dimensionality of the SOM's weights.
    :return A handle to the newly created activity op:
    """
    with self._graph.as_default():
        with tf.name_scope("Activity"):
            # This constant controls the width of the gaussian.
            # The closer to 0 it is, the wider it is.
            c = tf.constant(self._c, dtype="float32")
            # Get the euclidean distance between each neuron and the input vectors
            dist = tf.norm(tf.subtract(
                tf.expand_dims(self._weights, axis=0),
                tf.expand_dims(input_tensor, axis=1)),
                name="Distance")  # [batch_size, neurons]

            # Calculate the Gaussian of the activity. Units with distances closer to 0 will have activities
            # closer to 1.
            activity = tf.exp(tf.multiply(tf.pow(dist, 2), c), name="Gaussian")

            # Convert the activity into a softmax probability distribution
            if self._softmax_activity:
                activity = tf.divide(tf.exp(activity),
                                     tf.expand_dims(tf.reduce_sum(tf.exp(activity), axis=1), axis=-1),
                                     name="Softmax")

            return tf.identity(activity, name="Output")
Author: alexander-gabriel | Project: tensorflow-som | Lines: 28 | Source: tf_som.py
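The explicit exp / reduce_sum / tf.divide construction in the softmax branch of Example 11 is mathematically the softmax of activity along axis 1. Assuming a reasonably recent TensorFlow 1.x, the built-in op (which also subtracts the row maximum for numerical stability) would be an equivalent alternative:

activity = tf.nn.softmax(activity, axis=1)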
Example 12: dia
def dia(model, config, scope, connectsegment, connectfeature):
    with tf.variable_scope(scope), tf.name_scope(scope):
        with tf.variable_scope('inputs'), tf.name_scope('inputs'):
            model['%s_in0length_segment' %scope] = model['%s_out0length' %connectsegment]
            model['%s_in1length_segment' %scope] = model['%s_out1length' %connectsegment]
            model['%s_in2length_segment' %scope] = model['%s_out2length' %connectsegment]
            model['%s_maxin2length_segment' %scope] = model['%s_maxout2length' %connectsegment]
            model['%s_in0length_feature' %scope] = model['%s_out0length' %connectfeature]
            model['%s_in1length_feature' %scope] = model['%s_out1length' %connectfeature]
            model['%s_in2length_feature' %scope] = model['%s_out2length' %connectfeature]
            model['%s_maxin2length_feature' %scope] = model['%s_maxout2length' %connectfeature]
            model['%s_inputs_segment' %scope] = tf.squeeze(model['%s_outputs' %connectsegment], 2, '%s_inputs_segment' %scope)
            model['%s_inputs_feature' %scope] = tf.unstack(tf.transpose(model['%s_outputs' %connectfeature], [1, 0, 2]), name='%s_inputs_feature' %scope)
            model['%s_out0length' %scope] = model['%s_in0length_feature' %scope]
            model['%s_out1length' %scope] = config.getint('global', 'speaker_size')
            model['%s_out2length' %scope] = tf.stack([config.getint('global', 'speaker_size') for _ in xrange(model['%s_out0length' %scope])])
            model['%s_maxout2length' %scope] = config.getint('global', 'speaker_size')

        with tf.variable_scope('outputs'), tf.name_scope('outputs'):
            model['%s_topsegmentvalues' %scope], model['%s_topsegmentindices' %scope] = tf.nn.top_k(tf.transpose(model['%s_inputs_segment' %scope], [1, 0]), config.getint('global', 'speaker_size'))
            model['%s_scores' %scope] = [tf.gather(feature, index) for feature, index in zip(model['%s_inputs_feature' %scope], tf.unstack(model['%s_topsegmentindices' %scope]))]
            model['%s_normalizedscores' %scope] = [tf.divide(score, tf.norm(score, 2, 1, True)) for score in model['%s_scores' %scope]]
            model['%s_outputs' %scope] = tf.add(0.5, tf.multiply(0.5, tf.stack([tf.matmul(score, score, transpose_b=True) for score in model['%s_normalizedscores' %scope]], name='%s_outputs' %scope)))

    return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 25 | Source: dia.py
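In Example 12, tf.divide L2-normalizes each row of a score matrix, so the subsequent matmul of the matrix with its own transpose produces pairwise cosine similarities, and the 0.5 + 0.5 * (...) affine map rescales them from [-1, 1] to [0, 1]:

output_ij = 0.5 + 0.5 * cos(s_i, s_j)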
Example 13: compute_loss
def compute_loss(self, input_tensor, label, name):
    """
    Compute the loss function
    :param input_tensor:
    :param label:
    :param name:
    :return:
    """
    with tf.variable_scope(name):
        # Forward pass to obtain the logits
        inference_ret = self.build_model(input_tensor=input_tensor, name='inference')
        # Compute the loss
        decode_logits = inference_ret['logits']
        # Apply bounded inverse class weights
        inverse_class_weights = tf.divide(1.0,
                                          tf.log(tf.add(tf.constant(1.02, tf.float32),
                                                        tf.nn.softmax(decode_logits))))
        decode_logits_weighted = tf.multiply(decode_logits, inverse_class_weights)

        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=decode_logits_weighted, labels=tf.squeeze(label, squeeze_dims=[3]),
            name='entropy_loss')

        ret = dict()
        ret['entropy_loss'] = loss
        ret['inference_logits'] = inference_ret['logits']

        return ret
Author: dandancat123 | Project: bilibli_notes2 | Lines: 28 | Source: lanenet_binary_segmentation.py
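The tf.divide in Example 13 builds ENet-style bounded inverse class weights, w_c = 1 / ln(1.02 + p_c), where p_c is the softmax probability of class c. Because p_c lies in [0, 1], the weight stays between roughly 1.4 and 50, so rare classes are up-weighted without the weight exploding.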
Example 14: weighted_r2_op
def weighted_r2_op(predictions, targets, inputs):
    """ weighted_r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = weighted_r2_op(y_pred, y_true, input_data)
        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.
        inputs: `Tensor`.

    Returns:
        `Float`. The standard error.
    """
    with tf.name_scope('WeightedStandardError'):
        if hasattr(inputs, '__len__'):
            inputs = tf.add_n(inputs)
        if inputs.get_shape().as_list() != targets.get_shape().as_list():
            raise Exception("Weighted R2 metric requires Inputs and Targets to "
                            "have same shape.")
        a = tf.reduce_sum(tf.square(predictions - inputs))
        b = tf.reduce_sum(tf.square(targets - inputs))
        return tf.divide(a, b)
Author: tflearn | Project: tflearn | Lines: 34 | Source: metrics.py
Example 15: __init__
def __init__(self, state_values, cumulative_rewards, logits, actions,
             action_space, beta):
    ma_adv_norm = tf.get_variable(
        name="moving_average_of_advantage_norm",
        dtype=tf.float32,
        initializer=100.0,
        trainable=False)
    # advantage estimation
    adv = cumulative_rewards - state_values
    # update averaged advantage norm
    update_adv_norm = tf.assign_add(
        ref=ma_adv_norm,
        value=1e-6 * (tf.reduce_mean(tf.square(adv)) - ma_adv_norm))

    # exponentially weighted advantages
    with tf.control_dependencies([update_adv_norm]):
        exp_advs = tf.exp(
            beta * tf.divide(adv, 1e-8 + tf.sqrt(ma_adv_norm)))

    # log\pi_\theta(a|s)
    dist_cls, _ = ModelCatalog.get_action_dist(action_space, {})
    action_dist = dist_cls(logits)
    logprobs = action_dist.logp(actions)

    self.loss = -1.0 * tf.reduce_mean(
        tf.stop_gradient(exp_advs) * logprobs)
Author: robertnishihara | Project: ray | Lines: 26 | Source: marwil_policy_graph.py
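Putting Example 15 together (assuming adv is the advantage estimate and ma_adv_norm tracks a moving average of E[adv^2]), the quantity being minimized is the exponentially weighted imitation loss

loss = -E[ exp(beta * adv / sqrt(ma_adv_norm)) * log pi_theta(a | s) ]

with a stop-gradient on the exponential weight, matching the MARWIL-style objective the file name suggests.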
Example 16: normalize_to_unit_sum
def normalize_to_unit_sum(x, EPS=1e-10):
    ''' Along the last dim '''
    EPS = tf.constant(EPS, dtype=tf.float32)
    x = x + EPS
    x_sum = tf.reduce_sum(x, -1, keep_dims=True)
    x = tf.divide(x, x_sum)
    return x
Author: QianQQ | Project: Voice-Conversion | Lines: 7 | Source: layers.py
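A small usage sketch for Example 16 (values invented): the EPS offset means an all-zero row normalizes to a uniform distribution instead of producing NaNs.

scores = tf.constant([[2.0, 1.0, 1.0],
                      [0.0, 0.0, 0.0]])
probs = normalize_to_unit_sum(scores)  # first row -> [0.5, 0.25, 0.25], second row -> uniform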
Example 17: running_mean
def running_mean(cost, tag_name, batch_size=1):
    with tf.name_scope("running_mean_" + tag_name):
        with tf.variable_scope(tag_name):
            cost_sum = tf.get_variable(
                "cost_sum",
                initializer=tf.zeros_initializer,
                dtype=tf.float64,
                shape=(),
                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                trainable=False)
            batches = tf.get_variable(
                "cost_num_batches",
                initializer=tf.zeros_initializer,
                dtype=tf.int32,
                shape=(),
                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                trainable=False)

            cost_add = tf.assign_add(cost_sum, tf.cast(cost, dtype=tf.float64))
            batches_add = tf.assign_add(batches, batch_size)
            update_cost_mean = tf.group(cost_add, batches_add)

            reset_batches = tf.assign(batches, 0)
            reset_cost_sum = tf.assign(cost_sum, 0.0)
            reset_cost_mean = tf.group(reset_batches, reset_cost_sum)

            mean_cost = tf.divide(
                cost_sum,
                tf.cast(batches, dtype=tf.float64))
            train_loss_summary = tf.summary.scalar(tag_name, mean_cost)

    return reset_cost_mean, update_cost_mean, train_loss_summary
Author: cupslab | Project: neural_network_cracking | Lines: 32 | Source: pass_utils.py
Example 18: compute_categorical_loss_and_accuracy
def compute_categorical_loss_and_accuracy(logits, targets):
    """return total loss, reg loss (subset of total), and accuracy"""
    with tf.variable_scope('loss'):
        regularization_losses = sum(
            tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES
            )
        )
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=targets
            ),
            axis=0,
            name='loss'
        ) + regularization_losses
        preds = tf.nn.softmax(logits, name='preds')
        correct_preds = tf.equal(
            tf.argmax(preds, 1), tf.argmax(targets, 1),
            name='correct_preds'
        )
        accuracy = tf.divide(
            tf.reduce_sum(tf.cast(correct_preds, tf.float32)),
            tf.cast(tf.shape(targets)[0], tf.float32),
            name='accuracy'
        )
    return loss, regularization_losses, accuracy
Author: TomaszGolan | Project: ANNMINERvA | Lines: 26 | Source: models_tricolumnar.py
Example 19: _ProcessSingleScale
def _ProcessSingleScale(scale_index,
                        boxes,
                        features,
                        scales,
                        scores,
                        reuse=True):
    """Resize the image and run feature extraction and keypoint selection.

    This function will be passed into tf.while_loop() and be called
    repeatedly. The input boxes are collected from the previous iteration
    [0: scale_index - 1]. We get the current scale by
    image_scales[scale_index], and run image resizing, feature extraction and
    keypoint selection. Then we will get a new set of selected_boxes for
    current scale. In the end, we concat the previous boxes with current
    selected_boxes as the output.

    Args:
        scale_index: A valid index in the image_scales.
        boxes: Box tensor with the shape of [N, 4].
        features: Feature tensor with the shape of [N, depth].
        scales: Scale tensor with the shape of [N].
        scores: Attention score tensor with the shape of [N].
        reuse: Whether or not the layer and its variables should be reused.

    Returns:
        scale_index: The next scale index for processing.
        boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
        features: Concatenated feature tensor with the shape of [K, depth].
        scales: Concatenated scale tensor with the shape of [K].
        scores: Concatenated attention score tensor with the shape of [K].
    """
    scale = tf.gather(image_scales, scale_index)
    new_image_size = tf.to_int32(tf.round(original_image_shape_float * scale))
    resized_image = tf.image.resize_bilinear(image_tensor, new_image_size)

    attention, feature_map = model_fn(
        resized_image, normalized_image=True, reuse=reuse)

    rf_boxes = CalculateReceptiveBoxes(
        tf.shape(feature_map)[1],
        tf.shape(feature_map)[2], rf, stride, padding)
    # Re-project back to the original image space.
    rf_boxes = tf.divide(rf_boxes, scale)
    attention = tf.reshape(attention, [-1])
    feature_map = tf.reshape(feature_map, [-1, feature_depth])

    # Use attention score to select feature vectors.
    indices = tf.reshape(tf.where(attention >= abs_thres), [-1])
    selected_boxes = tf.gather(rf_boxes, indices)
    selected_features = tf.gather(feature_map, indices)
    selected_scores = tf.gather(attention, indices)
    selected_scales = tf.ones_like(selected_scores, tf.float32) / scale

    # Concat with the previous result from different scales.
    boxes = tf.concat([boxes, selected_boxes], 0)
    features = tf.concat([features, selected_features], 0)
    scales = tf.concat([scales, selected_scales], 0)
    scores = tf.concat([scores, selected_scores], 0)
    return scale_index + 1, boxes, features, scales, scores
Author: 812864539 | Project: models | Lines: 60 | Source: feature_extractor.py
Example 20: InstanceNorm
def InstanceNorm(x, epsilon=1e-5, data_format='NHWC', use_affine=True):
    """
    Instance Normalization, as in the paper:
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`_.

    Args:
        x (tf.Tensor): a 4D tensor.
        epsilon (float): avoid divide-by-zero
        use_affine (bool): whether to apply learnable affine transformation
    """
    shape = x.get_shape().as_list()
    assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"

    if data_format == 'NHWC':
        axis = [1, 2]
        ch = shape[3]
        new_shape = [1, 1, 1, ch]
    else:
        axis = [2, 3]
        ch = shape[1]
        new_shape = [1, ch, 1, 1]
    assert ch is not None, "Input of InstanceNorm require known channel!"

    mean, var = tf.nn.moments(x, axis, keep_dims=True)

    if not use_affine:
        return tf.divide(x - mean, tf.sqrt(var + epsilon), name='output')

    beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
    beta = tf.reshape(beta, new_shape)
    gamma = tf.get_variable('gamma', [ch], initializer=tf.constant_initializer(1.0))
    gamma = tf.reshape(gamma, new_shape)
    return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
Author: caserzer | Project: tensorpack | Lines: 34 | Source: layer_norm.py
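In the no-affine branch of Example 20, the tf.divide line is exactly the instance-normalization formula: for each sample n and channel c,

IN(x)[n, c, h, w] = (x[n, c, h, w] - mean[n, c]) / sqrt(var[n, c] + epsilon)

where the mean and variance are taken over the spatial axes only; the affine branch additionally applies the learnable gamma and beta via tf.nn.batch_normalization.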
Note: The tensorflow.divide examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not repost without permission.