This article collects typical usage examples of the Python tensorflow.ones_like function. If you are wondering what ones_like does, how to call it, or what it looks like in real code, the curated examples below should help.
The listing shows 20 code examples of the ones_like function drawn from open-source projects, ordered by popularity by default; feedback on which examples are useful helps surface better Python code samples.
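As a quick orientation before the project examples, here is a minimal sketch of what tf.ones_like does (written in the same TF 1.x graph style as the examples below; the tensor names are illustrative only): it returns a tensor of ones with the same shape and dtype as its input, which is why it shows up so often for GAN labels, masks, and shape-matching broadcasts.

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
ones = tf.ones_like(x)               # same shape and dtype as x, filled with 1.0
real_labels = tf.ones_like(x[:, 0])  # e.g. per-sample "real" labels for a GAN discriminator
with tf.Session() as sess:
    print(sess.run(ones))            # [[1. 1.] [1. 1.]]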
Example 1: grad_fn

def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):
    # Gradient w.r.t. each input: ones shaped like the input, scaled by its (1-based) index.
    grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]
    # Gradient w.r.t. each variable, with the index offset by the number of inputs.
    grad_vars = [
        tf.ones_like(t) * (i + len(inputs) + 1.)
        for i, t in enumerate(variables)
    ]
    return grad_inputs, grad_vars

Developer ID: qixiuai, Project: tensor2tensor, Lines of code: 7, Source file: common_layers_test.py
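The grad_fn above is a test helper whose signature presumably targets tensor2tensor's custom-gradient utilities. For a rough idea of how a hand-written gradient built from ones_like plugs into plain TensorFlow, here is a hedged sketch using the standard tf.custom_gradient decorator (not the tensor2tensor API; the scaling factor is arbitrary):

@tf.custom_gradient
def scaled_grad_identity(x):
    def grad(dy):
        # Pretend gradient: ones shaped like x, scaled and multiplied by the upstream gradient.
        return tf.ones_like(x) * 2.0 * dy
    return tf.identity(x), grad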
Example 2: generalised_dice_loss

def generalised_dice_loss(prediction,
                          ground_truth,
                          weight_map=None,
                          type_weight='Square'):
    """
    Function to calculate the Generalised Dice Loss defined in
    Sudre, C. et al. (2017) "Generalised Dice overlap as a deep learning
    loss function for highly unbalanced segmentations", DLMIA 2017.

    :param prediction: the logits
    :param ground_truth: the segmentation ground truth
    :param weight_map: optional per-voxel weight map
    :param type_weight: type of weighting allowed between labels (choice
        between Square (square of inverse of volume),
        Simple (inverse of volume) and Uniform (no weighting))
    :return: the loss
    """
    ground_truth = tf.to_int64(ground_truth)
    n_voxels = ground_truth.shape[0].value
    n_classes = prediction.shape[1].value
    ids = tf.constant(np.arange(n_voxels), dtype=tf.int64)
    ids = tf.stack([ids, ground_truth], axis=1)
    one_hot = tf.SparseTensor(indices=ids,
                              values=tf.ones([n_voxels], dtype=tf.float32),
                              dense_shape=[n_voxels, n_classes])
    if weight_map is not None:
        weight_map_nclasses = tf.reshape(
            tf.tile(weight_map, [n_classes]), prediction.get_shape())
        ref_vol = tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot, reduction_axes=[0])
        intersect = tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
        seg_vol = tf.reduce_sum(
            tf.multiply(weight_map_nclasses, prediction), 0)
    else:
        ref_vol = tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
        intersect = tf.sparse_reduce_sum(one_hot * prediction,
                                         reduction_axes=[0])
        seg_vol = tf.reduce_sum(prediction, 0)
    if type_weight == 'Square':
        weights = tf.reciprocal(tf.square(ref_vol))
    elif type_weight == 'Simple':
        weights = tf.reciprocal(ref_vol)
    elif type_weight == 'Uniform':
        weights = tf.ones_like(ref_vol)
    else:
        raise ValueError("The variable type_weight \"{}\" "
                         "is not defined.".format(type_weight))
    # Replace infinite weights (empty classes) with the largest finite weight
    # so that missing labels do not dominate the loss.
    new_weights = tf.where(tf.is_inf(weights), tf.zeros_like(weights), weights)
    weights = tf.where(tf.is_inf(weights), tf.ones_like(weights) *
                       tf.reduce_max(new_weights), weights)
    generalised_dice_numerator = \
        2 * tf.reduce_sum(tf.multiply(weights, intersect))
    generalised_dice_denominator = \
        tf.reduce_sum(tf.multiply(weights, seg_vol + ref_vol))
    generalised_dice_score = \
        generalised_dice_numerator / generalised_dice_denominator
    return 1 - generalised_dice_score

Developer ID: nhu2000, Project: NiftyNet, Lines of code: 60, Source file: loss_segmentation.py
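For reference, the value computed above corresponds roughly (ignoring the capping of infinite weights) to the Generalised Dice formulation from Sudre et al.:

    GDL = 1 - 2 * sum_l( w_l * sum_n(r_ln * p_ln) ) / sum_l( w_l * sum_n(r_ln + p_ln) )

where r is the one-hot reference segmentation, p the predicted probabilities, and the class weight w_l is 1, 1/V_l, or 1/V_l^2 (V_l being the reference volume of class l) for the Uniform, Simple, and Square options; tf.ones_like(ref_vol) is what produces the uniform weights.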
Example 3: prune_outside_window

def prune_outside_window(keypoints, window, scope=None):
    """Prunes keypoints that fall outside a given window.

    This function replaces keypoints that fall outside the given window with nan.
    See also clip_to_window which clips any keypoints that fall outside the given
    window.

    Args:
        keypoints: a tensor of shape [num_instances, num_keypoints, 2]
        window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
            window outside of which the op should prune the keypoints.
        scope: name scope.

    Returns:
        new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    """
    with tf.name_scope(scope, 'PruneOutsideWindow'):
        y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        valid_indices = tf.logical_and(
            tf.logical_and(y >= win_y_min, y <= win_y_max),
            tf.logical_and(x >= win_x_min, x <= win_x_max))
        new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
        new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
        new_keypoints = tf.concat([new_y, new_x], 2)
        return new_keypoints

Developer ID: ALISCIFP, Project: models, Lines of code: 29, Source file: keypoint_ops.py
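A minimal usage sketch (assuming the function above is importable; the coordinates are made up): keypoints outside the window come back as NaN rather than being clipped.

keypoints = tf.constant([[[0.2, 0.3],    # inside the window
                          [0.9, 0.9]]])  # outside the window
window = tf.constant([0.0, 0.0, 0.5, 0.5])  # [y_min, x_min, y_max, x_max]
pruned = prune_outside_window(keypoints, window)
# -> [[[0.2, 0.3], [nan, nan]]]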
Example 4: __init__

def __init__(self,
             sess,
             dataset_name='facades',
             checkpoint_dir=None):
    self.sess = sess
    self.dataset_name = dataset_name
    self.checkpoint_dir = checkpoint_dir

    self.real_data = tf.placeholder(tf.float32,
                                    [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3 + 3],
                                    name='input_images')
    self.real_A = self.real_data[:, :, :, :3]
    self.real_B = self.real_data[:, :, :, 3:6]

    self.fake_B = generator(self.real_A, name="generatorA2B")
    self.fake_A = generator(self.real_B, name="generatorB2A")
    self.fake_B_fake_A = generator(self.fake_B, reuse=True, name="generatorB2A")
    self.fake_A_fake_B = generator(self.fake_A, reuse=True, name="generatorA2B")

    self.DA_real = discriminator(self.real_A, reuse=False, name="descriminatorA")
    self.DB_real = discriminator(self.real_B, reuse=False, name="descriminatorB")
    self.DA_fake = discriminator(self.fake_A, reuse=True, name="descriminatorA")
    self.DB_fake = discriminator(self.fake_B, reuse=True, name="descriminatorB")

    # Generator losses: fool the discriminator (labels = ones) plus L1 cycle
    # and reconstruction terms weighted by 100.
    self.g_loss_a2b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_fake, labels=tf.ones_like(self.DB_fake))) + 100 * tf.reduce_mean(
        tf.abs(self.real_A - self.fake_B_fake_A)) + 100 * tf.reduce_mean(
        tf.abs(self.real_B - self.fake_B))
    self.g_loss_b2a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_fake, labels=tf.ones_like(self.DA_fake))) + 100 * tf.reduce_mean(
        tf.abs(self.real_B - self.fake_A_fake_B)) + 100 * tf.reduce_mean(
        tf.abs(self.real_A - self.fake_A))
    self.g_loss = self.g_loss_a2b + self.g_loss_b2a

    # Discriminator loss: fake samples labelled with zeros, real samples with ones.
    self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_fake, labels=tf.zeros_like(self.DB_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_real, labels=tf.ones_like(self.DB_real))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_fake, labels=tf.zeros_like(self.DA_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_real, labels=tf.ones_like(self.DA_real)))

    self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
    self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
    self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
    self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
    self.real_A_sum = tf.summary.image("real_A", self.real_A)
    self.real_B_sum = tf.summary.image("real_B", self.real_B)
    self.fake_A_sum = tf.summary.image("fake_A", self.fake_A)
    self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
    self.fake_AB_sum = tf.summary.image("fake_AB", self.fake_A_fake_B)
    self.fake_BA_sum = tf.summary.image("fake_BA", self.fake_B_fake_A)
    self.d_sum = tf.summary.merge([self.d_loss_sum])
    self.g_sum = tf.summary.merge([self.g_loss_sum, self.g_loss_a2b_sum, self.g_loss_b2a_sum,
                                   self.real_A_sum, self.real_B_sum, self.fake_A_sum,
                                   self.fake_B_sum, self.fake_AB_sum, self.fake_BA_sum])

    training_vars = tf.trainable_variables()
    self.d_vars = [var for var in training_vars if 'd_' in var.name]
    self.g_vars = [var for var in training_vars if 'g_' in var.name]
    self.saver = tf.train.Saver(max_to_keep=5)

Developer ID: yaoyaowd, Project: tensorflow_demo, Lines of code: 60, Source file: cycle_model.py
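This example (and several below) relies on the same ones_like / zeros_like trick: the discriminator target for real samples is a tensor of ones shaped like its logits, and for fake samples a tensor of zeros, so the labels automatically match whatever batch shape the logits have. Stripped of the surrounding model, the pattern is simply (sketch; the logits names are illustrative):

d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_real_logits, labels=tf.ones_like(d_real_logits))) + \
    tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_fake_logits, labels=tf.zeros_like(d_fake_logits)))
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=d_fake_logits, labels=tf.ones_like(d_fake_logits)))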
Example 5: __init__

def __init__(self,
             sess,
             batch_size=32,
             image_size=256,
             lam=0.8,
             checkpoint_dir=None):
    self.sess = sess
    self.batch_size = batch_size
    self.image_size = image_size
    self.image_shape = [image_size, image_size, 3]
    self.lam = lam
    self.checkpoint_dir = checkpoint_dir
    self.global_step = tf.Variable(0, trainable=False)

    self.images = tf.placeholder(tf.float32, [batch_size] + self.image_shape, name='images')
    self.images_summary = tf.summary.image("image", self.images)

    self.d_bns = [batch_norm(name='d_bn{}'.format(i)) for i in range(5)]
    self.local_d_bns = [batch_norm(name='d_local_bn{}'.format(i)) for i in range(4)]
    self.g_bns = [batch_norm(name='g_bn{}'.format(i)) for i in range(15)]

    self.D, self.D_logits = self.discriminator(self.images, self.image_size)
    self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.D_logits, labels=tf.ones_like(self.D)))
    self.D_summary = tf.summary.histogram("d", self.D)
    self.d_loss_real_summary = tf.summary.scalar("d_loss_real", self.d_loss_real)

    self.masks = tf.placeholder(tf.float32, [batch_size] + self.image_shape, name='masks')
    self.MG = tf.multiply(self.images, self.masks)
    self.G = self.generator(self.MG)
    self.MG_summary = tf.summary.image("mg", self.MG)
    self.G_summary = tf.summary.image("g", self.G)

    self.D_fake, self.D_fake_logits = self.discriminator(self.G, self.image_size, reuse=True)
    self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.D_fake_logits, labels=tf.zeros_like(self.D_fake)))
    self.D_fake_summary = tf.summary.histogram("d_fake", self.D_fake)
    self.d_loss_fake_summary = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
    self.d_loss = self.d_loss_real + self.d_loss_fake

    # Generator loss: adversarial term plus L2 reconstruction term, blended by lam.
    self.g_loss_d = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.D_fake_logits, labels=tf.ones_like(self.D_fake)))
    self.g_loss_l = tf.reduce_mean(tf.contrib.layers.flatten(
        tf.multiply(self.G - self.images, self.G - self.images)))
    self.g_loss = (1 - self.lam) * self.g_loss_d + self.lam * self.g_loss_l
    self.g_loss_d_summary = tf.summary.scalar("g_loss_d", self.g_loss_d)
    self.g_loss_l_summary = tf.summary.scalar("g_loss_l", self.g_loss_l)
    self.g_loss_summary = tf.summary.scalar("g_loss", self.g_loss)

    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver(max_to_keep=10)

    self.g_summary = tf.summary.merge([
        self.G_summary, self.MG_summary, self.D_fake_summary, self.d_loss_fake_summary,
        self.g_loss_summary, self.g_loss_d_summary, self.g_loss_l_summary])
    self.d_summary = tf.summary.merge([
        self.images_summary, self.D_summary, self.d_loss_real_summary])
    self.writer = tf.summary.FileWriter(os.path.join(self.checkpoint_dir, "logs"), self.sess.graph)

Developer ID: yaoyaowd, Project: tensorflow_demo, Lines of code: 60, Source file: model.py
Example 6: p_zt

def p_zt(self, prev_state, t):
    """Computes the model p(z_t | z_{t-1})."""
    batch_size = tf.shape(prev_state)[0]
    if t > 0:
        z_mu_p = prev_state + self.bs[t - 1]
        p_zt = tf.contrib.distributions.Normal(
            loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance))
        return p_zt
    else:  # p(z_0) is a mixture of two Normals
        mu_pos = tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean
        mu_neg = tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean
        z0_pos = tf.contrib.distributions.Normal(
            loc=mu_pos,
            scale=tf.sqrt(tf.ones_like(mu_pos) * self.variance))
        z0_neg = tf.contrib.distributions.Normal(
            loc=mu_neg,
            scale=tf.sqrt(tf.ones_like(mu_neg) * self.variance))
        mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1 - self.mixing_coeff], dtype=tf.float64)
        mode_probs = tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1])
        mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs)
        z0_dist = tf.contrib.distributions.Mixture(
            cat=mode_selection_dist,
            components=[z0_pos, z0_neg],
            validate_args=False)
        return z0_dist

Developer ID: 812864539, Project: models, Lines of code: 25, Source file: models.py
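Here tf.ones_like is only broadcasting a scalar variance to the shape of the mean, so the Normal receives a per-dimension scale tensor. A hedged standalone sketch of the same idea (shapes and the 0.25 variance are illustrative; it uses the same tf.contrib.distributions API as the example):

loc = tf.zeros([4, 3])                     # batch of 4 states, state size 3
scale = tf.sqrt(tf.ones_like(loc) * 0.25)  # scalar variance broadcast to loc's shape
dist = tf.contrib.distributions.Normal(loc=loc, scale=scale)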
Example 7: __init__

def __init__(self, q_values, observations, num_actions, stochastic, eps,
             softmax, softmax_temp):
    if softmax:
        action_dist = Categorical(q_values / softmax_temp)
        self.action = action_dist.sample()
        self.action_prob = action_dist.sampled_action_prob()
        return

    deterministic_actions = tf.argmax(q_values, axis=1)
    batch_size = tf.shape(observations)[0]

    # Special case masked out actions (q_value ~= -inf) so that we don't
    # even consider them for exploration.
    random_valid_action_logits = tf.where(
        tf.equal(q_values, tf.float32.min),
        tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
    random_actions = tf.squeeze(
        tf.multinomial(random_valid_action_logits, 1), axis=1)

    chose_random = tf.random_uniform(
        tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
    stochastic_actions = tf.where(chose_random, random_actions,
                                  deterministic_actions)
    self.action = tf.cond(stochastic, lambda: stochastic_actions,
                          lambda: deterministic_actions)
    self.action_prob = None

Developer ID: robertnishihara, Project: ray, Lines of code: 26, Source file: dqn_policy_graph.py
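The tf.where / ones_like combination above builds uniform exploration logits over the valid actions only: positions whose Q-value equals tf.float32.min keep that near -inf value (so tf.multinomial essentially never samples them), while every other position gets the same logit of 1. A small hedged illustration with a made-up Q row:

q = tf.constant([[3.0, tf.float32.min, -1.0]])
uniform_valid_logits = tf.where(
    tf.equal(q, tf.float32.min),
    tf.ones_like(q) * tf.float32.min,   # keep masked actions masked
    tf.ones_like(q))                    # every valid action gets the same logit
# -> [[1.0, ~-3.4e38, 1.0]]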
Example 8: build_losses

def build_losses(self, logits_real, logits_fake):
    """D and G play a two-player minimax game with value function V(G, D):

        min_G max_D V(D, G) = E_{x ~ p_data}[log D(x)] + E_{z ~ p_fake}[log(1 - D(G(z)))]

    Args:
        logits_real (tf.Tensor): discriminator logits from real samples
        logits_fake (tf.Tensor): discriminator logits from fake samples produced by the generator
    """
    with tf.name_scope("GAN_loss"):
        score_real = tf.sigmoid(logits_real)
        score_fake = tf.sigmoid(logits_fake)
        tf.summary.histogram('score-real', score_real)
        tf.summary.histogram('score-fake', score_fake)

        with tf.name_scope("discrim"):
            d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
            d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')
            d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
            d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')
            d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
            self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')

        with tf.name_scope("gen"):
            self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
            g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')

        add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)

Developer ID: ahuirecome, Project: tensorpack, Lines of code: 33, Source file: GAN.py
Example 9: _sample_n

def _sample_n(self, n, seed=None):
    n_draws = tf.cast(self.total_count, dtype=tf.int32)
    k = self.event_shape_tensor()[0]

    # Broadcast total_count and logits to the same batch shape.
    n_draws = tf.ones_like(
        self.logits[..., 0], dtype=n_draws.dtype) * n_draws
    logits = tf.ones_like(
        n_draws[..., tf.newaxis], dtype=self.logits.dtype) * self.logits

    # Flatten the total_count and logits.
    flat_logits = tf.reshape(logits, [-1, k])      # [B1B2...Bm, k]
    flat_ndraws = n * tf.reshape(n_draws, [-1])    # [B1B2...Bm]

    # Compute each total_count / logits combination with map_fn.
    def _sample_single(args):
        logits, n_draw = args[0], args[1]          # [K], []
        x = tf.multinomial(logits[tf.newaxis, ...], n_draw,
                           seed)                   # [1, n*n_draw]
        x = tf.reshape(x, shape=[n, -1])           # [n, n_draw]
        x = tf.reduce_sum(tf.one_hot(x, depth=k), axis=-2)  # [n, k]
        return x

    x = tf.map_fn(
        _sample_single, [flat_logits, flat_ndraws],
        dtype=self.dtype)                          # [B1B2...Bm, n, k]

    # Reshape the results to the proper shape.
    x = tf.transpose(x, perm=[1, 0, 2])
    final_shape = tf.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = tf.reshape(x, final_shape)                 # [n, B1, B2, ..., Bm, k]
    return x

Developer ID: asudomoeva, Project: probability, Lines of code: 32, Source file: multinomial.py
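The two ones_like lines are a common trick for broadcasting two tensors against each other without knowing the batch shape statically: multiplying one operand by ones_like of a slice of the other expands it to the other's batch shape. Condensed into a hedged sketch (shapes are illustrative):

total_count = tf.constant(5)                     # scalar draw count
logits = tf.zeros([2, 3, 4])                     # batch shape [2, 3], 4 categories
n_draws = tf.ones_like(logits[..., 0], dtype=tf.int32) * total_count            # -> shape [2, 3]
logits_b = tf.ones_like(n_draws[..., tf.newaxis], dtype=logits.dtype) * logits  # -> shape [2, 3, 4]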
Example 10: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # hyperparameter c rate
    prune_mask = tf.Variable(tf.ones_like(weights), name=weights.name[:-2] + 'mask', trainable=False)

    # Calculate the mask: mean and standard deviation of the currently un-pruned weights.
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights), prune_mask)), tf.reduce_sum(prune_mask))
    var = tf.multiply(weights, prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean) * tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var, tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)

    t1_lower = (mean + var * crate) * 0.25  # hyperparameter a
    t1_upper = (mean + var * crate) * 0.45  # hyperparameter b

    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1, indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1, indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc

Developer ID: Ewenwan, Project: Project, Lines of code: 25, Source file: densenetfinalDNS.py
Example 11: create_model

def create_model(self):
    """Create tensorflow variables and graph."""
    self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                   name="inp%i" % t)
                    for t in range(self.bucket[0])]
    self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                  name="labels%i" % t)
                   for t in range(self.bucket[1])]
    # Uniform per-timestep loss weights.
    self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                    for labels_t in self.labels]

    # Decoder input: prepend some "GO" token and drop the final
    # token of the encoder input.
    self.dec_inp = ([GO_ID * tf.ones_like(self.labels[0], dtype=np.int32,
                                          name="GO")] + self.labels[:-1])

    single_cell = tf.nn.rnn_cell.LSTMCell(self.memory_dim)
    if self.num_layers > 1:
        self.cell = tf.nn.rnn_cell.MultiRNNCell(
            [single_cell] * self.num_layers)
    else:
        self.cell = single_cell

    # Sequence to sequence model.
    self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
        self.enc_inp, self.dec_inp, self.cell, len(self.en_chars),
        len(self.hi_chars), self.embedding_dim)

Developer ID: chsasank, Project: indic-transliteration, Lines of code: 29, Source file: train_transliterator.py
Example 12: build_model

def build_model(self):
    if self.y_dim:
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')

    self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape,
                                 name='real_images')
    self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape,
                                        name='sample_images')
    self.z = tf.placeholder(tf.float32, [None, self.z_dim],
                            name='z')

    self.G = self.generator(self.z)
    self.D = self.discriminator(self.images)
    self.sampler = self.sampler(self.z)
    self.D_ = self.discriminator(self.G, reuse=True)

    self.d_loss_real = binary_cross_entropy_with_logits(tf.ones_like(self.D), self.D)
    self.d_loss_fake = binary_cross_entropy_with_logits(tf.zeros_like(self.D_), self.D_)
    self.g_loss = binary_cross_entropy_with_logits(tf.ones_like(self.D_), self.D_)
    self.d_loss = self.d_loss_real + self.d_loss_fake

    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver()

Developer ID: ixtel, Project: DCGAN-tensorflow, Lines of code: 29, Source file: model.py
Example 13: compute_losses

def compute_losses(self, images, wrong_images, fake_images, embeddings):
    real_logit = self.model.get_discriminator(images, embeddings)
    wrong_logit = self.model.get_discriminator(wrong_images, embeddings)
    fake_logit = self.model.get_discriminator(fake_images, embeddings)

    # Note: the positional (logits, targets) call style below is the pre-1.0
    # TensorFlow API; TF >= 1.0 requires keyword arguments
    # sigmoid_cross_entropy_with_logits(labels=..., logits=...).
    real_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(real_logit,
                                                tf.ones_like(real_logit))
    real_d_loss = tf.reduce_mean(real_d_loss)
    wrong_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(wrong_logit,
                                                tf.zeros_like(wrong_logit))
    wrong_d_loss = tf.reduce_mean(wrong_d_loss)
    fake_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                tf.zeros_like(fake_logit))
    fake_d_loss = tf.reduce_mean(fake_d_loss)

    if cfg.TRAIN.B_WRONG:
        discriminator_loss = \
            real_d_loss + (wrong_d_loss + fake_d_loss) / 2.
        self.log_vars.append(("d_loss_wrong", wrong_d_loss))
    else:
        discriminator_loss = real_d_loss + fake_d_loss
    self.log_vars.append(("d_loss_real", real_d_loss))
    self.log_vars.append(("d_loss_fake", fake_d_loss))

    generator_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                tf.ones_like(fake_logit))
    generator_loss = tf.reduce_mean(generator_loss)
    return discriminator_loss, generator_loss

Developer ID: Soledad89, Project: StackGAN, Lines of code: 32, Source file: trainer.py
Example 14: add_optimization

def add_optimization(learning_rate, beta1, beta2, disc_gen, disc_true,
                     gen_label, disc_label):
    # Pre-1.0 positional call style: (logits, targets).
    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        disc_gen, tf.ones_like(disc_gen)), name='gen_loss')
    disc_g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        disc_gen, tf.zeros_like(disc_gen)))
    disc_x_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        disc_true, tf.ones_like(disc_true)))
    disc_loss = tf.add(disc_g_loss, disc_x_loss, name='disc_loss')

    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=gen_label)
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=disc_label)
    # print 'gen vars---------------------'
    # for v in gen_vars:
    #     print v.name
    # print 'disc vars----------------'
    # for v in disc_vars:
    #     print v.name

    gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                     beta1=beta1,
                                     beta2=beta2).minimize(gen_loss,
                                                           var_list=gen_vars)
    disc_opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                      beta1=beta1,
                                      beta2=beta2).minimize(disc_loss,
                                                            var_list=disc_vars)
    return gen_loss, disc_loss, gen_opt, disc_opt

Developer ID: kkihara, Project: GAN, Lines of code: 31, Source file: model.py
Example 15: build_loss_and_gradients

def build_loss_and_gradients(self, var_list):
    x_true = list(six.itervalues(self.data))[0]
    x_fake = list(six.iterkeys(self.data))[0]

    with tf.variable_scope("Disc"):
        d_true = self.discriminator(x_true)
    with tf.variable_scope("Disc", reuse=True):
        d_fake = self.discriminator(x_fake)

    if self.logging:
        tf.summary.histogram("discriminator_outputs",
                             tf.concat([d_true, d_fake], axis=0),
                             collections=[self._summary_key])

    # Discriminator loss: real -> ones, fake -> zeros; generator loss: fake -> ones.
    loss_d = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_true), logits=d_true) + \
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_fake), logits=d_fake)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_fake), logits=d_fake)
    loss_d = tf.reduce_mean(loss_d)
    loss = tf.reduce_mean(loss)

    var_list_d = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope="Disc")
    if var_list is None:
        var_list = [v for v in tf.trainable_variables() if v not in var_list_d]

    grads_d = tf.gradients(loss_d, var_list_d)
    grads = tf.gradients(loss, var_list)
    grads_and_vars_d = list(zip(grads_d, var_list_d))
    grads_and_vars = list(zip(grads, var_list))
    return loss, grads_and_vars, loss_d, grads_and_vars_d

Developer ID: wujsAct, Project: edward, Lines of code: 33, Source file: gan_inference.py
Example 16: build_model

def build_model(self):
    self.is_training = tf.placeholder(tf.bool, name='is_training')
    self.images = tf.placeholder(
        tf.float32, [None] + self.image_shape, name='real_images')
    # Low-resolution images via average pooling implemented as reshape + reduce_mean.
    self.lowres_images = tf.reduce_mean(tf.reshape(self.images,
        [self.batch_size, self.lowres_size, self.lowres,
         self.lowres_size, self.lowres, self.c_dim]), [2, 4])
    self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')
    self.z_sum = tf.summary.histogram("z", self.z)

    self.G = self.generator(self.z)
    self.lowres_G = tf.reduce_mean(tf.reshape(self.G,
        [self.batch_size, self.lowres_size, self.lowres,
         self.lowres_size, self.lowres, self.c_dim]), [2, 4])
    self.D, self.D_logits = self.discriminator(self.images)
    self.D_, self.D_logits_ = self.discriminator(self.G, reuse=True)

    self.d_sum = tf.summary.histogram("d", self.D)
    self.d__sum = tf.summary.histogram("d_", self.D_)
    self.G_sum = tf.summary.image("G", self.G)

    self.d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits,
                                                labels=tf.ones_like(self.D)))
    self.d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                labels=tf.zeros_like(self.D_)))
    self.g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_logits_,
                                                labels=tf.ones_like(self.D_)))

    self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
    self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
    self.d_loss = self.d_loss_real + self.d_loss_fake
    self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
    self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

    t_vars = tf.trainable_variables()
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver(max_to_keep=1)

    # Completion.
    self.mask = tf.placeholder(tf.float32, self.image_shape, name='mask')
    self.lowres_mask = tf.placeholder(tf.float32, self.lowres_shape, name='lowres_mask')
    self.contextual_loss = tf.reduce_sum(
        tf.contrib.layers.flatten(
            tf.abs(tf.multiply(self.mask, self.G) - tf.multiply(self.mask, self.images))), 1)
    self.contextual_loss += tf.reduce_sum(
        tf.contrib.layers.flatten(
            tf.abs(tf.multiply(self.lowres_mask, self.lowres_G) - tf.multiply(self.lowres_mask, self.lowres_images))), 1)
    self.perceptual_loss = self.g_loss
    self.complete_loss = self.contextual_loss + self.lam * self.perceptual_loss
    self.grad_complete_loss = tf.gradients(self.complete_loss, self.z)

Developer ID: bamos, Project: dcgan-completion.tensorflow, Lines of code: 59, Source file: model.py
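The lowres_images / lowres_G lines above implement average pooling purely with reshape and reduce_mean: each spatial dimension is split into (size // k, k) blocks and the two block axes are averaged away. As a standalone hedged sketch (k = 4 and the shapes are illustrative):

k = 4
images = tf.random_uniform([8, 64, 64, 3])
lowres = tf.reduce_mean(
    tf.reshape(images, [8, 64 // k, k, 64 // k, k, 3]),
    axis=[2, 4])    # -> shape [8, 16, 16, 3]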
Example 17: din_fcn_attention

def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM',
                      softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
    if isinstance(facts, tuple):
        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
        facts = tf.concat(facts, 2)
    if len(facts.get_shape().as_list()) == 2:
        facts = tf.expand_dims(facts, 1)
    if time_major:
        # (T, B, D) => (B, T, D)
        facts = tf.array_ops.transpose(facts, [1, 0, 2])

    # Trainable parameters
    mask = tf.equal(mask, tf.ones_like(mask))
    # D value - hidden size of the RNN layer
    facts_size = facts.get_shape().as_list()[-1]
    print("facts_size %s" % facts_size)
    querry_size = query.get_shape().as_list()[-1]
    print("querry_size %s" % querry_size)

    # tf.truncated_normal_initializer(dtype=tf.float32, stddev=0.36, seed=3)
    query = tf.layers.dense(
        query, facts_size, activation=None, kernel_initializer=get_tf_initializer(), name='f1' + stag)
    query = prelu(query, scope=stag)
    queries = tf.tile(query, [1, tf.shape(facts)[1]])
    queries = tf.reshape(queries, tf.shape(facts))

    din_all = tf.concat(
        [queries, facts, queries - facts, queries * facts], axis=-1)
    d_layer_1_all = tf.layers.dense(
        din_all, 80, activation=tf.nn.sigmoid, kernel_initializer=get_tf_initializer(), name='f1_att' + stag)
    d_layer_2_all = tf.layers.dense(
        d_layer_1_all, 40, activation=tf.nn.sigmoid, kernel_initializer=get_tf_initializer(), name='f2_att' + stag)
    d_layer_3_all = tf.layers.dense(
        d_layer_2_all, 1, activation=None, kernel_initializer=get_tf_initializer(), name='f3_att' + stag)
    d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
    scores = d_layer_3_all

    # Mask
    # key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1])  # [B, T]
    key_masks = tf.expand_dims(mask, 1)  # [B, 1, T]
    paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
    if not forCnn:
        scores = tf.where(key_masks, scores, paddings)  # [B, 1, T]

    # Scale
    # scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)

    # Activation
    if softmax_stag:
        scores = tf.nn.softmax(scores)  # [B, 1, T]

    # Weighted sum
    if mode == 'SUM':
        output = tf.matmul(scores, facts)  # [B, 1, H]
        # output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
    else:
        scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
        output = facts * tf.expand_dims(scores, -1)
        output = tf.reshape(output, tf.shape(facts))
    if return_alphas:
        return output, scores
    return output

Developer ID: q64545, Project: x-deeplearning, Lines of code: 59, Source file: utils.py
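The paddings = tf.ones_like(scores) * (-2 ** 32 + 1) line is the usual attention-masking idiom: positions outside the key mask are overwritten with a very large negative number so that the subsequent softmax gives them essentially zero weight. In isolation the idiom looks like this (hedged sketch; shapes and values are illustrative):

scores = tf.constant([[[0.5, 1.2, -0.3]]])          # [B=1, 1, T=3]
key_masks = tf.constant([[[True, True, False]]])    # last position is padding
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
masked = tf.where(key_masks, scores, paddings)
attn = tf.nn.softmax(masked)    # the padded position gets ~0 probability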
Example 18: build_model

def build_model(self):
    # some parameters
    image_dims = [self.input_height, self.input_width, self.c_dim]
    bs = self.batch_size

    """ Graph Input """
    # images
    self.inputs = tf.placeholder(tf.float32, [bs] + image_dims, name='real_images')
    # noises
    self.z = tf.placeholder(tf.float32, [bs, self.z_dim], name='z')

    """ Loss Function """
    # output of D for real images
    D_real, D_real_logits, _ = self.discriminator(self.inputs, is_training=True, reuse=False)
    # output of D for fake images
    G = self.generator(self.z, is_training=True, reuse=False)
    D_fake, D_fake_logits, _ = self.discriminator(G, is_training=True, reuse=True)

    # get loss for discriminator (least-squares GAN: real -> ones, fake -> zeros)
    d_loss_real = tf.reduce_mean(self.mse_loss(D_real_logits, tf.ones_like(D_real_logits)))
    d_loss_fake = tf.reduce_mean(self.mse_loss(D_fake_logits, tf.zeros_like(D_fake_logits)))
    self.d_loss = 0.5 * (d_loss_real + d_loss_fake)

    # get loss for generator
    self.g_loss = tf.reduce_mean(self.mse_loss(D_fake_logits, tf.ones_like(D_fake_logits)))

    """ Training """
    # divide trainable variables into a group for D and a group for G
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'd_' in var.name]
    g_vars = [var for var in t_vars if 'g_' in var.name]

    # optimizers
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        self.d_optim = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1) \
            .minimize(self.d_loss, var_list=d_vars)
        self.g_optim = tf.train.AdamOptimizer(self.learning_rate * 5, beta1=self.beta1) \
            .minimize(self.g_loss, var_list=g_vars)

    # weight clipping
    self.clip_D = [p.assign(tf.clip_by_value(p, -0.01, 0.01)) for p in d_vars]

    """ Testing """
    # for test
    self.fake_images = self.generator(self.z, is_training=False, reuse=True)

    """ Summary """
    d_loss_real_sum = tf.summary.scalar("d_loss_real", d_loss_real)
    d_loss_fake_sum = tf.summary.scalar("d_loss_fake", d_loss_fake)
    d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
    g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)

    # final summary operations
    self.g_sum = tf.summary.merge([d_loss_fake_sum, g_loss_sum])
    self.d_sum = tf.summary.merge([d_loss_real_sum, d_loss_sum])

Developer ID: RaceSu, Project: tensorflow-generative-model-collections, Lines of code: 59, Source file: LSGAN.py
Example 19: __loss__

def __loss__(self):
    """
    Calculate loss
    :return:
    """
    # regularization ?
    # Discriminator loss on real samples (cross-entropy against ones).
    # self.d_loss_real = tf.reduce_mean(
    #     tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predict_d_logits,
    #                                             labels=tf.ones_like(self.predict_d)))
    self.d_loss_real = tf.reduce_mean(
        ops.binary_cross_entropy(preds=self.predict_d, targets=tf.ones_like(self.predict_d)))
    tf.summary.scalar('d_loss_real', self.d_loss_real, collections='D')

    # Discriminator loss on generated samples (cross-entropy against zeros).
    # self.d_loss_fake = tf.reduce_mean(
    #     tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predict_d_logits_for_g,
    #                                             labels=tf.zeros_like(self.predict_d_for_g)))
    self.d_loss_fake = tf.reduce_mean(
        ops.binary_cross_entropy(preds=self.predict_d_for_g, targets=tf.zeros_like(self.predict_d_for_g)))
    tf.summary.scalar('d_loss_fake', self.d_loss_fake, collections='D')

    self.d_loss = self.d_loss_real + self.d_loss_fake
    tf.summary.scalar('d_loss', self.d_loss, collections='D')

    if len(self.regularization_values_d) > 0:
        reg_loss_d = self.reg_w * tf.reduce_sum(self.regularization_values_d)
        self.d_loss += reg_loss_d
        tf.summary.scalar('d_loss_plus_reg', self.d_loss, collections='D')
        tf.summary.scalar('d_loss_reg_only', reg_loss_d, collections='D')

    # Generator adversarial loss (cross-entropy against ones).
    # g_loss = tf.reduce_mean(
    #     tf.nn.sigmoid_cross_entropy_with_logits(logits=self.predict_d_logits_for_g,
    #                                             labels=tf.ones_like(self.predict_d_for_g)))
    g_loss = tf.reduce_mean(
        ops.binary_cross_entropy(preds=self.predict_d_for_g, targets=tf.ones_like(self.predict_d_for_g)))
    tf.summary.scalar('g_loss', g_loss, collections='G')

    context_loss = tf.reduce_mean(tf.square(tf.squeeze(self.predict_g) - self.labels), name='L2-Loss')
    tf.summary.scalar('g_loss_context_only', context_loss, collections='G')

    # self.g_loss = self.FLAGS.gen_loss_adversarial * g_loss + self.FLAGS.gen_loss_context * context_loss
    self.g_loss = self.adb_loss_w * g_loss + self.FLAGS.gen_loss_context * context_loss
    tf.summary.scalar('g_loss_plus_context', self.g_loss, collections='G')

    if len(self.regularization_values) > 0:
        reg_loss_g = self.reg_w * tf.reduce_sum(self.regularization_values)
        self.g_loss += reg_loss_g
        tf.summary.scalar('g_loss_plus_context_plus_reg', self.g_loss, collections='G')
        tf.summary.scalar('g_loss_reg_only', reg_loss_g, collections='D')

    tf.summary.scalar('diff-loss', tf.abs(self.d_loss - self.g_loss), collections='G')

Developer ID: shohad25, Project: thesis, Lines of code: 59, Source file: k_space_gan_fft_resnet.py