This article collects typical usage examples of the Python function tensorflow.zeros_like. If you have been wondering what zeros_like does, how to call it, or what it looks like in real code, the curated examples here may help.
Below are 20 code examples of zeros_like, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
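Before the project-sourced examples, here is a minimal standalone sketch of what zeros_like does (assuming TensorFlow 2.x eager mode; most examples below are TF 1.x graph code):

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# A tensor of zeros with the same shape and dtype as x:
z = tf.zeros_like(x)
print(z.numpy())   # -> [[0. 0.] [0. 0.]]
# The dtype can optionally be overridden:
zi = tf.zeros_like(x, dtype=tf.int32)
print(zi.numpy())  # -> [[0 0] [0 0]]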
Example 1: _extend_support_with_default_value
def _extend_support_with_default_value(self, x, f, default_value):
  """Returns `f(x)` if x is in the support, and `default_value` otherwise.

  Given `f` which is defined on the support of this distribution
  (`x >= loc`), extend the function definition to the real line
  by defining `f(x) = default_value` for `x < loc`.

  Args:
    x: Floating-point `Tensor` to evaluate `f` at.
    f: Callable that takes in a `Tensor` and returns a `Tensor`. This
      represents the function whose domain of definition we want to extend.
    default_value: Python or numpy literal representing the value to use for
      extending the domain.

  Returns:
    `Tensor` representing an extension of `f(x)`.
  """
  with tf.name_scope(name="extend_support_with_default_value", values=[x]):
    x = tf.convert_to_tensor(x, dtype=self.dtype, name="x")
    loc = self.loc + tf.zeros_like(self.scale) + tf.zeros_like(x)
    x = x + tf.zeros_like(loc)
    # Substitute out-of-support values in x with values that are in the
    # support of the distribution before applying f.
    y = f(tf.where(x < loc, self._inv_z(0.5), x))
    if default_value == 0.:
      default_value = tf.zeros_like(y)
    elif default_value == 1.:
      default_value = tf.ones_like(y)
    else:
      default_value = tf.fill(
          dims=tf.shape(y),
          value=np.array(default_value, dtype=self.dtype.as_numpy_dtype))
    return tf.where(x < loc, default_value, y)
Developer: asudomoeva; Project: probability; Lines: 32; Source: half_cauchy.py
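A recurring idiom in Example 1 (and in Example 20 below) is adding tf.zeros_like(t) to another tensor purely to broadcast it to the combined shape without changing its values. A small illustration of the trick, assuming TensorFlow 2.x:

import tensorflow as tf

loc = tf.constant([[0.0], [1.0]])    # shape [2, 1]
x = tf.constant([10.0, 20.0, 30.0])  # shape [3]
# loc keeps its values but takes on the broadcast shape [2, 3]:
loc_b = loc + tf.zeros_like(x)
print(loc_b.shape)  # -> (2, 3)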
Example 2: create_discriminator
def create_discriminator(self, _input, reuse=False):
    config = self.config
    gan = self.gan
    print("___", _input, self.g0, self.x0, self.c0)
    _fs = tf.concat([tf.zeros_like(self.c0), tf.zeros_like(self.c0)], axis=0)
    disc = self.create_component(config.discriminator, name='discriminator', input=_input, features=[_fs], reuse=reuse)
    return disc
Developer: 255BITS; Project: hyperchamber-gan; Lines: 7; Source: next-frame.py
Example 3: apply_gradients
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [v for _, v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [g for g, _ in grads_and_vars]
    for grad, var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise ValueError("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g, v in grads_and_vars])
    self._prepare()

    d_grads = all_grads[:len(d_vars)]
    if self.config.type == 'sga':
        Jgrads = tf.gradients(d_grads, d_vars, grad_ys=d_grads, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    elif self.config.type == 'magnitude':
        consensus_reg = [tf.square(g) for g in d_grads if g is not None]
        Jgrads = tf.gradients(consensus_reg, d_vars) + [tf.zeros_like(g) for g in g_vars]
    else:
        consensus_reg = 0.5 * sum(
            tf.reduce_sum(tf.square(g)) for g in d_grads if g is not None
        )
        Jgrads = tf.gradients(consensus_reg, d_vars, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    new_grads = [g + jg * self._beta if jg is not None else g for g, v, jg in zip(all_grads, var_list, Jgrads)]
    new_grads_and_vars = list(zip(new_grads, var_list)).copy()
    return self.optimizer.apply_gradients(new_grads_and_vars, global_step=global_step, name=name)
Developer: 255BITS; Project: hyperchamber-gan; Lines: 31; Source: consensus_optimizer.py
Example 4: _loss_x_entropy
def _loss_x_entropy(self, x, z, noise=None):
    with tf.name_scope("xentropy_loss"):
        z_clipped = tf.clip_by_value(z, FLAGS.zero_bound, FLAGS.one_bound)
        z_minus_1_clipped = tf.clip_by_value((1.0 - z), FLAGS.zero_bound, FLAGS.one_bound)
        x_clipped = tf.clip_by_value(x, FLAGS.zero_bound, FLAGS.one_bound)
        x_minus_1_clipped = tf.clip_by_value((1.0 - x), FLAGS.zero_bound, FLAGS.one_bound)

        # cross_entropy = x * log(z) + (1 - x) * log(1 - z)
        cross_entropy = tf.add(tf.mul(tf.log(z_clipped), x_clipped),
                               tf.mul(tf.log(z_minus_1_clipped), x_minus_1_clipped),
                               name='X-Entr')

        if noise:
            with tf.name_scope("Given_Emphasis"):
                a, b = self._get_emph_params
                corrupted = tf.select(noise, cross_entropy, tf.zeros_like(cross_entropy), name='Corrupted_Emphasis')
                # OR -- tf.select(tf.logical_not(noisy_points), cross_entropy, tf.zeros_like(cross_entropy), name='Uncorrupted_Emphasis')
                uncorrupted = tf.select(noise, tf.zeros_like(cross_entropy), cross_entropy, name='Uncorrupted_Emphasis')
                loss = a * (-1 * tf.reduce_sum(corrupted, 1)) + b * (-1 * tf.reduce_sum(uncorrupted, 1))
        else:
            # Sum the cost for each example
            loss = -1 * tf.reduce_sum(cross_entropy, 1)

        # Reduce mean to find the overall cost of the loss
        cross_entropy_mean = tf.reduce_mean(loss, name='xentropy_mean')
        return cross_entropy_mean
Developer: hussius; Project: StackedDAE; Lines: 29; Source: dae.py
Example 5: __init__
def __init__(self,
             sess,
             dataset_name='facades',
             checkpoint_dir=None):
    self.sess = sess
    self.dataset_name = dataset_name
    self.checkpoint_dir = checkpoint_dir

    self.real_data = tf.placeholder(tf.float32,
                                    [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3 + 3],
                                    name='input_images')
    self.real_A = self.real_data[:, :, :, :3]
    self.real_B = self.real_data[:, :, :, 3:6]

    self.fake_B = generator(self.real_A, name="generatorA2B")
    self.fake_A = generator(self.real_B, name="generatorB2A")
    self.fake_B_fake_A = generator(self.fake_B, reuse=True, name="generatorB2A")
    self.fake_A_fake_B = generator(self.fake_A, reuse=True, name="generatorA2B")

    self.DA_real = discriminator(self.real_A, reuse=False, name="descriminatorA")
    self.DB_real = discriminator(self.real_B, reuse=False, name="descriminatorB")
    self.DA_fake = discriminator(self.fake_A, reuse=True, name="descriminatorA")
    self.DB_fake = discriminator(self.fake_B, reuse=True, name="descriminatorB")

    self.g_loss_a2b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_fake, labels=tf.ones_like(self.DB_fake))) + 100 * tf.reduce_mean(
        tf.abs(self.real_A - self.fake_B_fake_A)) + 100 * tf.reduce_mean(
        tf.abs(self.real_B - self.fake_B))
    self.g_loss_b2a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_fake, labels=tf.ones_like(self.DA_fake))) + 100 * tf.reduce_mean(
        tf.abs(self.real_B - self.fake_A_fake_B)) + 100 * tf.reduce_mean(
        tf.abs(self.real_A - self.fake_A))
    self.g_loss = self.g_loss_a2b + self.g_loss_b2a
    self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_fake, labels=tf.zeros_like(self.DB_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DB_real, labels=tf.ones_like(self.DB_real))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_fake, labels=tf.zeros_like(self.DA_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.DA_real, labels=tf.ones_like(self.DA_real)))

    self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
    self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
    self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
    self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
    self.real_A_sum = tf.summary.image("real_A", self.real_A)
    self.real_B_sum = tf.summary.image("real_B", self.real_B)
    self.fake_A_sum = tf.summary.image("fake_A", self.fake_A)
    self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
    self.fake_AB_sum = tf.summary.image("fake_AB", self.fake_A_fake_B)
    self.fake_BA_sum = tf.summary.image("fake_BA", self.fake_B_fake_A)
    self.d_sum = tf.summary.merge([self.d_loss_sum])
    self.g_sum = tf.summary.merge([self.g_loss_sum, self.g_loss_a2b_sum, self.g_loss_b2a_sum,
                                   self.real_A_sum, self.real_B_sum, self.fake_A_sum,
                                   self.fake_B_sum, self.fake_AB_sum, self.fake_BA_sum])

    training_vars = tf.trainable_variables()
    self.d_vars = [var for var in training_vars if 'd_' in var.name]
    self.g_vars = [var for var in training_vars if 'g_' in var.name]
    self.saver = tf.train.Saver(max_to_keep=5)
Developer: yaoyaowd; Project: tensorflow_demo; Lines: 60; Source: cycle_model.py
Example 6: loss
def loss(self, x, y):
    '''
    Args:
        x: shape=[s, b, c]
        y: shape=[s, b]
    Returns:
        a `dict` of losses
    '''
    z_mu, z_lv = self._encode(x, is_training=self.is_training)
    z = GaussianSampleLayer(z_mu, z_lv)
    xh = self._decode(z, y, is_training=self.is_training)

    with tf.name_scope('loss'):
        with tf.name_scope('E_log_p_x_zy'):
            L_x = -1.0 * tf.reduce_mean(
                GaussianLogDensity(x, xh, tf.zeros_like(x)),
            )
        with tf.name_scope('D_KL_z'):
            L_z = tf.reduce_mean(
                GaussianKLD(
                    z_mu, z_lv,
                    tf.zeros_like(z_mu), tf.zeros_like(z_lv)
                )
            )
        loss = {
            'L_x': L_x,
            'L_z': L_z,
        }
        tf.summary.scalar('L_x', L_x)
        tf.summary.scalar('L_z', L_z)
    return loss
Developer: QianQQ; Project: Voice-Conversion; Lines: 32; Source: models.py
Example 7: default_exchange_proposed_fn_
def default_exchange_proposed_fn_(num_replica, seed=None):
  """Default function for `exchange_proposed_fn` of `kernel`."""
  # Note: `probs` below is captured from the enclosing scope in the
  # original source; it is not defined in this snippet.
  num_replica = tf.to_int32(num_replica)

  seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
  random_uniform = tf.random_uniform([], seed=seed)
  accept_proposed_exchange = random_uniform < probs

  seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
  zero_start = tf.random_uniform([], seed=seed) > 0.5
  if num_replica % 2 == 0:
    exchange_proposed = tf.where(
        zero_start, tf.range(num_replica),
        tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                           tf.range(1, num_replica - 1)))
    exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                   num_replica // 2 - 1)
  else:
    exchange_proposed = tf.where(
        zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
    exchange_proposed_n = num_replica // 2

  exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
  exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                               tf.zeros_like(exchange_proposed))
  exchange_proposed_n = tf.where(accept_proposed_exchange,
                                 exchange_proposed_n,
                                 tf.zeros_like(exchange_proposed_n))
  return exchange_proposed, exchange_proposed_n
Developer: lewisKit; Project: probability; Lines: 29; Source: replica_exchange_mc.py
Example 8: evaluate_precision_recall
def evaluate_precision_recall(
        input_layer, labels, threshold=0.5, per_example_weights=None, name=PROVIDED, phase=Phase.train
):
    """Computes the precision and recall of the prediction vs the labels.

    Args:
      input_layer: A Pretty Tensor object.
      labels: The target labels to learn as a float tensor.
      threshold: The threshold to use to decide if the prediction is true.
      per_example_weights: A Tensor with a weight per example.
      name: An optional name.
      phase: The phase of this model; non training phases compute a total across
        all examples.

    Returns:
      Precision and Recall.
    """
    _ = name  # Eliminate warning, name used for namescoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_layer, labels, threshold, per_example_weights
    )

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )

        with input_layer.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_layer.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_layer.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.select(tf.equal(sum_retrieved, 0), tf.zeros_like(selected), selected / sum_retrieved),
        tf.select(tf.equal(sum_relevant, 0), tf.zeros_like(selected), selected / sum_relevant),
    )
Developer: yaowenwu; Project: prettytensor; Lines: 60; Source: pretty_tensor_loss_methods.py
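Example 8 guards its divisions with tf.select (the TF 0.x ancestor of tf.where): where the denominator is zero it substitutes tf.zeros_like instead of dividing. The same pattern in current TensorFlow, as a sketch assuming TF 2.x:

import tensorflow as tf

num = tf.constant([3.0, 1.0])
den = tf.constant([0.0, 4.0])
# Return 0 where den == 0, num / den elsewhere:
safe = tf.where(tf.equal(den, 0.0), tf.zeros_like(num), num / den)
print(safe.numpy())  # -> [0.   0.25]

TensorFlow also ships tf.math.divide_no_nan for exactly this zero-denominator case.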
Example 9: __call__
def __call__(self, next_state, observation, t):
    # next_state = z_{t+1}
    # Compute the q distribution over z, q(z_t | z_n, z_{t+1}).
    q_zt = self.q_zt(observation, next_state, t)
    # Sample from q.
    zt = q_zt.sample()
    # Compute the p distribution over z, p(z_{t+1} | z_t).
    p_zt = self.p_zt(zt, t)
    # Compute log p(z_{t+1} | z_t).
    if t == 0:
        log_p_zt = p_zt.log_prob(observation)
    else:
        log_p_zt = p_zt.log_prob(next_state)

    # Compute the r prior over z_t.
    r_zt = self.r(zt, t)
    log_r_zt = r_zt.log_prob(zt)
    # Compute the proposal density at z_t.
    log_q_zt = q_zt.log_prob(zt)

    # If we're at the last timestep, also calculate the log prob of the observation.
    if t == self.num_timesteps - 1:
        p_z0_dist = tf.contrib.distributions.Normal(
            loc=tf.zeros_like(zt), scale=tf.ones_like(zt))
        z0_log_prob = p_z0_dist.log_prob(zt)
    else:
        z0_log_prob = tf.zeros_like(log_q_zt)
    return (zt, log_q_zt, log_p_zt, z0_log_prob, log_r_zt)
Developer: 812864539; Project: models; Lines: 28; Source: models.py
Example 10: UpdateProbs
def UpdateProbs(self, inp):
    """Update the probability of each particle based on the 2D matrix `inp`,
    which is a 2D perspective projection of the scene."""
    projection, onscreen = self.project()
    filtered_projection = tf.to_int64(tf.select(onscreen, projection, tf.zeros_like(projection)))
    per_state_probabilities = tf.gather_nd(inp, filtered_projection)

    filtered_probabilities = tf.select(onscreen, per_state_probabilities, tf.zeros_like(per_state_probabilities))

    new_state_indicies = tf.squeeze(tf.multinomial(tf.expand_dims(tf.log(filtered_probabilities), 0), self.particles / 10 * 9))
    new_state = tf.gather(self.state, new_state_indicies)

    # Add momentum
    new_state = tf.concat(1, [new_state[:, 0:3] + new_state[:, 3:6], new_state[:, 3:10]])

    # Add in particles for the "just come onscreen" case.
    new_state = tf.concat(0, [new_state, tf.random_normal([self.particles / 10, 10]) * self.initial_std + self.initial_bias])

    new_state = new_state + tf.random_normal([self.particles, 10]) * self.update_std

    # TODO: permute state by adding noise.
    return self.state.assign(new_state)
Developer: Hello1024; Project: quadcopter; Lines: 25; Source: particle.py
Example 11: _survival_function
def _survival_function(self, y):
  low = self._low
  high = self._high

  # Recall the promise:
  # survival_function(y) := P[Y > y]
  #                       = 0, if y >= high,
  #                       = 1, if y < low,
  #                       = P[X > y], otherwise.

  # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
  # between.
  j = tf.ceil(y)

  # P[X > j], used when low < X < high.
  result_so_far = self.distribution.survival_function(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += tf.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if low is not None:
    result_so_far = tf.where(j < low, tf.ones_like(result_so_far),
                             result_so_far)
  if high is not None:
    result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),
                             result_so_far)

  return result_so_far
Developer: lewisKit; Project: probability; Lines: 30; Source: quantized_distribution.py
Example 12: compute_first_or_last
def compute_first_or_last(self, select, first=True):
    # Perform the first or last operation on row select with probabilistic row selection.
    answer = tf.zeros_like(select)
    running_sum = tf.zeros([self.batch_size, 1], self.data_type)
    for i in range(self.max_elements):
        if first:
            current = tf.slice(select, [0, i], [self.batch_size, 1])
        else:
            current = tf.slice(select, [0, self.max_elements - 1 - i],
                               [self.batch_size, 1])
        curr_prob = current * (1 - running_sum)
        curr_prob = curr_prob * tf.cast(curr_prob >= 0.0, self.data_type)
        running_sum += curr_prob
        temp_ans = []
        curr_prob = tf.expand_dims(tf.reshape(curr_prob, [self.batch_size]), 0)
        for i_ans in range(self.max_elements):
            if not first and i_ans == self.max_elements - 1 - i:
                temp_ans.append(curr_prob)
            elif first and i_ans == i:
                temp_ans.append(curr_prob)
            else:
                temp_ans.append(tf.zeros_like(curr_prob))
        temp_ans = tf.transpose(tf.concat(axis=0, values=temp_ans))
        answer += temp_ans
    return answer
Developer: Hukongtao; Project: models; Lines: 25; Source: model.py
Example 13: _log_cdf
def _log_cdf(self, y):
  low = self._low
  high = self._high

  # Recall the promise:
  # cdf(y) := P[Y <= y]
  #         = 1, if y >= high,
  #         = 0, if y < low,
  #         = P[X <= y], otherwise.

  # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
  # between.
  j = tf.floor(y)

  result_so_far = self.distribution.log_cdf(j)

  # Broadcast, because it's possible that this is a single distribution being
  # evaluated on a number of samples, or something like that.
  j += tf.zeros_like(result_so_far)

  # Re-define values at the cutoffs.
  if low is not None:
    neg_inf = -np.inf * tf.ones_like(result_so_far)
    result_so_far = tf.where(j < low, neg_inf, result_so_far)
  if high is not None:
    result_so_far = tf.where(j >= high, tf.zeros_like(result_so_far),
                             result_so_far)

  return result_so_far
Developer: lewisKit; Project: probability; Lines: 29; Source: quantized_distribution.py
Example 14: Loop
def Loop(cell, w, i):
    x = tf.unpack(i, self.NUM_UNROLL)
    m = tf.zeros_like(x[0])
    c = tf.zeros_like(x[0])
    for i in range(self.NUM_UNROLL):
        m, c = cell(x[i], m, c, w)
    return m
Developer: 13331151; Project: tensorflow; Lines: 7; Source: function_test.py
Example 15: __init__
def __init__(self, gan=None, config=None, trainer=None, name="ProgressCompressTrainHook"):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    d_loss = []

    self.x = tf.Variable(tf.zeros_like(gan.inputs.x))
    self.g = tf.Variable(tf.zeros_like(gan.generator.sample))
    stacked = tf.concat([self.gan.inputs.x, self.gan.generator.sample], axis=0)
    self.assign_x = tf.assign(self.x, gan.inputs.x)
    self.assign_g = tf.assign(self.g, gan.generator.sample)
    self.re_init_d = [d.initializer for d in gan.discriminator.variables()]
    gan.hack = self.g

    self.assign_knowledge_base = []

    bs = gan.batch_size()
    real = gan.discriminator.named_layers['knowledge_base_target']  # tf.reshape(gan.loss.sample[:2], [2,-1])
    _inputs = hc.Config({'x': real})
    inner_gan = KBGAN(config=self.config.knowledge_base, inputs=_inputs, x=real, latent=stacked)
    self.kb_loss = inner_gan.loss
    self.kb = inner_gan.generator
    self.trainer = inner_gan.trainer
    variables = inner_gan.variables()
    # variables += self.kb.variables()

    for c in gan.components:
        if hasattr(c, 'knowledge_base'):
            for name, net in c.knowledge_base:
                assign = self.kb.named_layers[name]
                if self.ops.shape(assign)[0] > self.ops.shape(net)[0]:
                    assign = tf.slice(assign, [0 for i in self.ops.shape(net)], [self.ops.shape(net)[0]] + self.ops.shape(assign)[1:])
                self.assign_knowledge_base.append(tf.assign(net, assign))

    self.gan.add_metric('d_kb', self.kb_loss.sample[0])
    self.gan.add_metric('g_kb', self.kb_loss.sample[1])
Developer: 255BITS; Project: hyperchamber-gan; Lines: 35; Source: progress_compress_kbgan_train_hook.py
Example 16: compute_losses
def compute_losses(self, images, wrong_images, fake_images, embeddings):
    real_logit = self.model.get_discriminator(images, embeddings)
    wrong_logit = self.model.get_discriminator(wrong_images, embeddings)
    fake_logit = self.model.get_discriminator(fake_images, embeddings)

    real_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(real_logit,
                                                tf.ones_like(real_logit))
    real_d_loss = tf.reduce_mean(real_d_loss)
    wrong_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(wrong_logit,
                                                tf.zeros_like(wrong_logit))
    wrong_d_loss = tf.reduce_mean(wrong_d_loss)
    fake_d_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                tf.zeros_like(fake_logit))
    fake_d_loss = tf.reduce_mean(fake_d_loss)

    if cfg.TRAIN.B_WRONG:
        discriminator_loss = \
            real_d_loss + (wrong_d_loss + fake_d_loss) / 2.
        self.log_vars.append(("d_loss_wrong", wrong_d_loss))
    else:
        discriminator_loss = real_d_loss + fake_d_loss
    self.log_vars.append(("d_loss_real", real_d_loss))
    self.log_vars.append(("d_loss_fake", fake_d_loss))

    generator_loss = \
        tf.nn.sigmoid_cross_entropy_with_logits(fake_logit,
                                                tf.ones_like(fake_logit))
    generator_loss = tf.reduce_mean(generator_loss)

    return discriminator_loss, generator_loss
Developer: Soledad89; Project: StackGAN; Lines: 32; Source: trainer.py
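In GAN discriminators like Examples 5 and 16, tf.zeros_like(logits) and tf.ones_like(logits) serve as the "fake" and "real" label tensors, automatically matching the logits' shape. A minimal sketch of the fake-side loss, assuming the TensorFlow 2.x keyword-argument API (Example 16 itself uses the older positional form):

import tensorflow as tf

fake_logit = tf.random.normal([8, 1])
# Fakes are labeled 0; zeros_like builds the label tensor in the right shape:
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(fake_logit), logits=fake_logit))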
Example 17: loss
def loss(self, x, y):
    with tf.name_scope('loss'):
        z_mu, z_lv = self._encode(x)
        z = GaussianSampleLayer(z_mu, z_lv)
        xh = self._generate(z, y)

        D_KL = tf.reduce_mean(
            GaussianKLD(
                slim.flatten(z_mu),
                slim.flatten(z_lv),
                slim.flatten(tf.zeros_like(z_mu)),
                slim.flatten(tf.zeros_like(z_lv)),
            )
        )
        logPx = tf.reduce_mean(
            GaussianLogDensity(
                slim.flatten(x),
                slim.flatten(xh),
                tf.zeros_like(slim.flatten(xh))),
        )

    loss = dict()
    loss['G'] = - logPx + D_KL
    loss['D_KL'] = D_KL
    loss['logP'] = logPx

    tf.summary.scalar('KL-div', D_KL)
    tf.summary.scalar('logPx', logPx)
    tf.summary.histogram('xh', xh)
    tf.summary.histogram('x', x)
    return loss
Developer: QianQQ; Project: Voice-Conversion; Lines: 32; Source: vae.py
Example 18: cut
def cut(self, hits, start, end):
    """
    Cuts the [start:end] range from the input data.

    :param hits: hits timeseries
    :param start: start index
    :param end: end index
    :return: tuple (train_hits, test_hits, dow, lagged_hits)
    """
    # Pad hits to ensure we have enough array length for prediction
    hits = tf.concat([hits, tf.fill([self.predict_window], np.NaN)], axis=0)
    cropped_hit = hits[start:end]

    # Cut day of week
    cropped_dow = self.inp.dow[start:end]

    # Cut lagged hits
    # gather() accepts only int32 indexes
    cropped_lags = tf.cast(self.inp.lagged_ix[start:end], tf.int32)
    # Mask for -1 (no data) lag indexes
    lag_mask = cropped_lags < 0
    # Convert -1 to 0 for gather(); it doesn't accept anything exotic
    cropped_lags = tf.maximum(cropped_lags, 0)
    # Translate lag indexes to hit values
    lagged_hit = tf.gather(hits, cropped_lags)
    # Convert masked (see above) or NaN lagged hits to zeros
    lag_zeros = tf.zeros_like(lagged_hit)
    lagged_hit = tf.where(lag_mask | tf.is_nan(lagged_hit), lag_zeros, lagged_hit)

    # Split into train and test
    x_hits, y_hits = tf.split(cropped_hit, [self.train_window, self.predict_window], axis=0)
    # Convert NaN to zero in the train data
    x_hits = tf.where(tf.is_nan(x_hits), tf.zeros_like(x_hits), x_hits)
    return x_hits, y_hits, cropped_dow, lagged_hit
Developer: JXieHao; Project: kaggle-web-traffic; Lines: 34; Source: input_pipe.py
Example 19: classification_costs
def classification_costs(logits, labels, name=None):
    """Compute the classification cost mean and the classification cost per sample.

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled examples
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
Developer: ys2899; Project: mean-teacher; Lines: 25; Source: model.py
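Examples 18 and 19 share a masking pattern: tf.where(mask, values, tf.zeros_like(values)) zeroes out entries that should not contribute (NaNs, unlabeled examples) while keeping the tensor shape intact. A condensed sketch of the label-sanitizing step, assuming TensorFlow 2.x:

import tensorflow as tf

labels = tf.constant([3, -1, 0, -1])  # -1 marks unlabeled examples
applicable = tf.not_equal(labels, -1)
# Replace -1 with a valid class id so cross-entropy stays computable;
# per-sample losses at masked positions are zeroed out afterwards.
safe_labels = tf.where(applicable, labels, tf.zeros_like(labels))
print(safe_labels.numpy())  # -> [3 0 0 0]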
Example 20: _extend_support
def _extend_support(self, x, f, alt):
  """Returns `f(x)` if x is in the support, and `alt` otherwise.

  Given `f` which is defined on the support of this distribution
  (e.g. x > scale), extend the function definition to the real line
  by defining `f(x) = alt` for `x < scale`.

  Args:
    x: Floating-point `Tensor` to evaluate `f` at.
    f: Lambda that takes in a tensor and returns a tensor. This represents
      the function whose domain of definition we want to extend.
    alt: Python or numpy literal representing the value to use for extending
      the domain.

  Returns:
    `Tensor` representing an extension of `f(x)`.
  """
  # We need to do a series of broadcasts for the tf.where.
  scale = self.scale + tf.zeros_like(self.concentration)
  is_invalid = x < scale
  scale = scale + tf.zeros_like(x)
  x = x + tf.zeros_like(scale)
  # We need to do this to ensure gradients are sound.
  y = f(tf.where(is_invalid, scale, x))
  if alt == 0.:
    alt = tf.zeros_like(y)
  elif alt == 1.:
    alt = tf.ones_like(y)
  else:
    alt = tf.fill(
        dims=tf.shape(y),
        value=np.array(alt, dtype=self.dtype.as_numpy_dtype))
  return tf.where(is_invalid, alt, y)
Developer: asudomoeva; Project: probability; Lines: 32; Source: pareto.py
Note: The tensorflow.zeros_like examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.