This article collects typical usage examples of the Python function tensorflow.clip_by_value. If you have been struggling with questions such as: how exactly is clip_by_value used? How do I call it? Where can I find examples? Then the hand-picked code samples below should help.
A total of 20 code examples of clip_by_value are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
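Before the project-level examples, here is a minimal, self-contained sketch of what the function does. It is our own illustration rather than code from any of the projects below, and it assumes the TF 1.x session API that all the examples on this page use:

import tensorflow as tf

# clip_by_value(t, clip_value_min, clip_value_max) bounds every element
# of t to the closed interval [clip_value_min, clip_value_max].
t = tf.constant([-2.0, -0.5, 0.3, 0.9, 4.0])
clipped = tf.clip_by_value(t, 0.0, 1.0)

with tf.Session() as sess:
    print(sess.run(clipped))  # [0.  0.  0.3 0.9 1. ]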
Example 1: discretize_range
def discretize_range(discretize_fn, levels, low, high, thermometer=False):
  """Get the range of discretized values in the interval (low, high).

  For example, assume discretize_fn uniformly discretizes the values
  between 0 and 1 into 10 bins, each represented by either a one-hot encoding
  or a thermometer encoding. Then discretize_range(discretize_fn, 10, .3, .7)
  would return [0., 0., 0., 1., 1., 1., 1., 0., 0., 0.]. Note that its output
  is independent of the encoding used.

  Args:
    discretize_fn: Discretization function used to discretize input.
    levels: Number of levels to discretize the input into.
    low: Minimum value in the interval.
    high: Maximum value in the interval.
    thermometer: If True, discretize_fn returns thermometer codes,
      else it returns one-hot codes. (Default: False.)

  Returns:
    Mask of 1's over the interval.
  """
  low = tf.clip_by_value(low, 0., 1.)
  high = tf.clip_by_value(high, 0., 1.)
  out = 0.
  for alpha in np.linspace(0., 1., levels):
    q = discretize_fn(alpha * low + (1. - alpha) * high, levels, thermometer)
    # Convert into one-hot encoding if q is in thermometer encoding
    if thermometer:
      q = discretization_utils.thermometer_to_one_hot(q, levels, flattened=True)
    out += q
  return tf.to_float(tf.greater(out, 0.))
Author: locussam, Project: obfuscated-gradients, Lines: 31, Source: discretization_attacks.py
Example 2: bayes_crossentropy
def bayes_crossentropy(y_true, y_pred, nb_classes=None, reduction=tf.reduce_mean,
                       name=None):
  with tf.name_scope(name, "bayes_crossentropy", [y_true, y_pred]):
    y_pred_shape = y_pred.shape
    if y_pred_shape.ndims == 1 or y_pred_shape[-1].value == 1:
      if y_pred_shape.ndims == 1:
        y_pred = tf.expand_dims(y_pred, -1)
      y_pred0 = 1. - y_pred
      y_pred = tf.concat([y_pred0, y_pred], axis=-1)
    # get number of classes
    if y_true.shape.ndims == 1:
      if nb_classes is None:
        raise Exception('y_pred and y_true must be one_hot encoded, '
                        'otherwise you have to provide nb_classes.')
      y_true = tf.one_hot(y_true, depth=nb_classes)
    elif nb_classes is None:
      nb_classes = y_true.shape[1].value
    # avoid numerical instability by clipping with EPS
    y_pred = tf.clip_by_value(y_pred, EPS, 1.0 - EPS)
    # ====== check distribution ====== #
    distribution = tf.reduce_sum(y_true, axis=0)
    # probability distribution of each class
    prob_distribution = dimshuffle(distribution / tf.reduce_sum(distribution),
                                   ('x', 0))
    # the prior probability distribution must be clipped as well
    prob_distribution = tf.clip_by_value(prob_distribution, EPS, 1.0 - EPS)
    # ====== init confusion info loss ====== #
    # weighted by y_true
    loss = y_true * tf.log(y_pred)
    loss = - 1 / nb_classes * tf.reduce_sum(loss / prob_distribution, axis=1)
    return reduction(loss)
Author: imito, Project: odin, Lines: 31, Source: losses.py
Example 3: _create_loss_and_optimizer
def _create_loss_and_optimizer(self, inputs, x_reconstr_mean, z_log_sigma_sq, z_mean):
    # The loss is composed of two terms:
    # 1.) The reconstruction loss (the negative log probability
    #     of the input under the reconstructed Bernoulli distribution
    #     induced by the decoder in the data space).
    #     This can be interpreted as the number of "nats" required
    #     for reconstructing the input when the activation in latent
    #     space is given.
    # Clipping to [1e-9, 1.0] avoids evaluating log(0.0)
    self.reconstr_loss = \
        -tf.reduce_sum(inputs * tf.log(tf.clip_by_value(x_reconstr_mean, 1e-9, 1.0))
                       + (1 - inputs) * tf.log(tf.clip_by_value(1 - x_reconstr_mean, 1e-9, 1.0)),
                       1)
    # 2.) The latent loss, which is defined as the Kullback-Leibler divergence
    #     between the distribution in latent space induced by the encoder on
    #     the data and some prior. This acts as a kind of regularizer.
    #     This can be interpreted as the number of "nats" required
    #     for transmitting the latent space distribution given the prior.
    self.latent_loss = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq
                                            - tf.square(z_mean)
                                            - tf.exp(z_log_sigma_sq), 1)
    loss = tf.reduce_mean(self.reconstr_loss + self.latent_loss)  # average over batch
    optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss)
    return loss, optimizer
Author: jramapuram, Project: dynamic_learning, Lines: 26, Source: vae.py
Example 4: tf_bivariate_normal
def tf_bivariate_normal(y, mu, sigma, rho, n_mixtures, batch_size):
    mu = tf.verify_tensor_all_finite(mu, "Mu not finite!")
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    delta = tf.sub(tf.tile(tf.expand_dims(y, 1), [1, n_mixtures, 1]), mu)
    delta = tf.verify_tensor_all_finite(delta, "Delta not finite!")
    sigma = tf.verify_tensor_all_finite(sigma, "Sigma not finite!")
    s = tf.reduce_prod(sigma, 2)
    s = tf.verify_tensor_all_finite(s, "S not finite!")
    # -1 <= rho <= 1
    z = tf.reduce_sum(tf.square(tf.div(delta, sigma + epsilon) + epsilon), 2) - \
        2 * tf.div(tf.mul(rho, tf.reduce_prod(delta, 2)), s + epsilon)
    z = tf.verify_tensor_all_finite(z, "Z not finite!")
    # 0 < negRho <= 1
    rho = tf.verify_tensor_all_finite(rho, "rho in bivariate normal not finite!")
    negRho = tf.clip_by_value(1 - tf.square(rho), epsilon, 1.0)
    negRho = tf.verify_tensor_all_finite(negRho, "negRho not finite!")
    # Note that if negRho goes near zero, or z goes really large, this explodes.
    negRho = tf.verify_tensor_all_finite(negRho, "negRho in bivariate normal not finite!")
    result = tf.clip_by_value(tf.exp(tf.div(-z, 2 * negRho)), 1.0e-8, 1.0e8)
    result = tf.verify_tensor_all_finite(result, "Result in bivariate normal not finite!")
    denom = 2 * np.pi * tf.mul(s, tf.sqrt(negRho))
    denom = tf.verify_tensor_all_finite(denom, "Denom in bivariate normal not finite!")
    result = tf.clip_by_value(tf.div(result, denom + epsilon), epsilon, 1.0)
    result = tf.verify_tensor_all_finite(result, "Result2 in bivariate normal not finite!")
    return result, delta
Author: cybercom-finland, Project: location_tracking_ml, Lines: 27, Source: model.py
Example 5: translate
def translate(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_ch = U.get_shape()[1:]
    height = height.value
    width = width.value
    num_ch = num_ch.value
    hwc = height*width*num_ch

    nind = tf.range(num_batch)
    x = repeat(tf.range(height), width)
    y = tf.tile(tf.range(width), tf.pack([height]))
    cind = tf.range(num_ch)

    nind = tf.expand_dims(repeat(nind, hwc), 1)
    x = tf.tile(tf.expand_dims(repeat(x, num_ch), 1), tf.pack([num_batch, 1]))
    y = tf.tile(tf.expand_dims(repeat(y, num_ch), 1), tf.pack([num_batch, 1]))
    cind = tf.tile(tf.expand_dims(cind, 1), tf.pack([num_batch*height*width, 1]))

    dx, dy = tf.split(1, 2, theta)
    dx = tf.cast(tf.clip_by_value(dx, 0, out_height-height), 'int32')
    dx = tf.reshape(tf.tile(dx, tf.pack([1, hwc])), [-1, 1])
    dy = tf.cast(tf.clip_by_value(dy, 0, out_width-width), 'int32')
    dy = tf.reshape(tf.tile(dy, tf.pack([1, hwc])), [-1, 1])
    x = x + dx
    y = y + dy

    tind = tf.concat(1, [nind, x, y, cind])
    val = tf.reshape(U, [-1])
    T = tf.sparse_to_dense(tind,
                           tf.pack([num_batch, out_height, out_width, num_ch]),
                           val)
    T.set_shape([None, out_height, out_width, num_ch])
    return T
Author: juho-lee, Project: tf_practice, Lines: 33, Source: translate.py
Example 6: build_decoder
def build_decoder(self, input_var):
    # Build the decoder
    if len(self.p_layers) > 0:
        self._decoder = Sequential('vae_decoder')
        self._decoder += FullyConnected(self.latent_dims, self.p_layers[0], coder_act_fn, name='fc_1')
        for i in xrange(1, len(self.p_layers)):
            self._decoder += FullyConnected(self.p_layers[i-1], self.p_layers[i], coder_act_fn, name='fc_%d' % (i+1))
        self.decoder = self._decoder(input_var)

        self._dec_mean = FullyConnected(self.p_layers[-1], self.input_dims, dec_mean_act_fn, name='dec_mean')
        self.dec_mean = self._dec_mean(self.decoder)
        self._dec_log_std_sq = FullyConnected(self.p_layers[-1], self.input_dims, mean_std_act_fn, name='dec_std')
        self.dec_log_std_sq = tf.clip_by_value(
            self._dec_log_std_sq(self.decoder),
            -self.sigma_clip,
            self.sigma_clip
        )
    else:
        self.decoder = input_var
        self._dec_mean = FullyConnected(self.latent_dims, self.input_dims, dec_mean_act_fn, name='dec_mean')
        self.dec_mean = self._dec_mean(self.decoder)
        self._dec_log_std_sq = FullyConnected(self.latent_dims, self.input_dims, mean_std_act_fn, name='dec_std')
        self.dec_log_std_sq = tf.clip_by_value(
            self._dec_log_std_sq(self.decoder),
            -self.sigma_clip,
            self.sigma_clip
        )
Author: KamyarGh, Project: 412-Project, Lines: 30, Source: vae.py
Example 7: cross_entropy
def cross_entropy(u, label_u, alpha=0.5, normed=False):
    label_ip = tf.cast(
        tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
    s = tf.clip_by_value(label_ip, 0.0, 1.0)

    # compute balance param
    # s_t \in {-1, 1}
    s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
    sum_1 = tf.reduce_sum(s)
    sum_all = tf.reduce_sum(tf.abs(s_t))
    balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                           tf.multiply(tf.div(sum_all, sum_1), s))

    if normed:
        # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ip_1 = tf.matmul(u, tf.transpose(u))

        def reduce_shaper(t):
            return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])

        mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                  reduce_shaper(tf.square(u)), transpose_b=True))
        ip = tf.div(ip_1, mod_1)
    else:
        ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
    ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
    return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Author: AllenMao, Project: DeepHash, Lines: 27, Source: dhn.py
Example 8: prob_is_largest
def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
    # work out the mean and variance of the indicated latent function.
    oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1.0, 0.0), float_type)
    mu_selected = tf.reduce_sum(oh_on * mu, 1)
    var_selected = tf.reduce_sum(oh_on * var, 1)

    # generate Gauss-Hermite grid
    X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
        tf.sqrt(tf.clip_by_value(2.0 * var_selected, 1e-10, np.inf)), (-1, 1)
    )

    # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
    dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
        tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2
    )
    cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))
    cdfs = cdfs * (1 - 2e-4) + 1e-4

    # blank out all the distances on the selected latent function
    oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0.0, 1.0), float_type)
    cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)

    # take the product over the latent functions, and the sum over the GH grid.
    return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
Author: GPflow, Project: GPflow, Lines: 25, Source: likelihoods.py
Example 9: _create_cost_function_node
def _create_cost_function_node(self, model_output, ref_input, regterm=None):
    """ Create the cost function node.
    :param model_output: model output node
    :param ref_input: reference input placeholder node
    :param regterm: regularization term
    :return: None (sets self.cost)
    """
    with tf.name_scope("cost"):
        if self.loss_func == 'cross_entropy':
            cost = - tf.reduce_mean(ref_input * tf.log(tf.clip_by_value(model_output, 1e-10, float('inf'))) +
                                    (1 - ref_input) * tf.log(tf.clip_by_value(1 - model_output, 1e-10, float('inf'))))
        elif self.loss_func == 'softmax_cross_entropy':
            softmax = tf.nn.softmax(model_output)
            cost = - tf.reduce_mean(ref_input * tf.log(softmax) + (1 - ref_input) * tf.log(1 - softmax))
        elif self.loss_func == 'mean_squared':
            cost = tf.sqrt(tf.reduce_mean(tf.square(ref_input - model_output)))
        else:
            cost = None

        if cost is not None:
            self.cost = cost + regterm if regterm is not None else cost
            _ = tf.scalar_summary(self.loss_func, self.cost)
        else:
            self.cost = None
Author: buptqitian, Project: Deep-Learning-TensorFlow, Lines: 29, Source: model.py
Example 10: focal_loss
def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
    r"""Compute focal loss for predictions.

    Multi-label focal loss formula:
        FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p)
    where alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: A float tensor of shape [batch_size, num_anchors]
      alpha: A scalar tensor for focal loss alpha hyper-parameter
      gamma: A scalar tensor for focal loss gamma hyper-parameter

    Returns:
      loss: A (scalar) tensor representing the value of the loss function
    """
    sigmoid_p = tf.nn.sigmoid(prediction_tensor)
    zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
    pos_p_sub = array_ops.where(target_tensor >= sigmoid_p, target_tensor - sigmoid_p, zeros)
    neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
    per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
    return tf.reduce_mean(per_entry_cross_ent)
Author: quan821223, Project: pulmonary-nodules-MaskRCNN, Lines: 25, Source: focal_loss.py
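As a quick way to try Example 10, here is a hypothetical usage sketch (our own illustration, not code from the original project). It assumes TF 1.x, reuses the focal_loss function defined above, and makes up shapes and values purely for demonstration:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import array_ops  # used inside focal_loss

# Hypothetical shapes: batch of 2 images, 3 anchors, 4 classes.
logits = tf.constant(np.random.randn(2, 3, 4).astype('float32'))
targets = tf.one_hot(np.random.randint(0, 4, size=(2, 3)), depth=4)

loss = focal_loss(logits, targets)  # focal_loss as defined in Example 10
with tf.Session() as sess:
    print(sess.run(loss))  # a single scalar loss value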
Example 11: batchnorm
def batchnorm(x, gamma, beta, r_mean, r_var):
    mean, var = tf.nn.moments(x, [0])
    update_mean = tf.assign(r_mean, 0.9 * r_mean + 0.1 * mean)
    update_var = tf.assign(r_var, 0.9 * r_var + 0.1 * var)
    with tf.control_dependencies([update_mean, update_var]):
        return tf.nn.batch_normalization(x, tf.clip_by_value(r_mean, 1e-10, 100), tf.clip_by_value(r_var, 1e-10, 100),
                                         offset=beta, scale=gamma, variance_epsilon=1e-5)
Author: dunkyfool, Project: qai, Lines: 7, Source: layers.py
Example 12: clip_weights_with_threshold
def clip_weights_with_threshold(max_threshold):
    global weights
    for op, w in weights.items():
        if 'conv' in op:
            weights[op] = tf.clip_by_value(weights[op], -max_threshold, max_threshold, name=None)
        elif 'fulcon' in op:
            weights[op] = tf.clip_by_value(weights[op], -max_threshold, max_threshold, name=None)
Author: thushv89, Project: ConvNets, Lines: 7, Source: experiment_inc_initalization.py
Example 13: _loss_x_entropy
def _loss_x_entropy(self, x, z, noise=None):
    with tf.name_scope("xentropy_loss"):
        z_clipped = tf.clip_by_value(z, FLAGS.zero_bound, FLAGS.one_bound)
        z_minus_1_clipped = tf.clip_by_value((1.0 - z), FLAGS.zero_bound, FLAGS.one_bound)
        x_clipped = tf.clip_by_value(x, FLAGS.zero_bound, FLAGS.one_bound)
        x_minus_1_clipped = tf.clip_by_value((1.0 - x), FLAGS.zero_bound, FLAGS.one_bound)

        # cross_entropy = x * log(z) + (1 - x) * log(1 - z)
        cross_entropy = tf.add(tf.mul(tf.log(z_clipped), x_clipped),
                               tf.mul(tf.log(z_minus_1_clipped), x_minus_1_clipped), name='X-Entr')

        if noise:
            with tf.name_scope("Given_Emphasis"):
                a, b = self._get_emph_params
                corrupted = tf.select(noise, cross_entropy, tf.zeros_like(cross_entropy), name='Corrupted_Emphasis')
                # OR -- tf.select(tf.logical_not(noisy_points), cross_entropy, tf.zeros_like(cross_entropy), name='Uncorrupted_Emphasis')
                uncorrupted = tf.select(noise, tf.zeros_like(cross_entropy), cross_entropy, name='Uncorrupted_Emphasis')
                loss = a * (-1 * tf.reduce_sum(corrupted, 1)) + b * (-1 * tf.reduce_sum(uncorrupted, 1))
        else:
            # Sum the cost for each example
            loss = -1 * tf.reduce_sum(cross_entropy, 1)

        # Reduce mean to find the overall cost of the loss
        cross_entropy_mean = tf.reduce_mean(loss, name='xentropy_mean')
        return cross_entropy_mean
Author: hussius, Project: StackedDAE, Lines: 29, Source: dae.py
Example 14: build_model
def build_model(self, reuse, dev, ntype):
    # Note: the original source used "with tf.variable_scope(self.name) and tf.device(dev):",
    # which only enters the tf.device context; the comma form enters both.
    with tf.variable_scope(self.name), tf.device(dev):
        if reuse:
            tf.get_variable_scope().reuse_variables()
            assert tf.get_variable_scope().reuse

        # Set inputs of networks
        self.minimap = tf.placeholder(tf.float32, [None, U.minimap_channel(), self.msize, self.msize], name='minimap')
        self.screen = tf.placeholder(tf.float32, [None, U.screen_channel(), self.ssize, self.ssize], name='screen')
        self.info = tf.placeholder(tf.float32, [None, self.isize], name='info')

        # Build networks
        net = build_net(self.minimap, self.screen, self.info, self.msize, self.ssize, len(actions.FUNCTIONS), ntype)
        self.spatial_action, self.non_spatial_action, self.value = net

        # Set targets and masks
        self.valid_spatial_action = tf.placeholder(tf.float32, [None], name='valid_spatial_action')
        self.spatial_action_selected = tf.placeholder(tf.float32, [None, self.ssize**2], name='spatial_action_selected')
        self.valid_non_spatial_action = tf.placeholder(tf.float32, [None, len(actions.FUNCTIONS)], name='valid_non_spatial_action')
        self.non_spatial_action_selected = tf.placeholder(tf.float32, [None, len(actions.FUNCTIONS)], name='non_spatial_action_selected')
        self.value_target = tf.placeholder(tf.float32, [None], name='value_target')

        # Compute log probability
        spatial_action_prob = tf.reduce_sum(self.spatial_action * self.spatial_action_selected, axis=1)
        spatial_action_log_prob = tf.log(tf.clip_by_value(spatial_action_prob, 1e-10, 1.))
        non_spatial_action_prob = tf.reduce_sum(self.non_spatial_action * self.non_spatial_action_selected, axis=1)
        valid_non_spatial_action_prob = tf.reduce_sum(self.non_spatial_action * self.valid_non_spatial_action, axis=1)
        valid_non_spatial_action_prob = tf.clip_by_value(valid_non_spatial_action_prob, 1e-10, 1.)
        non_spatial_action_prob = non_spatial_action_prob / valid_non_spatial_action_prob
        non_spatial_action_log_prob = tf.log(tf.clip_by_value(non_spatial_action_prob, 1e-10, 1.))
        self.summary.append(tf.summary.histogram('spatial_action_prob', spatial_action_prob))
        self.summary.append(tf.summary.histogram('non_spatial_action_prob', non_spatial_action_prob))

        # Compute losses, more details in https://arxiv.org/abs/1602.01783
        # Policy loss and value loss
        action_log_prob = self.valid_spatial_action * spatial_action_log_prob + non_spatial_action_log_prob
        advantage = tf.stop_gradient(self.value_target - self.value)
        policy_loss = - tf.reduce_mean(action_log_prob * advantage)
        value_loss = - tf.reduce_mean(self.value * advantage)
        self.summary.append(tf.summary.scalar('policy_loss', policy_loss))
        self.summary.append(tf.summary.scalar('value_loss', value_loss))

        # TODO: policy penalty
        loss = policy_loss + value_loss

        # Build the optimizer
        self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
        opt = tf.train.RMSPropOptimizer(self.learning_rate, decay=0.99, epsilon=1e-10)
        grads = opt.compute_gradients(loss)
        clipped_grad = []
        for grad, var in grads:
            self.summary.append(tf.summary.histogram(var.op.name, var))
            self.summary.append(tf.summary.histogram(var.op.name + '/grad', grad))
            grad = tf.clip_by_norm(grad, 10.0)
            clipped_grad.append([grad, var])
        self.train_op = opt.apply_gradients(clipped_grad)
        self.summary_op = tf.summary.merge(self.summary)

        self.saver = tf.train.Saver(max_to_keep=100)
Author: fanyp17, Project: pysc2-agents, Lines: 59, Source: a3c_agent.py
Example 15: _forward
def _forward(self, x, gpu):
    hps = self.hps

    x = tf.to_float(x)
    x = tf.clip_by_value((x + 0.5) / 256.0, 0.0, 1.0) - 0.5

    # Input images are repeated k times on the input.
    # This is used for Importance Sampling loss (k is number of samples).
    data_size = hps.batch_size * hps.k
    x = repeat(x, hps.k)

    orig_x = x
    h_size = hps.h_size

    with arg_scope([conv2d, deconv2d], init=(self.mode == "init")):
        layers = []
        for i in range(hps.depth):
            layers.append([])
            for j in range(hps.num_blocks):
                downsample = (i > 0) and (j == 0)
                layers[-1].append(IAFLayer(hps, self.mode, downsample))

        h = conv2d("x_enc", x, h_size, [5, 5], [2, 2])  # -> [16, 16]
        for i, layer in enumerate(layers):
            for j, sub_layer in enumerate(layer):
                with tf.variable_scope("IAF_%d_%d" % (i, j)):
                    h = sub_layer.up(h)

        # top->down
        self.h_top = h_top = tf.get_variable("h_top", [h_size], initializer=tf.zeros_initializer)
        h_top = tf.reshape(h_top, [1, -1, 1, 1])
        h = tf.tile(h_top, [data_size, 1, hps.image_size / 2 ** len(layers), hps.image_size / 2 ** len(layers)])
        kl_cost = kl_obj = 0.0

        for i, layer in reversed(list(enumerate(layers))):
            for j, sub_layer in reversed(list(enumerate(layer))):
                with tf.variable_scope("IAF_%d_%d" % (i, j)):
                    h, cur_obj, cur_cost = sub_layer.down(h)
                    kl_obj += cur_obj
                    kl_cost += cur_cost

                    if self.mode == "train" and gpu == hps.num_gpus - 1:
                        tf.scalar_summary("model/kl_obj_%02d_%02d" % (i, j), tf.reduce_mean(cur_obj))
                        tf.scalar_summary("model/kl_cost_%02d_%02d" % (i, j), tf.reduce_mean(cur_cost))

        x = tf.nn.elu(h)
        x = deconv2d("x_dec", x, 3, [5, 5])
        x = tf.clip_by_value(x, -0.5 + 1 / 512., 0.5 - 1 / 512.)

    log_pxz = discretized_logistic(x, self.dec_log_stdv, sample=orig_x)
    obj = tf.reduce_sum(kl_obj - log_pxz)

    if self.mode == "train" and gpu == hps.num_gpus - 1:
        tf.scalar_summary("model/log_pxz", -tf.reduce_mean(log_pxz))
        tf.scalar_summary("model/kl_obj", tf.reduce_mean(kl_obj))
        tf.scalar_summary("model/kl_cost", tf.reduce_mean(kl_cost))

    loss = tf.reduce_sum(compute_lowerbound(log_pxz, kl_cost, hps.k))
    return x, obj, loss
Author: openai, Project: iaf, Lines: 59, Source: tf_train.py
Example 16: test
def test():
    saver.restore(sess, FLAGS.save_dir + '/model.ckpt')
    batch_x = test_x[0:100]

    fig = plt.figure('original')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(batch_x, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/original.png')

    fa, sa = sess.run([tf.clip_by_value(x_att0, 0, 1),
                       tf.clip_by_value(x_att1, 0, 1)], {x: batch_x})
    fig = plt.figure('first att')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(fa, (N, N), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/first_attention.png')

    fig = plt.figure('second att')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(sa, (N, N), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/second_attention.png')

    fr, sr = sess.run([tf.clip_by_value(p0, 0, 1),
                       tf.clip_by_value(p1, 0, 1)], {x: batch_x})
    fig = plt.figure('first recon')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(fr, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/first_recon.png')

    fig = plt.figure('second recon')
    plt.gray()
    plt.axis('off')
    plt.imshow(batchmat_to_tileimg(sr, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/second_recon.png')

    fig = plt.figure('reconstructed')
    plt.gray()
    plt.axis('off')
    p_recon = sess.run(p, {x: batch_x})
    plt.imshow(batchmat_to_tileimg(p_recon, (height, width), (10, 10)))
    fig.savefig(FLAGS.save_dir + '/reconstructed.png')

    p_gen = sess.run(p, {z0_c: np.random.normal(size=(100, n_lat_c)),
                         z0_t: np.random.normal(size=(100, n_lat_t)),
                         z1_c: np.random.normal(size=(100, n_lat_c)),
                         z1_t: np.random.normal(size=(100, n_lat_t))})
    I_gen = batchmat_to_tileimg(p_gen, (height, width), (10, 10))
    fig = plt.figure('generated')
    plt.gray()
    plt.axis('off')
    plt.imshow(I_gen)
    fig.savefig(FLAGS.save_dir + '/generated.png')

    plt.show()
Author: juho-lee, Project: tf_practice, Lines: 59, Source: dmnist_fixed_dvae_dattn.py
Example 17: scale
def scale(self, x):
    """Scale x from [-0.5, 0.5] to [0, 255]."""
    x = tf.where(tf.is_nan(x), tf.ones_like(x), x)
    x = tf.where(tf.is_inf(x), tf.ones_like(x), x)
    x = tf.clip_by_value(x, -0.5, 0.5)
    x += 0.5
    x = x * 2**self.hparams.n_bits_x
    return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8)
Author: qixiuai, Project: tensor2tensor, Lines: 8, Source: glow.py
Example 18: BatchRenorm
def BatchRenorm(x, rmax, dmax, decay=0.9, epsilon=1e-5,
                use_scale=True, use_bias=True):
    """
    Batch Renormalization layer, as described in the paper:
    `Batch Renormalization: Towards Reducing Minibatch Dependence in Batch-Normalized Models
    <https://arxiv.org/abs/1702.03275>`_.

    Args:
        x (tf.Tensor): a NHWC or NC tensor.
        rmax, dmax (tf.Tensor): a scalar tensor, the maximum allowed corrections.
        decay (float): decay rate of moving average.
        epsilon (float): epsilon to avoid divide-by-zero.
        use_scale, use_bias (bool): whether to use the extra affine transformation or not.

    Returns:
        tf.Tensor: a tensor named ``output`` with the same shape of x.

    Variable Names:

    * ``beta``: the bias term.
    * ``gamma``: the scale term. Input will be transformed by ``x * gamma + beta``.
    * ``mean/EMA``: the moving average of mean.
    * ``variance/EMA``: the moving average of variance.
    """
    shape = x.get_shape().as_list()
    assert len(shape) in [2, 4]
    n_out = shape[-1]
    if len(shape) == 2:
        x = tf.reshape(x, [-1, 1, 1, n_out])
    beta, gamma, moving_mean, moving_var = get_bn_variables(
        n_out, use_scale, use_bias, tf.constant_initializer(1.0))

    ctx = get_current_tower_context()
    use_local_stat = ctx.is_training
    # for BatchRenorm, use_local_stat should always be is_training, unless a
    # different usage comes out in the future.

    if use_local_stat:
        xn, batch_mean, batch_var = tf.nn.fused_batch_norm(x, gamma, beta,
                                                           epsilon=epsilon, is_training=True)
        inv_sigma = tf.rsqrt(moving_var, 'inv_sigma')
        r = tf.stop_gradient(tf.clip_by_value(
            tf.sqrt(batch_var) * inv_sigma, 1.0 / rmax, rmax))
        d = tf.stop_gradient(tf.clip_by_value(
            (batch_mean - moving_mean) * inv_sigma,
            -dmax, dmax))
        xn = xn * r + d
    else:
        xn = tf.nn.batch_normalization(
            x, moving_mean, moving_var, beta, gamma, epsilon)

    if len(shape) == 2:
        xn = tf.squeeze(xn, [1, 2])
    if ctx.is_main_training_tower:
        return update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, decay)
    else:
        return tf.identity(xn, name='output')
Author: j50888, Project: tensorpack, Lines: 58, Source: batch_norm.py
Example 19: __init__
def __init__(self, scope, globalAC=None):
    self.scope = scope
    if scope == GLOBAL_NET_SCOPE:
        ## the global network only does inference
        with tf.variable_scope(scope):
            self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
            self._build_net()
            self.a_params = tl.layers.get_variables_with_name(scope + '/actor', True, False)
            self.c_params = tl.layers.get_variables_with_name(scope + '/critic', True, False)
            normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)  # for continuous action space
            with tf.name_scope('choose_a'):  # use local params to choose action
                self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)
    else:
        ## worker networks compute gradients locally and apply the updates to the global network
        with tf.variable_scope(scope):
            self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
            self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
            self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')

            self._build_net()

            td = tf.subtract(self.v_target, self.v, name='TD_error')
            with tf.name_scope('c_loss'):
                self.c_loss = tf.reduce_mean(tf.square(td))

            with tf.name_scope('wrap_a_out'):
                self.test = self.sigma[0]
                self.mu, self.sigma = self.mu * A_BOUND[1], self.sigma + 1e-5

            normal_dist = tf.contrib.distributions.Normal(self.mu, self.sigma)  # for continuous action space
            with tf.name_scope('a_loss'):
                log_prob = normal_dist.log_prob(self.a_his)
                exp_v = log_prob * td
                entropy = normal_dist.entropy()  # encourage exploration
                self.exp_v = ENTROPY_BETA * entropy + exp_v
                self.a_loss = tf.reduce_mean(-self.exp_v)

            with tf.name_scope('choose_a'):  # use local params to choose action
                self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1), axis=0), *A_BOUND)

            with tf.name_scope('local_grad'):
                self.a_params = tl.layers.get_variables_with_name(scope + '/actor', True, False)
                self.c_params = tl.layers.get_variables_with_name(scope + '/critic', True, False)
                self.a_grads = tf.gradients(self.a_loss, self.a_params)
                self.c_grads = tf.gradients(self.c_loss, self.c_params)

        with tf.name_scope('sync'):
            with tf.name_scope('pull'):
                self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
                self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
            with tf.name_scope('push'):
                self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
                self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
Author: chenlingyun, Project: tensorlayer, Lines: 58, Source: tutorial_bipedalwalker_a3c_continuous_action.py
Example 20: _interpolate2d
def _interpolate2d(imgs, x, y):
    n_batch = tf.shape(imgs)[0]
    xlen = tf.shape(imgs)[1]
    ylen = tf.shape(imgs)[2]
    n_channel = tf.shape(imgs)[3]

    x = tf.to_float(x)
    y = tf.to_float(y)
    xlen_f = tf.to_float(xlen)
    ylen_f = tf.to_float(ylen)
    zero = tf.zeros([], dtype='int32')
    max_x = tf.cast(xlen - 1, 'int32')
    max_y = tf.cast(ylen - 1, 'int32')

    # scale indices from [-1, 1] to [0, xlen/ylen]
    x = (x + 1.) * (xlen_f - 1.) * 0.5
    y = (y + 1.) * (ylen_f - 1.) * 0.5

    # do sampling
    x0 = tf.cast(tf.floor(x), 'int32')
    x1 = x0 + 1
    y0 = tf.cast(tf.floor(y), 'int32')
    y1 = y0 + 1

    x0 = tf.clip_by_value(x0, zero, max_x)
    x1 = tf.clip_by_value(x1, zero, max_x)
    y0 = tf.clip_by_value(y0, zero, max_y)
    y1 = tf.clip_by_value(y1, zero, max_y)
    base = _repeat(tf.range(n_batch) * xlen * ylen, ylen * xlen)
    base_x0 = base + x0 * ylen
    base_x1 = base + x1 * ylen
    index00 = base_x0 + y0
    index01 = base_x0 + y1
    index10 = base_x1 + y0
    index11 = base_x1 + y1

    # use indices to lookup pixels in the flat image and restore
    # n_channel dim
    imgs_flat = tf.reshape(imgs, [-1, n_channel])
    imgs_flat = tf.to_float(imgs_flat)
    I00 = tf.gather(imgs_flat, index00)
    I01 = tf.gather(imgs_flat, index01)
    I10 = tf.gather(imgs_flat, index10)
    I11 = tf.gather(imgs_flat, index11)

    # and finally calculate interpolated values
    dx = x - tf.to_float(x0)
    dy = y - tf.to_float(y0)
    w00 = tf.expand_dims((1. - dx) * (1. - dy), 1)
    w01 = tf.expand_dims((1. - dx) * dy, 1)
    w10 = tf.expand_dims(dx * (1. - dy), 1)
    w11 = tf.expand_dims(dx * dy, 1)
    output = tf.add_n([w00*I00, w01*I01, w10*I10, w11*I11])

    # reshape
    output = tf.reshape(output, [n_batch, xlen, ylen, n_channel])
    return output
Author: Ryo-Ito, Project: spatial_transformer_network, Lines: 58, Source: warp.py
Note: the tensorflow.clip_by_value examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.