This article collects typical usage examples of the tensorflow.to_float function in Python. If you have been wondering what exactly to_float does, how to use it, or what real-world code that calls it looks like, the curated examples below should help.
The following shows 20 code examples of the to_float function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
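Before diving into the examples, a minimal sketch of the basic behavior may help. tf.to_float is the TensorFlow 1.x shorthand for casting any tensor to tf.float32; it was removed in TensorFlow 2.x, where tf.cast(x, tf.float32) is the direct replacement. The variable names below are illustrative:

import tensorflow as tf

labels = tf.constant([0, 1, 1, 0])        # an int32 tensor
labels_f = tf.to_float(labels)            # float32: [0., 1., 1., 0.]
equivalent = tf.cast(labels, tf.float32)  # same result; the TF 2.x replacement

with tf.Session() as sess:                # TF 1.x session API
  print(sess.run([labels_f, equivalent]))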
Example 1: ctrl_rewards
def ctrl_rewards(states,
                 actions,
                 rewards,
                 next_states,
                 contexts,
                 reward_scales=1.0):
  """Returns the negative control cost.

  Args:
    states: A [batch_size, num_state_dims] Tensor representing a batch
      of states.
    actions: A [batch_size, num_action_dims] Tensor representing a batch
      of actions.
    rewards: A [batch_size] Tensor representing a batch of rewards.
    next_states: A [batch_size, num_state_dims] Tensor representing a batch
      of next states.
    contexts: A list of [batch_size, num_context_dims] Tensor representing
      a batch of contexts.
    reward_scales: multiplicative scale for rewards. A scalar or 1D tensor,
      must be broadcastable to number of reward dimensions.

  Returns:
    A new tf.float32 [batch_size] rewards Tensor, and
      tf.float32 [batch_size] discounts tensor.
  """
  del states, rewards, contexts  # Unused
  if actions is None:
    rewards = tf.to_float(tf.zeros(shape=next_states.shape[:1]))
  else:
    rewards = -tf.reduce_sum(tf.square(actions), axis=1)
    rewards *= reward_scales
  rewards = tf.to_float(rewards)
  return rewards, tf.ones_like(rewards)
Author: Exscotticus, Project: models, Lines: 33, Source: rewards_functions.py
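A quick usage sketch for ctrl_rewards above (illustrative values, not from the source project): when actions are given, the reward is the negative sum of squared actions, and the discounts are all ones.

actions = tf.constant([[0.5, -0.5], [1.0, 0.0]])
# states, rewards and contexts are deleted as unused, and next_states is only
# read when actions is None, so None placeholders are enough here.
r, d = ctrl_rewards(None, actions, None, None, None)
with tf.Session() as sess:
  print(sess.run([r, d]))  # [-0.5, -1.0] and [1., 1.]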
Example 2: testPaddingCrossEntropyFactored
def testPaddingCrossEntropyFactored(self):
  vocab_size = 19
  rows = 5
  cols = 4
  depth = 11
  label_smoothing = 0.1
  features = np.random.rand(rows, cols, depth)
  weights = np.random.rand(vocab_size, depth)
  labels = np.random.randint(0, vocab_size - 1, size=(rows, cols))
  with self.test_session() as session:
    features = tf.to_float(features)
    weights = tf.to_float(weights)
    labels = tf.to_int32(labels)
    logits = tf.matmul(
        tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True)
    logits = tf.reshape(logits, [rows, cols, vocab_size])
    loss_num, loss_den = common_layers.padded_cross_entropy(
        logits, labels, label_smoothing=label_smoothing, reduce_sum=False)
    factored_logits = common_layers.FactoredTensor(features, weights)
    loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored(
        factored_logits,
        labels=labels,
        label_smoothing=label_smoothing,
        reduce_sum=False)
    num, den, num_f, den_f = session.run(
        [loss_num, loss_den, loss_num_f, loss_den_f])
    self.assertEqual(num.shape, (rows, cols))
    self.assertEqual(den.shape, (rows, cols))
    self.assertEqual(num_f.shape, (rows, cols))
    self.assertEqual(den_f.shape, (rows, cols))
    self.assertAllClose(num, num_f)
    self.assertAllClose(den, den_f)
Author: qixiuai, Project: tensor2tensor, Lines: 32, Source: common_layers_test.py
Example 3: compute_IOU
def compute_IOU(bboxA, bboxB):
  """Compute the Intersection Over Union.

  Args:
    bboxA: [N X 4 tensor] format = [left, top, right, bottom]
    bboxB: [N X 4 tensor]

  Return:
    IOU: [N X 1 tensor]
  """
  # Updated from the pre-1.0 TensorFlow API used in the original project:
  # tf.split(axis, num, value) -> tf.split(value, num, axis),
  # tf.mul -> tf.multiply, tf.div -> tf.divide.
  x1A, y1A, x2A, y2A = tf.split(bboxA, 4, axis=1)
  x1B, y1B, x2B, y2B = tf.split(bboxB, 4, axis=1)
  # Compute the intersection.
  x1_max = tf.maximum(x1A, x1B)
  y1_max = tf.maximum(y1A, y1B)
  x2_min = tf.minimum(x2A, x2B)
  y2_min = tf.minimum(y2A, y2B)
  # overlap_flag = tf.logical_and(tf.less(x1_max, x2_min), tf.less(y1_max, y2_min))
  overlap_flag = tf.to_float(tf.less(x1_max, x2_min)) * \
      tf.to_float(tf.less(y1_max, y2_min))
  overlap_area = tf.multiply(overlap_flag, tf.multiply(
      x2_min - x1_max, y2_min - y1_max))
  # Compute the union.
  areaA = tf.multiply(x2A - x1A, y2A - y1A)
  areaB = tf.multiply(x2B - x1B, y2B - y1B)
  union_area = areaA + areaB - overlap_area
  return tf.divide(overlap_area, union_area)
Author: renmengye, Project: deep-tracker, Lines: 33, Source: build_deep_tracker.py
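A quick sanity check for the function above (hypothetical boxes, TF 1.x session API assumed): two 2x2 boxes overlapping in a unit square give IoU 1/7.

boxA = tf.constant([[0.0, 0.0, 2.0, 2.0]])  # area 4
boxB = tf.constant([[1.0, 1.0, 3.0, 3.0]])  # overlaps boxA in a 1x1 square
with tf.Session() as sess:
  print(sess.run(compute_IOU(boxA, boxB)))  # [[0.14285715]] = 1 / (4 + 4 - 1)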
Example 4: crop_or_pad
def crop_or_pad(waves, length, channels):
  """Crop or pad wave to have shape [N, length, channels].

  Args:
    waves: A 3D `Tensor` of NLC format.
    length: A Python scalar. The output wave size.
    channels: Number of output waves channels.

  Returns:
    A 3D `Tensor` of NLC format with shape [N, length, channels].
  """
  waves = tf.convert_to_tensor(waves)
  batch_size = waves.shape[0].value
  waves_shape = tf.shape(waves)
  # Force audio length.
  pad = tf.maximum(0, length - waves_shape[1])
  right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
  left_pad = pad - right_pad
  waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
  waves = waves[:, :length, :]
  # Force number of channels.
  num_repeats = tf.to_int32(
      tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
  waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]
  waves.set_shape([batch_size, length, channels])
  return waves
Author: cghawthorne, Project: magenta, Lines: 29, Source: spectral_ops.py
Example 5: top_1_and_5
def top_1_and_5(predictions, labels):
  # test_size = FLAGS.test_size  # tf.shape(predictions)[0]
  in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=1))
  in_top5 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=5))
  num_correct_1 = tf.reduce_sum(in_top1, name="top1")
  num_correct_5 = tf.reduce_sum(in_top5, name="top5")
  return num_correct_1, num_correct_5
Author: codealphago, Project: LowRankTRN, Lines: 7, Source: resnet_train.py
Example 6: _smallest_size_at_least
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)
  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
Author: Zumbalamambo, Project: deepcv, Lines: 28, Source: vgg_preprocessing.py
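For intuition, a small sketch of the helper above (illustrative sizes, TF 1.x session API assumed): a 480x640 image with smallest_side=256 is scaled by 256/480, so the short side lands exactly on 256.

new_h, new_w = _smallest_size_at_least(height=480, width=640, smallest_side=256)
with tf.Session() as sess:
  print(sess.run([new_h, new_w]))  # [256, 341]; 640 * 256 / 480 ~= 341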
Example 7: _summarize_input
def _summarize_input(self, groundtruth_boxes_list, match_list):
  """Creates tensorflow summaries for the input boxes and anchors.

  This function creates four summaries corresponding to the average
  number (over images in a batch) of (1) groundtruth boxes, (2) anchors
  marked as positive, (3) anchors marked as negative, and (4) anchors marked
  as ignored.

  Args:
    groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
      containing corners of the groundtruth boxes.
    match_list: a list of matcher.Match objects encoding the match between
      anchors and groundtruth boxes for each image of the batch,
      with rows of the Match objects corresponding to groundtruth boxes
      and columns corresponding to anchors.
  """
  num_boxes_per_image = tf.stack(
      [tf.shape(x)[0] for x in groundtruth_boxes_list])
  pos_anchors_per_image = tf.stack(
      [match.num_matched_columns() for match in match_list])
  neg_anchors_per_image = tf.stack(
      [match.num_unmatched_columns() for match in match_list])
  ignored_anchors_per_image = tf.stack(
      [match.num_ignored_columns() for match in match_list])
  tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                    tf.reduce_mean(tf.to_float(num_boxes_per_image)))
  tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
  tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
  tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                    tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
Author: Peterwangcn, Project: object_detector_app, Lines: 32, Source: ssd_meta_arch.py
Example 8: _scale
def _scale(x):
  # min_x_value, max_x_value, output_min and output_max are captured from the
  # enclosing transform's scope.
  min_x_valuef = tf.to_float(min_x_value)
  max_x_valuef = tf.to_float(max_x_value)
  output_minf = tf.to_float(output_min)
  output_maxf = tf.to_float(output_max)
  return ((((tf.to_float(x) - min_x_valuef) * (output_maxf - output_minf)) /
           (max_x_valuef - min_x_valuef)) + output_minf)
Author: googledatalab, Project: pydatalab, Lines: 7, Source: feature_transforms.py
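_scale above is a standard min-max linear rescaling. As a worked example with assumed bounds: mapping [0, 10] onto [-1, 1] sends 2.5 to (2.5 * 2) / 10 - 1 = -0.5. A self-contained sketch of the same formula (min_max_scale is a hypothetical name, not from pydatalab):

def min_max_scale(x, min_x_value, max_x_value, output_min, output_max):
  """Linearly maps x from [min_x_value, max_x_value] onto [output_min, output_max]."""
  x = tf.to_float(x)
  return ((x - min_x_value) * (output_max - output_min) /
          (max_x_value - min_x_value)) + output_min

with tf.Session() as sess:
  print(sess.run(min_max_scale(tf.constant([0.0, 2.5, 10.0]), 0.0, 10.0, -1.0, 1.0)))
  # [-1.  -0.5  1. ]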
Example 9: _get_sampling_probability
def _get_sampling_probability(hparams, is_training):
  """Returns `sampling_probability` if `sampling_schedule` is given, else 0."""
  if (not hasattr(hparams, 'sampling_schedule') or
      not hparams.sampling_schedule):
    return tf.convert_to_tensor(0.0, tf.float32)
  if not is_training:
    # This is likely an eval/test job associated with a training job using
    # scheduled sampling.
    tf.logging.warning(
        'Setting non-training sampling schedule from %s:%f to constant:1.0.',
        hparams.sampling_schedule, hparams.sampling_rate)
    hparams.sampling_schedule = 'constant'
    hparams.sampling_rate = 1.0
  if hparams.sampling_schedule == 'constant':
    sampling_probability = tf.constant(hparams.sampling_rate)
  elif hparams.sampling_schedule == 'inverse_sigmoid':
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = 1.0 - (
        k / (k + tf.exp(tf.to_float(tf.train.get_or_create_global_step()) / k)))
  elif hparams.sampling_schedule == 'exponential':
    if not 0 < hparams.sampling_rate < 1:
      raise ValueError(
          'Exponential sampling rate must be in the interval (0, 1). Got %f.'
          % hparams.sampling_rate)
    k = tf.constant(hparams.sampling_rate)
    sampling_probability = (
        1.0 - tf.pow(k, tf.to_float(tf.train.get_or_create_global_step())))
  else:
    tf.logging.fatal('Invalid sampling_schedule: %s',
                     hparams.sampling_schedule)
  tf.summary.scalar('sampling_probability', sampling_probability)
  return tf.convert_to_tensor(sampling_probability, tf.float32)
Author: wyn314, Project: magenta, Lines: 33, Source: lstm_models.py
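For intuition about the 'exponential' branch above (illustrative rate, not from the source): with sampling_rate k = 0.99, the probability 1 - k**step rises from about 0.10 at step 10 to about 0.63 at step 100.

k = 0.99
for step in (10, 100, 1000):
  print(step, 1.0 - k ** step)  # ~0.096, ~0.634, ~0.99996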
Example 10: total_variation_loss
def total_variation_loss(stylized_inputs, total_variation_weight):
  """Total variation regularization loss.

  This loss improves the smoothness of the image by expressing high frequency
  variations as a loss.
  http://link.springer.com/article/10.1023/B:JMIV.0000011325.36760.1e

  Args:
    stylized_inputs: The batched set of images.
    total_variation_weight: Weight of total variation loss.

  Returns:
    Tensor for the total variation loss, dict mapping loss names to losses.
  """
  shape = tf.shape(stylized_inputs)
  batch_size = shape[0]
  height = shape[1]
  width = shape[2]
  channels = shape[3]
  y_size = tf.to_float((height - 1) * width * channels)
  x_size = tf.to_float(height * (width - 1) * channels)
  y_loss = tf.nn.l2_loss(
      stylized_inputs[:, 1:, :, :] - stylized_inputs[:, :-1, :, :]) / y_size
  x_loss = tf.nn.l2_loss(
      stylized_inputs[:, :, 1:, :] - stylized_inputs[:, :, :-1, :]) / x_size
  loss = (y_loss + x_loss) / tf.to_float(batch_size)
  weighted_loss = loss * total_variation_weight
  return weighted_loss, {
      'total_variation_loss': loss,
      'weighted_total_variation_loss': weighted_loss
  }
Author: Alice-ren, Project: magenta, Lines: 31, Source: learning.py
Example 11: compute_metrics
def compute_metrics(output_video, target_video):
  max_pixel_value = 255.0
  output_video = tf.to_float(output_video)
  target_video = tf.to_float(target_video)
  psnr = tf.image.psnr(output_video, target_video, max_pixel_value)
  ssim = tf.image.ssim(output_video, target_video, max_pixel_value)
  return {"PSNR": psnr, "SSIM": ssim}
Author: kltony, Project: tensor2tensor, Lines: 7, Source: video_metrics.py
Example 12: total_variation_loss
def total_variation_loss(layer):
  shape = tf.shape(layer)
  height = shape[1]
  width = shape[2]
  # tf.pack from the original project was renamed tf.stack in TensorFlow 1.0.
  y = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, height - 1, -1, -1])) - \
      tf.slice(layer, [0, 1, 0, 0], [-1, -1, -1, -1])
  x = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, -1, width - 1, -1])) - \
      tf.slice(layer, [0, 0, 1, 0], [-1, -1, -1, -1])
  return (tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) +
          tf.nn.l2_loss(y) / tf.to_float(tf.size(y)))
Author: DenisSergeevitch, Project: fast-neural-style, Lines: 7, Source: fast_neural_style.py
Example 13: f_conf_loss
def f_conf_loss(s_out, match, timespan, use_cum_min=True):
  """Loss function for confidence score sequence.

  Args:
    s_out:
    match:
    timespan:
    use_cum_min:
  """
  s_out_shape = tf.shape(s_out)
  num_ex = tf.to_float(s_out_shape[0])
  max_num_obj = tf.to_float(s_out_shape[1])
  match_sum = tf.reduce_sum(match, reduction_indices=[2])
  # Loss for confidence scores.
  if use_cum_min:
    # [B, N]
    s_out_min = f_cum_min(s_out, timespan)
    s_out_max = f_cum_max(s_out, timespan)
    # [B, N]
    s_bce = f_bce_minmax(s_out_min, s_out_max, match_sum)
  else:
    s_bce = f_bce(s_out, match_sum)
  loss = tf.reduce_sum(s_bce) / num_ex / max_num_obj
  return loss
Author: lrjconan, Project: img-count, Lines: 25, Source: ris_model_base.py
Example 14: f_iou_box
def f_iou_box(top_left_a, bot_right_a, top_left_b, bot_right_b):
  """Computes IoU of boxes.

  Args:
    top_left_a: [B, T, 2] or [B, 2]
    bot_right_a: [B, T, 2] or [B, 2]
    top_left_b: [B, T, 2] or [B, 2]
    bot_right_b: [B, T, 2] or [B, 2]

  Returns:
    iou: [B, T]
  """
  inter_area = f_inter_box(top_left_a, bot_right_a, top_left_b, bot_right_b)
  inter_area = tf.maximum(inter_area, 1e-6)
  ndims = tf.shape(tf.shape(top_left_a))
  # area_a = tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
  # area_b = tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
  check_a = tf.reduce_prod(tf.to_float(top_left_a < bot_right_a), ndims - 1)
  area_a = check_a * tf.reduce_prod(bot_right_a - top_left_a, ndims - 1)
  check_b = tf.reduce_prod(tf.to_float(top_left_b < bot_right_b), ndims - 1)
  area_b = check_b * tf.reduce_prod(bot_right_b - top_left_b, ndims - 1)
  union_area = (area_a + area_b - inter_area + 1e-5)
  union_area = tf.maximum(union_area, 1e-5)
  iou = inter_area / union_area
  iou = tf.maximum(iou, 1e-5)
  iou = tf.minimum(iou, 1.0)
  return iou
Author: lrjconan, Project: img-count, Lines: 28, Source: ris_model_base.py
Example 15: summarize
def summarize(self):
  """Summarize the number of positives and negatives after mining."""
  if self._num_positives_list and self._num_negatives_list:
    avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list))
    avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list))
    tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives)
    tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives)
Author: ahmedtalbi, Project: models, Lines: 7, Source: losses.py
Example 16: testLSTMCellReparameterization
def testLSTMCellReparameterization(
    self, kernel_initializer, recurrent_initializer, bias_initializer,
    all_close):
  batch_size, timesteps, dim = 5, 3, 12
  hidden_size = 10
  inputs = tf.to_float(np.random.rand(batch_size, timesteps, dim))
  cell = bayes.LSTMCellReparameterization(
      hidden_size, kernel_initializer=kernel_initializer,
      recurrent_initializer=recurrent_initializer,
      bias_initializer=bias_initializer)
  noise = tf.to_float(np.random.rand(1, hidden_size))
  h0, c0 = cell.get_initial_state(inputs)
  state = (h0 + noise, c0)
  outputs1, _ = cell(inputs[:, 0, :], state)
  outputs2, _ = cell(inputs[:, 0, :], state)
  cell.sample_weights()
  outputs3, _ = cell(inputs[:, 0, :], state)
  self.evaluate(tf.global_variables_initializer())
  res1, res2, res3 = self.evaluate([outputs1, outputs2, outputs3])
  self.assertEqual(res1.shape, (batch_size, hidden_size))
  self.assertAllClose(res1, res2)
  if all_close:
    self.assertAllClose(res1, res3)
  else:
    self.assertNotAllClose(res1, res3)
  cell.get_config()
Author: qixiuai, Project: tensor2tensor, Lines: 26, Source: bayes_test.py
Example 17: log_loss
def log_loss(labels, predictions, epsilon=1e-7, scope=None, weights=None):
  """Calculate log losses.

  Same as tf.losses.log_loss except that this returns the individual losses
  instead of passing them into compute_weighted_loss and returning their
  weighted mean. This is useful for eval jobs that report the mean loss. By
  returning individual losses, that mean loss can be the same regardless of
  batch size.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.
    weights: Weights to apply to labels.

  Returns:
    A `Tensor` representing the loss values.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`.
  """
  with tf.name_scope(scope, "log_loss", (predictions, labels)):
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = -tf.multiply(labels, tf.log(predictions + epsilon)) - tf.multiply(
        (1 - labels), tf.log(1 - predictions + epsilon))
    if weights is not None:
      losses = tf.multiply(losses, weights)
    return losses
Author: cghawthorne, Project: magenta, Lines: 32, Source: tf_utils.py
Example 18: testBayesianLinearModel
def testBayesianLinearModel(self):
  """Tests that model makes reasonable predictions."""
  np.random.seed(42)
  train_batch_size = 5
  test_batch_size = 2
  num_features = 3
  noise_variance = 0.01
  coeffs = tf.range(num_features, dtype=tf.float32)
  features = tf.to_float(np.random.randn(train_batch_size, num_features))
  labels = (tf.tensordot(features, coeffs, [[-1], [0]])
            + noise_variance * tf.to_float(np.random.randn(train_batch_size)))
  model = bayes.BayesianLinearModel(noise_variance=noise_variance)
  model.fit(features, labels)
  test_features = tf.to_float(np.random.randn(test_batch_size, num_features))
  test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
  outputs = model(test_features)
  test_predictions = outputs.distribution.mean()
  test_predictions_variance = outputs.distribution.variance()
  [
      test_labels_val, test_predictions_val, test_predictions_variance_val,
  ] = self.evaluate(
      [test_labels, test_predictions, test_predictions_variance])
  self.assertEqual(test_predictions_val.shape, (test_batch_size,))
  self.assertEqual(test_predictions_variance_val.shape, (test_batch_size,))
  self.assertAllClose(test_predictions_val, test_labels_val, atol=0.1)
  self.assertAllLessEqual(test_predictions_variance_val, noise_variance)
Author: qixiuai, Project: tensor2tensor, Lines: 29, Source: bayes_test.py
Example 19: attentive_pooling_weights
def attentive_pooling_weights(U_AP, raw_question_rep, raw_answer_rep, tokens_question, tokens_answer,
                              apply_softmax=True):
  """Calculates the attentive pooling weights for question and answer

  :param U_AP: the soft-attention similarity matrix (to learn)
  :param raw_question_rep:
  :param raw_answer_rep:
  :param tokens_question: The raw token indices of the question. Used to detect not-set tokens
  :param tokens_answer: The raw token indices of the answer. Used to detect not-set tokens
  :param Q_PW: Positional weighting matrix for the question
  :param A_PW: Positional weighting matrix for the answer
  :param apply_softmax:
  :return: question weights, answer weights
  """
  tokens_question_float = tf.to_float(tokens_question)
  tokens_answer_float = tf.to_float(tokens_answer)
  tokens_question_non_zero = non_zero_tokens(tokens_question_float)
  tokens_answer_non_zero = non_zero_tokens(tokens_answer_float)
  G = soft_alignment(U_AP, raw_question_rep, raw_answer_rep, tokens_question_non_zero,
                     tokens_answer_non_zero)
  maxpool_GQ = tf.reduce_max(G, [2], keep_dims=False)
  maxpool_GA = tf.reduce_max(G, [1], keep_dims=False)
  if apply_softmax:
    attention_Q = attention_softmax(maxpool_GQ, tokens_question_non_zero)
    attention_A = attention_softmax(maxpool_GA, tokens_answer_non_zero)
  else:
    attention_Q = maxpool_GQ
    attention_A = maxpool_GA
  return attention_Q, attention_A
Author: zhongyunuestc, Project: iwcs2017-answer-selection, Lines: 32, Source: pooling_helper.py
Example 20: __init__
def __init__(self, env, hidden_size, entcoeff=0.001, lr_rate=1e-3, scope="adversary"):
  self.scope = scope
  self.observation_shape = env.observation_space.shape
  self.actions_shape = env.action_space.shape
  self.input_shape = tuple([o + a for o, a in zip(self.observation_shape, self.actions_shape)])
  self.num_actions = env.action_space.shape[0]
  self.hidden_size = hidden_size
  self.build_ph()
  # Build graph.
  generator_logits = self.build_graph(self.generator_obs_ph, self.generator_acs_ph, reuse=False)
  expert_logits = self.build_graph(self.expert_obs_ph, self.expert_acs_ph, reuse=True)
  # Build accuracy.
  generator_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(generator_logits) < 0.5))
  expert_acc = tf.reduce_mean(tf.to_float(tf.nn.sigmoid(expert_logits) > 0.5))
  # Build regression loss:
  # let x = logits, z = targets.
  # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
  generator_loss = tf.nn.sigmoid_cross_entropy_with_logits(
      logits=generator_logits, labels=tf.zeros_like(generator_logits))
  generator_loss = tf.reduce_mean(generator_loss)
  expert_loss = tf.nn.sigmoid_cross_entropy_with_logits(
      logits=expert_logits, labels=tf.ones_like(expert_logits))
  expert_loss = tf.reduce_mean(expert_loss)
  # Build entropy loss.
  logits = tf.concat([generator_logits, expert_logits], 0)
  entropy = tf.reduce_mean(logit_bernoulli_entropy(logits))
  entropy_loss = -entcoeff * entropy
  # Loss + accuracy terms.
  self.losses = [generator_loss, expert_loss, entropy, entropy_loss, generator_acc, expert_acc]
  self.loss_name = ["generator_loss", "expert_loss", "entropy", "entropy_loss", "generator_acc", "expert_acc"]
  self.total_loss = generator_loss + expert_loss + entropy_loss
  # Build reward for the policy.
  self.reward_op = -tf.log(1 - tf.nn.sigmoid(generator_logits) + 1e-8)
  var_list = self.get_trainable_variables()
  self.lossandgrad = U.function(
      [self.generator_obs_ph, self.generator_acs_ph, self.expert_obs_ph, self.expert_acs_ph],
      self.losses + [U.flatgrad(self.total_loss, var_list)])
Author: MrGoogol, Project: baselines, Lines: 34, Source: adversary.py
Note: the tensorflow.to_float examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce this page without permission.