This article collects typical usage examples of the Python function tensorflow.python.summary.summary.scalar. If you have been wondering what exactly the scalar function does, how to call it, or where to find working usage, the curated code examples here should help.
The following presents 20 code examples of the scalar function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
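Before the examples, here is a minimal end-to-end sketch of the typical call pattern (an illustration written against the public tf.compat.v1 aliases of the same function, not taken from any example below; the logdir path is hypothetical): scalar builds a summary op, merge_all gathers every registered summary, and a FileWriter serializes the result for TensorBoard.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
loss = tf.constant(0.25)
tf.summary.scalar('loss', loss)   # builds a scalar Summary op in the default graph
merged = tf.summary.merge_all()   # merges every summary registered so far

with tf.Session() as sess:
  writer = tf.summary.FileWriter('/tmp/scalar_demo', sess.graph)  # hypothetical logdir
  writer.add_summary(sess.run(merged), global_step=0)
  writer.close()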
Example 1: apply_gradients
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
  gradients = []
  # Number of stale gradients.
  stale_counter = variable_scope.get_variable(
      "stale_counter", [],
      initializer=init_ops.zeros_initializer(),
      trainable=False)

  def _AcceptGradientOp():
    with ops.control_dependencies(
        [self._opt.apply_gradients(
            grads_and_vars, global_step=global_step, name=name)]):
      return gen_array_ops.identity(0.0)

  def _DropGradientOp():
    return gen_array_ops.identity(1.0)

  for grad_and_var in grads_and_vars:
    grad = grad_and_var[0]
    if isinstance(grad, ops.Tensor):
      gradients.append(grad)
    else:
      gradients.append(grad.op)

  with ops.control_dependencies(gradients), ops.colocate_with(global_step):
    staleness = gen_array_ops.reshape(
        global_step - self._local_step, shape=())

  conditional_update = stale_counter.assign_add(control_flow_ops.cond(
      gen_math_ops.less_equal(staleness, self._staleness),
      _AcceptGradientOp, _DropGradientOp))

  summary.scalar(
      "Gradient staleness percentage",
      stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
  return conditional_update
Author: sandeepgupta2k4, Project: tensorflow, Lines: 35, Source: drop_stale_gradient_optimizer.py
Example 2: linear_logit_fn
def linear_logit_fn(features):
  """Linear model logit_fn.

  Args:
    features: This is the first item returned from the `input_fn`
      passed to `train`, `evaluate`, and `predict`. This should be a
      single `Tensor` or `dict` of same.

  Returns:
    A `Tensor` representing the logits.
  """
  cols_to_vars = {}
  logits = feature_column_lib.linear_model(
      features=features,
      feature_columns=feature_columns,
      units=units,
      sparse_combiner=sparse_combiner,
      cols_to_vars=cols_to_vars)
  bias = cols_to_vars.pop('bias')
  if units > 1:
    summary.histogram('bias', bias)
  else:
    # If units == 1, the bias value is a length-1 list of a scalar Tensor,
    # so we should provide a scalar summary.
    summary.scalar('bias', bias[0][0])
  summary.scalar('fraction_of_zero_weights',
                 _compute_fraction_of_zero(cols_to_vars))
  return logits
Author: AnishShah, Project: tensorflow, Lines: 28, Source: linear.py
Example 3: test_train_worker_monitor
def test_train_worker_monitor(self):
  # We need to explicitly set device due to check on non-chief workers
  # requiring all variables to have a device assigned.
  with ops.Graph().as_default() as g, g.device('/cpu:0'):
    global_step = variables_lib.create_global_step(g)
    train_op = state_ops.assign_add(global_step, 1)
    loss_op = constant_op.constant(2.0)
    summary.scalar('loss', loss_op)

    # Add explicit "local" init op to initialize all variables
    # as there's no chief to init here.
    init_op = variables.global_variables_initializer()
    ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)

    # Create worker monitors where one should be active on the worker
    # and the other chief exclusive.
    chief_exclusive_monitor = _BaseMonitorWrapper(False)
    all_workers_monitor = _BaseMonitorWrapper(True)
    with self.test_session(g):
      loss = learn.graph_actions.train(
          g,
          output_dir=self._output_dir,
          global_step_tensor=global_step,
          train_op=train_op,
          loss_op=loss_op,
          supervisor_is_chief=False,
          steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
    self.assertEqual(2.0, loss)
    self.assertTrue(not chief_exclusive_monitor.is_active and
                    all_workers_monitor.is_active,
                    'Only non-chief runnable monitor must have been active.')
    self.assertTrue(not chief_exclusive_monitor.has_step and
                    all_workers_monitor.has_step,
                    'Only non-chief runnable monitor must have a step.')
Author: AliMiraftab, Project: tensorflow, Lines: 33, Source: graph_actions_test.py
Example 4: test_train_summaries
def test_train_summaries(self):
  with ops.Graph().as_default() as g, self.test_session(g):
    with ops.control_dependencies(self._build_inference_graph()):
      train_op = state_ops.assign_add(variables_lib.get_global_step(), 1)
    loss_op = constant_op.constant(2.0)
    summary.scalar('loss', loss_op)
    self._assert_summaries(self._output_dir)
    self._assert_ckpt(self._output_dir, False)
    loss = learn.graph_actions.train(
        g,
        output_dir=self._output_dir,
        train_op=train_op,
        loss_op=loss_op,
        steps=1)
    # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
    # SaverDef, so we can't add it to the summary assertion test below.
    # meta_graph_def = meta_graph.create_meta_graph_def()
    self.assertEqual(2.0, loss)
    self._assert_summaries(
        self._output_dir,
        expected_graphs=[g],
        expected_summaries={1: {
            'loss': 2.0
        }})
    self._assert_ckpt(self._output_dir, True)
Author: AliMiraftab, Project: tensorflow, Lines: 25, Source: graph_actions_test.py
Example 5: prefetch_queue
def prefetch_queue(tensors,
                   capacity=8,
                   num_threads=1,
                   dynamic_pad=False,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetch tensors from `tensors`.

  A queue runner for enqueuing tensors into the prefetch_queue is automatically
  added to the TF QueueRunners collection.

  Example:
  This is for example useful to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches. Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    num_threads: An integer. Number of threads running the enqueue op.
    dynamic_pad: Boolean. Whether to allow variable dimensions in input shapes.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    # We sort the keys so the order is consistent across runs.
    names = list(sorted(tensors.keys()))
    tensor_list = list([tensors[n] for n in names])
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = _which_queue(dynamic_pad)(
        capacity=capacity,
        dtypes=dtypes,
        shapes=shapes,
        names=names,
        shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors)
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
    summary.scalar(
        "fraction_of_%d_full" % capacity,
        math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
    return queue
Author: Albert-Z-Guo, Project: tensorflow, Lines: 60, Source: prefetch_queue.py
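A usage note on the example above: prefetch_queue only registers its QueueRunner in the QUEUE_RUNNERS collection; in a plain TF1 session the runners still have to be started explicitly. A hedged sketch, assuming the images/labels pipeline from the docstring:

dequeued_images, dequeued_labels = batch_queue.dequeue()
coord = tf.compat.v1.train.Coordinator()
with tf.compat.v1.Session() as sess:
  threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
  try:
    images_val, labels_val = sess.run([dequeued_images, dequeued_labels])
  finally:
    coord.request_stop()
    coord.join(threads)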
Example 6: _ModelFn
def _ModelFn(features, labels, mode):
  if is_training:
    logits_out = self._BuildGraph(features)
  else:
    graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
    logits_out = importer.import_graph_def(
        graph_def,
        input_map={INPUT_NODE_NAME: features},
        return_elements=[OUTPUT_NODE_NAME + ':0'],
        name='')[0]

  loss = losses.sparse_softmax_cross_entropy(
      labels=labels, logits=logits_out)
  summary.scalar('loss', loss)

  classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
  accuracy = metrics.accuracy(
      labels=labels, predictions=classes_out, name='acc_op')
  summary.scalar('accuracy', accuracy[1])

  if mode == ModeKeys.EVAL:
    return EstimatorSpec(
        mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
  elif mode == ModeKeys.TRAIN:
    optimizer = AdamOptimizer(learning_rate=1e-2)
    train_op = optimizer.minimize(loss, global_step=get_global_step())
    return EstimatorSpec(mode, loss=loss, train_op=train_op)
Author: kylin9872, Project: tensorflow, Lines: 27, Source: quantization_mnist_test.py
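For context, a model_fn like _ModelFn is normally handed to an Estimator. A hedged wiring sketch (the toy input_fn, its shapes, and the model_dir path are assumptions; is_training, use_trt, and the node-name constants above are closure variables owned by the test harness):

import tensorflow.compat.v1 as tf

def _toy_input_fn():
  # Hypothetical stand-in for the MNIST pipeline the test actually uses.
  features = tf.random.uniform([8, 28, 28, 1])
  labels = tf.random.uniform([8], maxval=10, dtype=tf.int64)
  return features, labels

estimator = tf.estimator.Estimator(model_fn=_ModelFn, model_dir='/tmp/mnist_demo')
estimator.train(input_fn=_toy_input_fn, steps=10)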
Example 7: gradient_clipping
def gradient_clipping(grads_and_vars):
  """Internal function for adaptive clipping."""
  grads, variables = zip(*grads_and_vars)

  norm = clip_ops.global_norm(grads)

  max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
                                          global_step, epsilon, name)

  # reports the max gradient norm for debugging
  if report_summary:
    summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)

  # factor will be 1. if norm is smaller than max_norm
  factor = array_ops.where(norm < max_norm,
                           array_ops.ones_like(norm),
                           math_ops.exp(log_mean) / norm)

  if static_max_norm is not None:
    factor = math_ops.minimum(static_max_norm / norm, factor)

  # apply factor
  clipped_grads = []
  for grad in grads:
    if grad is None:
      clipped_grads.append(None)
    elif isinstance(grad, ops.IndexedSlices):
      clipped_grads.append(
          ops.IndexedSlices(grad.values * factor, grad.indices,
                            grad.dense_shape))
    else:
      clipped_grads.append(grad * factor)

  return list(zip(clipped_grads, variables))
Author: AlbertXiebnu, Project: tensorflow, Lines: 34, Source: optimizers.py
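gradient_clipping above is a closure over std_factor, decay, global_step, and friends, returned by the adaptive-clipping factory in optimizers.py. A hedged sketch of how such a transform slots into the usual compute/apply gradient pair (generic TF1 optimizer pattern, not code from this file; optimizer, loss, and global_step are assumed to exist in scope):

grads_and_vars = optimizer.compute_gradients(loss)
clipped_grads_and_vars = gradient_clipping(grads_and_vars)
train_op = optimizer.apply_gradients(clipped_grads_and_vars,
                                     global_step=global_step)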
Example 8: testSummariesAreFlushedToDisk
def testSummariesAreFlushedToDisk(self):
  checkpoint_dir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed')
  logdir = os.path.join(self.get_temp_dir(), 'summaries_are_flushed_eval')
  if gfile.Exists(logdir):
    gfile.DeleteRecursively(logdir)

  # Train a Model to completion:
  self._train_model(checkpoint_dir, num_steps=300)

  # Create the model (which can be restored).
  inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  logistic_classifier(inputs)

  names_to_values = {'bread': 3.4, 'cheese': 4.5, 'tomato': 2.0}
  for k in names_to_values:
    v = names_to_values[k]
    summary_lib.scalar(k, v)

  evaluation.evaluate_repeatedly(
      checkpoint_dir=checkpoint_dir,
      hooks=[evaluation.SummaryAtEndHook(log_dir=logdir),],
      max_number_of_evaluations=1)

  self._verify_summaries(logdir, names_to_values)
Author: Immexxx, Project: tensorflow, Lines: 25, Source: evaluation_test.py
Example 9: testSummaryWithFamilyMetaGraphExport
def testSummaryWithFamilyMetaGraphExport(self):
  with ops.name_scope('outer'):
    i = constant_op.constant(11)
    summ = summary_lib.scalar('inner', i)
    self.assertEquals(summ.op.name, 'outer/inner')
    summ_f = summary_lib.scalar('inner', i, family='family')
    self.assertEquals(summ_f.op.name, 'outer/family/inner')

  metagraph_def, _ = meta_graph.export_scoped_meta_graph(export_scope='outer')

  with ops.Graph().as_default() as g:
    meta_graph.import_scoped_meta_graph(metagraph_def, graph=g,
                                        import_scope='new_outer')
    # The summaries should exist, but with outer scope renamed.
    new_summ = g.get_tensor_by_name('new_outer/inner:0')
    new_summ_f = g.get_tensor_by_name('new_outer/family/inner:0')

    # However, the tags are unaffected.
    with self.cached_session() as s:
      new_summ_str, new_summ_f_str = s.run([new_summ, new_summ_f])
      new_summ_pb = summary_pb2.Summary()
      new_summ_pb.ParseFromString(new_summ_str)
      self.assertEquals('outer/inner', new_summ_pb.value[0].tag)
      new_summ_f_pb = summary_pb2.Summary()
      new_summ_f_pb.ParseFromString(new_summ_f_str)
      self.assertEquals('family/outer/family/inner',
                        new_summ_f_pb.value[0].tag)
Author: JonathanRaiman, Project: tensorflow, Lines: 27, Source: summary_test.py
Example 10: create_estimator_spec
def create_estimator_spec(
    self, features, mode, logits, labels=None, train_op_fn=None):
  """See `Head`."""
  with ops.name_scope('head'):
    logits = head_lib._check_logits(logits, self.logits_dimension)  # pylint:disable=protected-access

    # Predict.
    pred_keys = prediction_keys.PredictionKeys
    with ops.name_scope(None, 'predictions', (logits,)):
      probabilities = math_ops.sigmoid(logits, name=pred_keys.PROBABILITIES)
      predictions = {
          pred_keys.LOGITS: logits,
          pred_keys.PROBABILITIES: probabilities,
      }
    if mode == model_fn.ModeKeys.PREDICT:
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs={
              '': export_output.ClassificationOutput(scores=probabilities)
          })

    # Eval.
    unweighted_loss, processed_labels = self.create_loss(
        features=features, mode=mode, logits=logits, labels=labels)
    # Averages loss over classes.
    per_example_loss = math_ops.reduce_mean(
        unweighted_loss, axis=-1, keep_dims=True)
    weights = head_lib._weights(features, self._weight_column)  # pylint:disable=protected-access
    training_loss = losses.compute_weighted_loss(
        per_example_loss, weights=weights, reduction=losses.Reduction.SUM)
    if mode == model_fn.ModeKeys.EVAL:
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions=predictions,
          loss=training_loss,
          eval_metric_ops=self._eval_metric_ops(
              labels=processed_labels,
              probabilities=probabilities,
              weights=weights,
              per_example_loss=per_example_loss))

    # Train.
    if train_op_fn is None:
      raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),  # pylint:disable=protected-access
          training_loss)
      summary.scalar(
          head_lib._summary_key(  # pylint:disable=protected-access
              self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Author: Crazyonxh, Project: tensorflow, Lines: 60, Source: head.py
Example 11: testTrainWithTrace
def testTrainWithTrace(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    summary.scalar('total_loss', total_loss)

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op,
        logdir,
        number_of_steps=300,
        log_every_n_steps=10,
        trace_every_n_steps=100)
  self.assertIsNotNone(loss)
  for trace_step in [1, 101, 201]:
    trace_filename = 'tf_trace-%d.json' % trace_step
    self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
Author: AndrewTwinz, Project: tensorflow, Lines: 27, Source: learning_test.py
Example 12: _tree_train_op_fn
def _tree_train_op_fn(loss):
  """Returns the op to optimize the loss."""
  if dnn_to_tree_distillation_param:
    loss_weight, loss_fn = dnn_to_tree_distillation_param
    weight_tensor = head_lib._weight_tensor(  # pylint: disable=protected-access
        features, head.weight_column_name)
    dnn_logits_fixed = array_ops.stop_gradient(dnn_logits)

    if loss_fn is None:
      # we create the loss_fn similar to the head loss_fn for
      # multi_class_head used previously as the default one.
      n_classes = 2 if head.logits_dimension == 1 else head.logits_dimension
      loss_fn = distillation_loss.create_dnn_to_tree_cross_entropy_loss_fn(
          n_classes)

    dnn_to_tree_distillation_loss = loss_weight * loss_fn(
        dnn_logits_fixed, tree_logits, weight_tensor)
    summary.scalar("dnn_to_tree_distillation_loss",
                   dnn_to_tree_distillation_loss)
    loss += dnn_to_tree_distillation_loss

  update_op = gbdt_model.train(loss, predictions_dict, labels)
  with ops.control_dependencies(
      [update_op]), (ops.colocate_with(global_step)):
    update_op = state_ops.assign_add(global_step, 1).op
    return update_op
Author: StephenOman, Project: tensorflow, Lines: 26, Source: dnn_tree_combined_estimator.py
Example 13: create_estimator_spec
def create_estimator_spec(
    self, features, mode, logits, labels=None, optimizer=None,
    train_op_fn=None):
  """See `_Head`."""
  if isinstance(logits, dict):
    logits_dict = logits
  else:
    logits_dict = self._split_logits(logits)
  if labels and not isinstance(labels, dict):
    raise ValueError('labels must be a dict. Given: {}'.format(labels))

  all_estimator_spec = []
  for head in self._heads:
    head_name = head.name
    all_estimator_spec.append(
        head.create_estimator_spec(
            features=features,
            mode=mode,
            logits=logits_dict[head_name],
            labels=labels[head_name] if labels else None,
            train_op_fn=_no_op_train_fn))

  if mode == model_fn.ModeKeys.TRAIN:
    spec = self._merge_train(
        all_estimator_spec=all_estimator_spec,
        optimizer=optimizer,
        train_op_fn=train_op_fn)
    with ops.name_scope(''):
      summary.scalar(metric_keys.MetricKeys.LOSS, spec.loss)
    return spec
  if mode == model_fn.ModeKeys.PREDICT:
    return self._merge_predict(all_estimator_spec)
  if mode == model_fn.ModeKeys.EVAL:
    return self._merge_eval(all_estimator_spec)
  raise ValueError('mode={} unrecognized'.format(mode))
Author: AnishShah, Project: tensorflow, Lines: 35, Source: multi_head.py
Example 14: _summary_computed
def _summary_computed():
  with ops.Graph().as_default():
    sv = supervisor.Supervisor(is_chief=False)
    sess = sv.prepare_or_wait_for_session("")
    summary.scalar("c1", constant_op.constant(1))
    summary.scalar("c2", constant_op.constant(2))
    summ = summary.merge_all()
    sv.summary_computed(sess, sess.run(summ))
Author: JonathanRaiman, Project: tensorflow, Lines: 8, Source: supervisor_test.py
Example 15: create_loss
def create_loss(self, features, mode, logits=None, labels=None):
  """See `_Head`."""
  model_outputs = self.state_manager.define_loss(
      self.model, features, mode)
  summary.scalar(
      head_lib._summary_key(self._name, metric_keys.MetricKeys.LOSS),
      model_outputs.loss)
  return model_outputs
Author: Ajaycs99, Project: tensorflow, Lines: 8, Source: head.py
Example 16: testSummaryNameConversion
def testSummaryNameConversion(self):
  c = constant_op.constant(3)
  s = summary_lib.scalar('name with spaces', c)
  self.assertEqual(s.op.name, 'name_with_spaces')

  s2 = summary_lib.scalar('name with many $#illegal^: characters!', c)
  self.assertEqual(s2.op.name, 'name_with_many___illegal___characters_')

  s3 = summary_lib.scalar('/name/with/leading/slash', c)
  self.assertEqual(s3.op.name, 'name/with/leading/slash')
Author: JonathanRaiman, Project: tensorflow, Lines: 10, Source: summary_test.py
Example 17: testReadyForLocalInitOpRestoreFromCheckpoint
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
  server = server_lib.Server.create_local_server()
  logdir = self._test_dir("ready_for_local_init_op_restore")

  uid = uuid.uuid4().hex

  # Create a checkpoint.
  with ops.Graph().as_default():
    v = variables.VariableV1(
        10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
    summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
    sv = supervisor.Supervisor(logdir=logdir)
    sv.prepare_or_wait_for_session(server.target)
    save_path = sv.save_path
    self._wait_for_glob(save_path, 3.0)
    self._wait_for_glob(
        os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
    # Wait to make sure everything is written to file before stopping.
    time.sleep(1)
    sv.stop()

  def get_session(is_chief):
    g = ops.Graph()
    with g.as_default():
      with ops.device("/job:local"):
        v = variables.VariableV1(
            1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
        vadd = v.assign_add(1)
        w = variables.VariableV1(
            v,
            trainable=False,
            collections=[ops.GraphKeys.LOCAL_VARIABLES],
            name="ready_for_local_init_op_restore_w_" + str(uid))
      ready_for_local_init_op = variables.report_uninitialized_variables(
          variables.global_variables())
    sv = supervisor.Supervisor(
        logdir=logdir,
        is_chief=is_chief,
        graph=g,
        recovery_wait_secs=1,
        ready_for_local_init_op=ready_for_local_init_op)
    sess = sv.prepare_or_wait_for_session(server.target)

    return sv, sess, v, vadd, w

  sv0, sess0, v0, _, w0 = get_session(True)
  sv1, sess1, _, vadd1, w1 = get_session(False)

  self.assertEqual(10, sess0.run(w0))
  self.assertEqual(11, sess1.run(vadd1))
  self.assertEqual(10, sess1.run(w1))
  self.assertEqual(11, sess0.run(v0))

  sv0.stop()
  sv1.stop()
Author: JonathanRaiman, Project: tensorflow, Lines: 55, Source: supervisor_test.py
Example 18: create_estimator_spec
def create_estimator_spec(
    self, features, mode, logits, labels=None, train_op_fn=None):
  """See `Head`."""
  # Predict.
  with ops.name_scope('head'):
    logits = _check_logits(logits, self._logits_dimension)
    predictions = {prediction_keys.PredictionKeys.PREDICTIONS: logits}
    if mode == model_fn.ModeKeys.PREDICT:
      regression_output = export_output.RegressionOutput(value=logits)
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.PREDICT,
          predictions=predictions,
          export_outputs={
              _DEFAULT_SERVING_KEY: regression_output,
              _REGRESS_SERVING_KEY: regression_output,
              _PREDICT_SERVING_KEY: export_output.PredictOutput(predictions)
          })

    # Eval.
    unweighted_loss, _ = self.create_loss(
        features=features, mode=mode, logits=logits, labels=labels)
    weights = _weights(features, self._weight_column)
    training_loss = losses.compute_weighted_loss(
        unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
    if mode == model_fn.ModeKeys.EVAL:
      # Estimator already adds a metric for loss.
      eval_metric_ops = {
          metric_keys.MetricKeys.LOSS_MEAN: metrics_lib.mean(
              unweighted_loss, weights=weights)
      }
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          predictions=predictions,
          loss=training_loss,
          eval_metric_ops=eval_metric_ops)

    # Train.
    if train_op_fn is None:
      raise ValueError('train_op_fn can not be None.')
    with ops.name_scope(''):
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS),
          training_loss)
      summary.scalar(
          _summary_key(self._name, metric_keys.MetricKeys.LOSS_MEAN),
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
    return model_fn.EstimatorSpec(
        mode=model_fn.ModeKeys.TRAIN,
        predictions=predictions,
        loss=training_loss,
        train_op=train_op_fn(training_loss))
Author: rajeev921, Project: tensorflow, Lines: 53, Source: head.py
Example 19: acgan_generator_loss
def acgan_generator_loss(discriminator_gen_classification_logits,
                         one_hot_labels,
                         weights=1.0,
                         scope=None,
                         loss_collection=ops.GraphKeys.LOSSES,
                         reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
                         add_summaries=False):
  """ACGAN loss for the generator.

  The ACGAN loss adds a classification loss to the conditional discriminator.
  Therefore, the discriminator must output a tuple consisting of
    (1) the real/fake prediction and
    (2) the logits for the classification (usually the last conv layer,
        flattened).

  For more details:
    ACGAN: https://arxiv.org/abs/1610.09585

  Args:
    discriminator_gen_classification_logits: Classification logits for
      generated data.
    one_hot_labels: A Tensor holding one-hot labels for the batch.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_classification_logits`, and must be broadcastable to
      `discriminator_gen_classification_logits` (i.e., all dimensions must be
      either `1`, or the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. Shape depends on `reduction`.

  Raises:
    ValueError: if arg module not either `generator` or `discriminator`
    TypeError: if the discriminator does not output a tuple.
  """
  with ops.name_scope(
      scope, 'acgan_generator_loss',
      (discriminator_gen_classification_logits, one_hot_labels)) as scope:
    loss = losses.softmax_cross_entropy(
        one_hot_labels,
        discriminator_gen_classification_logits,
        weights=weights,
        scope=scope,
        loss_collection=loss_collection,
        reduction=reduction)

    if add_summaries:
      summary.scalar('generator_ac_loss', loss)

  return loss
Author: ahmedsaiduk, Project: tensorflow, Lines: 53, Source: losses_impl.py
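A hedged call sketch for the loss above, using toy tensors (the batch size and the ten-class setup are illustrative assumptions, not part of the library code):

import tensorflow.compat.v1 as tf

batch_size, num_classes = 8, 10
class_logits_on_fake = tf.random.normal([batch_size, num_classes])  # D's class logits on G(z)
one_hot = tf.one_hot(tf.zeros([batch_size], dtype=tf.int32), depth=num_classes)
g_ac_loss = acgan_generator_loss(
    class_logits_on_fake, one_hot, add_summaries=True)  # also emits 'generator_ac_loss'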
Example 20: minimax_generator_loss
def minimax_generator_loss(discriminator_gen_outputs,
                           label_smoothing=0.0,
                           weights=1.0,
                           scope=None,
                           loss_collection=ops.GraphKeys.LOSSES,
                           reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
                           add_summaries=False):
  """Original minimax generator loss for GANs.

  Note that the authors don't recommend using this loss. A more practically
  useful loss is `modified_generator_loss`.

  L = log(sigmoid(D(x))) + log(1 - sigmoid(D(G(z))))

  See `Generative Adversarial Nets` (https://arxiv.org/abs/1406.2661) for more
  details.

  Args:
    discriminator_gen_outputs: Discriminator output on generated data. Expected
      to be in the range of (-inf, inf).
    label_smoothing: The amount of smoothing for positive labels. This
      technique is taken from `Improved Techniques for Training GANs`
      (https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `discriminator_gen_outputs`, and must be broadcastable to
      `discriminator_gen_outputs` (i.e., all dimensions must be either `1`, or
      the same as the corresponding dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which this loss will be added.
    reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
    add_summaries: Whether or not to add summaries for the loss.

  Returns:
    A loss Tensor. The shape depends on `reduction`.
  """
  with ops.name_scope(scope, 'generator_minimax_loss') as scope:
    loss = -minimax_discriminator_loss(
        array_ops.ones_like(discriminator_gen_outputs),
        discriminator_gen_outputs,
        label_smoothing,
        weights,
        weights,
        scope,
        loss_collection,
        reduction,
        add_summaries=False)

    if add_summaries:
      summary.scalar('generator_minimax_loss', loss)

  return loss
Author: ahmedsaiduk, Project: tensorflow, Lines: 51, Source: losses_impl.py
Note: the tensorflow.python.summary.summary.scalar examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors, and any use or redistribution is subject to the corresponding project's license. Do not reproduce without permission.