This article collects typical usage examples of the Python function tensorflow.contrib.losses.python.losses.loss_ops.get_total_loss. If you have been wondering what exactly get_total_loss does, how to call it, and what real uses of it look like, the curated examples below should help.
The article presents 19 code examples of get_total_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
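
All 19 examples share one pattern, so here is a minimal sketch of it up front. This is our own illustration, not one of the collected examples: it assumes a TensorFlow 1.x environment where tf.contrib is still available, and the toy model, the variable name weights, and the l2_regularizer are hypothetical additions for the sketch.

import tensorflow as tf
from tensorflow.contrib.losses.python.losses import loss_ops

with tf.Graph().as_default():
  inputs = tf.constant([[0.0, 1.0], [1.0, 0.0]], dtype=tf.float32)
  labels = tf.constant([[1.0], [0.0]], dtype=tf.float32)

  # Hypothetical toy model; the regularizer registers an entry in the
  # GraphKeys.REGULARIZATION_LOSSES collection.
  weights = tf.get_variable(
      'weights', shape=[2, 1],
      regularizer=tf.contrib.layers.l2_regularizer(1e-4))
  predictions = tf.sigmoid(tf.matmul(inputs, weights))

  # log_loss returns a loss tensor and also registers it in the
  # GraphKeys.LOSSES collection, which is why the examples below
  # discard its return value.
  loss_ops.log_loss(predictions, labels)

  # get_total_loss sums every loss in GraphKeys.LOSSES and, by default,
  # every registered regularization loss into a single scalar.
  total_loss = loss_ops.get_total_loss(add_regularization_losses=True)

This collection-based design is why get_total_loss takes no loss tensors as arguments: each loss op registers itself on the graph, and the total is assembled implicitly.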
Example 1: testTrainWithTrace

def testTrainWithTrace(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    summary.scalar('total_loss', total_loss)

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op,
        logdir,
        number_of_steps=300,
        log_every_n_steps=10,
        trace_every_n_steps=100)
    self.assertIsNotNone(loss)
    for trace_step in [1, 101, 201]:
      trace_filename = 'tf_trace-%d.json' % trace_step
      self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))

Developer: AndrewTwinz, Project: tensorflow, Lines: 27, Source: learning_test.py
Example 2: testNoneGlobalStep

def testNoneGlobalStep(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

    train_op = training.create_train_op(
        total_loss, optimizer, global_step=None)

    global_step = variables_lib.get_or_create_global_step()

    with session_lib.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib2.global_variables_initializer())

      for _ in range(10):
        sess.run([train_op])

      global_step = global_step.eval()
      # Since train_op doesn't use global_step, it shouldn't change.
      self.assertAllClose(global_step, 0)

Developer: Jackhuang945, Project: tensorflow, Lines: 25, Source: training_test.py
Example 3: testTrainWithSessionWrapper

def testTrainWithSessionWrapper(self):
  """Test that slim.learning.train can take `session_wrapper` args.

  One of the applications of `session_wrapper` is the wrappers of TensorFlow
  Debugger (tfdbg), which intercept method calls to `tf.Session` (e.g., run)
  to achieve debugging. `DumpingDebugWrapperSession` is used here for testing
  purposes.
  """
  dump_root = tempfile.mkdtemp()

  def dumping_wrapper(sess):  # pylint: disable=invalid-name
    return dumping_wrapper_lib.DumpingDebugWrapperSession(sess, dump_root)

  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, None, number_of_steps=1, session_wrapper=dumping_wrapper)
  self.assertIsNotNone(loss)

  run_root = glob.glob(os.path.join(dump_root, 'run_*'))[-1]
  dump = debug_data.DebugDumpDir(run_root)
  self.assertAllEqual(0,
                      dump.get_tensors('global_step', 0, 'DebugIdentity')[0])

Developer: AndrewTwinz, Project: tensorflow, Lines: 34, Source: learning_test.py
Example 4: testResumeTrainAchievesRoughlyTheSameLoss

def testResumeTrainAchievesRoughlyTheSameLoss(self):
  number_of_steps = [300, 1, 5]
  logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')

  for i in range(len(number_of_steps)):
    with ops.Graph().as_default():
      random_seed.set_random_seed(i)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)

      saver = saver_lib.Saver()

      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(
                  num_steps=number_of_steps[i]),
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=50, saver=saver),
          ])
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

Developer: Jackhuang945, Project: tensorflow, Lines: 31, Source: training_test.py
Example 5: ModelLoss

def ModelLoss(self):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

  tf_predictions = logistic_classifier(tf_inputs)
  loss_ops.log_loss(tf_predictions, tf_labels)
  return loss_ops.get_total_loss()

Developer: Jackhuang945, Project: tensorflow, Lines: 7, Source: training_test.py
Example 6: testEmptyUpdateOps

def testEmptyUpdateOps(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer, update_ops=[])

    moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
    moving_variance = variables_lib.get_variables_by_name('moving_variance')[0]

    with session_lib.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib2.global_variables_initializer())
      mean, variance = sess.run([moving_mean, moving_variance])

      # After initialization, moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 4)
      self.assertAllClose(variance, [1] * 4)

      for _ in range(10):
        sess.run([train_op])

      mean = moving_mean.eval()
      variance = moving_variance.eval()

      # Since we skip update_ops, the moving_vars are not updated.
      self.assertAllClose(mean, [0] * 4)
      self.assertAllClose(variance, [1] * 4)

Developer: Jackhuang945, Project: tensorflow, Lines: 33, Source: training_test.py
Example 7: testResumeTrainAchievesRoughlyTheSameLoss

def testResumeTrainAchievesRoughlyTheSameLoss(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  number_of_steps = [300, 301, 305]

  for i in range(len(number_of_steps)):
    with ops.Graph().as_default():
      random_seed.set_random_seed(i)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=number_of_steps[i],
          log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)

Developer: AndrewTwinz, Project: tensorflow, Lines: 26, Source: learning_test.py
Example 8: testTrainOpInCollection

def testTrainOpInCollection(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    # Make sure the training op was recorded in the proper collection.
    self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))

Developer: Jackhuang945, Project: tensorflow, Lines: 15, Source: training_test.py
Example 9: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError

def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    with self.assertRaises(ValueError):
      learning.train(
          train_op, None, number_of_steps=300, trace_every_n_steps=10)

Developer: AndrewTwinz, Project: tensorflow, Lines: 17, Source: learning_test.py
Example 10: testTrainWithNoneAsInitWhenUsingVarsRaisesError

def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    with self.assertRaises(RuntimeError):
      learning.train(train_op, logdir, init_op=None, number_of_steps=300)

Developer: AndrewTwinz, Project: tensorflow, Lines: 18, Source: learning_test.py
Example 11: create_train_op

def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

  tf_predictions = LogisticClassifier(tf_inputs)
  loss_ops.log_loss(tf_predictions, tf_labels)
  total_loss = loss_ops.get_total_loss()

  optimizer = gradient_descent.GradientDescentOptimizer(
      learning_rate=learning_rate)

  if gradient_multiplier != 1.0:
    variables = variables_lib.trainable_variables()
    gradient_multipliers = {var: gradient_multiplier for var in variables}
  else:
    gradient_multipliers = None

  return learning.create_train_op(
      total_loss, optimizer, gradient_multipliers=gradient_multipliers)

Developer: AndrewTwinz, Project: tensorflow, Lines: 19, Source: learning_test.py
Example 12: testTrainWithNoInitAssignCanAchieveZeroLoss

def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
  g = ops.Graph()
  with g.as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    loss = training.train(
        train_op,
        self._logdir,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
    self.assertLess(loss, .1)

Developer: Jackhuang945, Project: tensorflow, Lines: 20, Source: training_test.py
Example 13: testTrainWithNoInitAssignCanAchieveZeroLoss

def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)

Developer: AndrewTwinz, Project: tensorflow, Lines: 20, Source: learning_test.py
Example 14: testCanAchieveZeroLoss

def testCanAchieveZeroLoss(self):
  logdir = os.path.join(self.get_temp_dir(), 'can_achieve_zero_loss')

  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    loss = training.train(
        train_op,
        logdir,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)])
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)

Developer: Jackhuang945, Project: tensorflow, Lines: 22, Source: training_test.py
Example 15: testTrainWithEpochLimit

def testTrainWithEpochLimit(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_inputs_limited = input_lib.limit_epochs(tf_inputs, num_epochs=300)
    tf_labels_limited = input_lib.limit_epochs(tf_labels, num_epochs=300)

    tf_predictions = LogisticClassifier(tf_inputs_limited)
    loss_ops.log_loss(tf_predictions, tf_labels_limited)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(train_op, logdir, log_every_n_steps=10)
  self.assertIsNotNone(loss)
  self.assertLess(loss, .015)
  self.assertTrue(os.path.isfile('{}/model.ckpt-300.index'.format(logdir)))
  self.assertTrue(
      os.path.isfile('{}/model.ckpt-300.data-00000-of-00001'.format(logdir)))

Developer: Immexxx, Project: tensorflow, Lines: 23, Source: learning_test.py
Example 16: create_train_op

def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

  tf_predictions = logistic_classifier(tf_inputs)
  loss_ops.log_loss(tf_predictions, tf_labels)
  total_loss = loss_ops.get_total_loss()

  optimizer = gradient_descent.GradientDescentOptimizer(
      learning_rate=learning_rate)

  def transform_grads_fn(grads):
    if gradient_multiplier != 1.0:
      variables = variables_lib2.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}

      with ops.name_scope('multiply_grads'):
        return training.multiply_gradients(grads, gradient_multipliers)
    else:
      return grads

  return training.create_train_op(
      total_loss, optimizer, transform_grads_fn=transform_grads_fn)

Developer: Jackhuang945, Project: tensorflow, Lines: 23, Source: training_test.py
Example 17: testTrainWithSessionConfig

def testTrainWithSessionConfig(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

  session_config = config_pb2.ConfigProto(allow_soft_placement=True)
  loss = learning.train(
      train_op,
      None,
      number_of_steps=300,
      log_every_n_steps=10,
      session_config=session_config)
  self.assertIsNotNone(loss)
  self.assertLess(loss, .015)

Developer: AndrewTwinz, Project: tensorflow, Lines: 23, Source: learning_test.py
Example 18: testUseUpdateOps

def testUseUpdateOps(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    expected_mean = np.mean(self._inputs, axis=(0))
    expected_var = np.var(self._inputs, axis=(0))
    expected_var = self._addBesselsCorrection(16, expected_var)

    tf_predictions = BatchNormClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    moving_mean = variables_lib2.get_variables_by_name('moving_mean')[0]
    moving_variance = variables_lib2.get_variables_by_name('moving_variance')[0]

    with session.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib.global_variables_initializer())
      mean, variance = sess.run([moving_mean, moving_variance])

      # After initialization, moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 4)
      self.assertAllClose(variance, [1] * 4)

      for _ in range(10):
        sess.run([train_op])

      mean = moving_mean.eval()
      variance = moving_variance.eval()

      # After 10 updates with decay 0.1, moving_mean == expected_mean and
      # moving_variance == expected_var.
      self.assertAllClose(mean, expected_mean)
      self.assertAllClose(variance, expected_var)

Developer: AndrewTwinz, Project: tensorflow, Lines: 37, Source: learning_test.py
Example 19: testUseGlobalStep

def testUseGlobalStep(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = BatchNormClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    global_step = variables_lib2.get_or_create_global_step()

    with session.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib.global_variables_initializer())

      for _ in range(10):
        sess.run([train_op])

      global_step = global_step.eval()
      # After 10 updates, global_step should be 10.
      self.assertAllClose(global_step, 10)

Developer: AndrewTwinz, Project: tensorflow, Lines: 24, Source: learning_test.py
Note: the tensorflow.contrib.losses.python.losses.loss_ops.get_total_loss examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright in the code remains with its original authors; consult the corresponding project's License before distributing or reusing it. Do not reproduce this compilation without permission.