This article collects typical usage examples of the Python function tensorflow.contrib.losses.python.losses.loss_ops.log_loss. If you are wondering what exactly log_loss does, how to use it, or what real code that calls it looks like, the curated examples below may help.
The following shows 11 code examples of log_loss, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code samples.
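Before the test-suite examples, a minimal sketch of calling log_loss directly may help for orientation. This is an assumption-laden sketch, not code from the examples below: it assumes a TensorFlow 1.x installation where tf.contrib is still available, and the prediction and label values are invented for illustration.

from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes

# Sigmoid-style outputs in (0, 1) and binary labels; values made up for the demo.
predictions = constant_op.constant([[0.9], [0.2], [0.7]], dtype=dtypes.float32)
labels = constant_op.constant([[1.0], [0.0], [1.0]], dtype=dtypes.float32)

# log_loss returns the loss tensor and also registers it in the
# GraphKeys.LOSSES collection, which is what get_total_loss() sums.
loss = loss_ops.log_loss(predictions, labels)
total_loss = loss_ops.get_total_loss()

with session_lib.Session() as sess:
  print(sess.run([loss, total_loss]))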
Example 1: ModelLoss
def ModelLoss(self):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
  tf_predictions = logistic_classifier(tf_inputs)
  loss_ops.log_loss(tf_predictions, tf_labels)
  return loss_ops.get_total_loss()
Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 7, Source: training_test.py
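Note that logistic_classifier, LogisticClassifier, and batchnorm_classifier are helper models defined elsewhere in the TensorFlow test files and not shown in these excerpts. A plausible minimal reconstruction, assuming tf.contrib.layers, is:

# Hypothetical reconstruction of the helpers these tests rely on; the real
# definitions live in the TensorFlow test sources (training_test.py etc.).
from tensorflow.contrib import layers
from tensorflow.python.ops import math_ops

def logistic_classifier(inputs):
  # A single sigmoid unit is enough to memorize the tiny test dataset.
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)

def batchnorm_classifier(inputs):
  # Batch norm in front of the sigmoid unit gives the graph the
  # moving_mean/moving_variance variables and UPDATE_OPS entries that
  # the batch-norm tests below inspect.
  inputs = layers.batch_norm(inputs, decay=0.1)
  return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)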
Example 2: testNoneGlobalStep
def testNoneGlobalStep(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

    train_op = training.create_train_op(
        total_loss, optimizer, global_step=None)
    global_step = variables_lib.get_or_create_global_step()

    with session_lib.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib2.global_variables_initializer())

      for _ in range(10):
        sess.run([train_op])
      global_step = global_step.eval()
      # Since the train_op doesn't use the global_step, it shouldn't change.
      self.assertAllClose(global_step, 0)
Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 25, Source: training_test.py
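For contrast with the test above: when global_step is left at its default rather than explicitly set to None, create_train_op is expected to fetch the default global step and increment it once per training step, so the same loop would end with the step counter at 10. A hypothetical sketch, reusing the same test fixtures:

# Contrast sketch (hypothetical): default global_step handling.
train_op = training.create_train_op(total_loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with session_lib.Session() as sess:
  sess.run(variables_lib2.global_variables_initializer())
  for _ in range(10):
    sess.run([train_op])
  # Each training step incremented the default global step.
  assert global_step.eval() == 10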
Example 3: testResumeTrainAchievesRoughlyTheSameLoss
def testResumeTrainAchievesRoughlyTheSameLoss(self):
  number_of_steps = [300, 1, 5]
  logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')

  for i in range(len(number_of_steps)):
    with ops.Graph().as_default():
      random_seed.set_random_seed(i)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()

      loss = training.train(
          train_op,
          logdir,
          hooks=[
              basic_session_run_hooks.StopAtStepHook(
                  num_steps=number_of_steps[i]),
              basic_session_run_hooks.CheckpointSaverHook(
                  logdir, save_steps=50, saver=saver),
          ])
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 31, Source: training_test.py
Example 4: testResumeTrainAchievesRoughlyTheSameLoss
def testResumeTrainAchievesRoughlyTheSameLoss(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  number_of_steps = [300, 301, 305]

  for i in range(len(number_of_steps)):
    with ops.Graph().as_default():
      random_seed.set_random_seed(i)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op,
          logdir,
          number_of_steps=number_of_steps[i],
          log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015)
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 26, Source: learning_test.py
Example 5: testEmptyUpdateOps
def testEmptyUpdateOps(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer, update_ops=[])

    moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
    moving_variance = variables_lib.get_variables_by_name('moving_variance')[0]

    with session_lib.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib2.global_variables_initializer())
      mean, variance = sess.run([moving_mean, moving_variance])

      # After initialization, moving_mean == 0 and moving_variance == 1.
      self.assertAllClose(mean, [0] * 4)
      self.assertAllClose(variance, [1] * 4)

      for _ in range(10):
        sess.run([train_op])
      mean = moving_mean.eval()
      variance = moving_variance.eval()

      # Since we skipped update_ops, the moving averages were not updated.
      self.assertAllClose(mean, [0] * 4)
      self.assertAllClose(variance, [1] * 4)
Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 33, Source: training_test.py
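The counterpart behavior: with update_ops left unspecified, create_train_op is expected to pull the ops registered under GraphKeys.UPDATE_OPS (where batch_norm registers its moving-average updates) into the training step, so the moving statistics do change. A hypothetical sketch, reusing the fixtures from the test above:

# Contrast sketch (hypothetical): default update_ops handling.
train_op = training.create_train_op(total_loss, optimizer)  # pulls UPDATE_OPS
with session_lib.Session() as sess:
  sess.run(variables_lib2.global_variables_initializer())
  for _ in range(10):
    sess.run([train_op])
  # The batch-norm moving statistics have drifted away from their
  # initial values of 0 and 1.
  mean, variance = sess.run([moving_mean, moving_variance])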
Example 6: testTrainWithTrace
def testTrainWithTrace(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    summary.scalar('total_loss', total_loss)

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op,
        logdir,
        number_of_steps=300,
        log_every_n_steps=10,
        trace_every_n_steps=100)
  self.assertIsNotNone(loss)
  for trace_step in [1, 101, 201]:
    trace_filename = 'tf_trace-%d.json' % trace_step
    self.assertTrue(os.path.isfile(os.path.join(logdir, trace_filename)))
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 27, Source: learning_test.py
Example 7: testTrainWithSessionWrapper
def testTrainWithSessionWrapper(self):
  """Test that slim.learning.train can take `session_wrapper` args.

  One of the applications of `session_wrapper` is the wrappers of TensorFlow
  Debugger (tfdbg), which intercept method calls to `tf.Session` (e.g., run)
  to achieve debugging. `DumpingDebugWrapperSession` is used here for testing
  purposes.
  """
  dump_root = tempfile.mkdtemp()

  def dumping_wrapper(sess):  # pylint: disable=invalid-name
    return dumping_wrapper_lib.DumpingDebugWrapperSession(sess, dump_root)

  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, None, number_of_steps=1, session_wrapper=dumping_wrapper)
  self.assertIsNotNone(loss)

  run_root = glob.glob(os.path.join(dump_root, 'run_*'))[-1]
  dump = debug_data.DebugDumpDir(run_root)
  self.assertAllEqual(0,
                      dump.get_tensors('global_step', 0, 'DebugIdentity')[0])
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 34, Source: learning_test.py
Example 8: testTrainOpInCollection
def testTrainOpInCollection(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = batchnorm_classifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)

    # Make sure the training op was recorded in the proper collection.
    self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
Developer ID: Jackhuang945, Project: tensorflow, Lines of code: 15, Source: training_test.py
Example 9: _train_model
def _train_model(self, checkpoint_dir, num_steps):
  """Trains a simple classification model.

  Note that the data has been configured such that after around 300 steps,
  the model has memorized the dataset (e.g. we can expect 100% accuracy).

  Args:
    checkpoint_dir: The directory where the checkpoint is written to.
    num_steps: The number of steps to train for.
  """
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = logistic_classifier(tf_inputs)
    loss = loss_ops.log_loss(tf_predictions, tf_labels)

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(loss, optimizer)

    loss = training.train(
        train_op,
        checkpoint_dir,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
Developer ID: Immexxx, Project: tensorflow, Lines of code: 25, Source: evaluation_test.py
Example 10: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    with self.assertRaises(ValueError):
      learning.train(
          train_op, None, number_of_steps=300, trace_every_n_steps=10)
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 17, Source: learning_test.py
Example 11: testTrainWithNoneAsInitWhenUsingVarsRaisesError
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)

    with self.assertRaises(RuntimeError):
      learning.train(train_op, logdir, init_op=None, number_of_steps=300)
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 18, Source: learning_test.py
Note: The tensorflow.contrib.losses.python.losses.loss_ops.log_loss examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Please do not repost without permission.