This article collects typical usage examples of the Python function tensorflow.contrib.estimator.python.estimator.head.multi_label_head. If you have been wondering exactly what multi_label_head does, how to call it, or what real-world examples look like, the curated code samples below should help.
Twenty code examples of the multi_label_head function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
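Before the test excerpts, a minimal usage sketch may help place the function: it builds a multi-label head and hands it to a canned estimator. This sketch is not taken from the examples below; it assumes the TensorFlow 1.x contrib Estimator API, and the feature column and hidden-unit sizes are illustrative placeholders only. In the excerpts that follow, head_lib refers to the module named in the title, and multi_head_lib is assumed to be the companion multi_head module from the same contrib package.

import tensorflow as tf

# A head for 3-way multi-label classification: one sigmoid per class.
head = tf.contrib.estimator.multi_label_head(n_classes=3)

# Hand the head to a canned estimator; the column and layer sizes here are
# placeholders, not values from the tests below.
feature_columns = [tf.feature_column.numeric_column('x')]
estimator = tf.contrib.estimator.DNNEstimator(
    head=head,
    hidden_units=[32, 16],
    feature_columns=feature_columns)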
Example 1: test_loss_fn_arg_invalid
def test_loss_fn_arg_invalid(self):
  def _loss_fn(labels, logits, name=None):
    del labels, logits, name  # Unused
  with self.assertRaisesRegexp(
      ValueError,
      r'loss_fn has unexpected args: \[\'name\'\]'):
    head_lib.multi_label_head(n_classes=3, loss_fn=_loss_fn)
Author: andrewharp, Project: tensorflow, Lines: 7, Source: head_test.py
Example 2: test_head_weights_wrong_size
def test_head_weights_wrong_size(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  with self.assertRaisesRegexp(
      ValueError,
      r'heads and head_weights must have the same size\. '
      r'Given len\(heads\): 2. Given len\(head_weights\): 1\.'):
    multi_head_lib.multi_head([head1, head2], head_weights=[1.])
Author: syed-ahmed, Project: tensorflow, Lines: 8, Source: multi_head_test.py
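For contrast, a hedged one-line sketch of the call this test expects to pass validation: one weight per head, so the two lists have the same length (it reuses head1, head2, and multi_head_lib from the excerpt above; the weight values are illustrative and also appear in Example 6).

# Valid: two heads, two head weights.
multi_head = multi_head_lib.multi_head([head1, head2], head_weights=[1., 2.])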
Example 3: test_loss_fn_arg_logits_missing
def test_loss_fn_arg_logits_missing(self):
  def _loss_fn(labels):
    del labels  # unused
  with self.assertRaisesRegexp(
      ValueError,
      r'loss_fn must contain argument: logits\. '
      r'Given arguments: \(\'labels\',\)'):
    head_lib.multi_label_head(n_classes=3, loss_fn=_loss_fn)
Author: andrewharp, Project: tensorflow, Lines: 8, Source: head_test.py
Example 4: test_invalid_loss_reduction
def test_invalid_loss_reduction(self):
  with self.assertRaisesRegexp(
      ValueError, r'Invalid loss_reduction: invalid_loss_reduction'):
    head_lib.multi_label_head(
        n_classes=3, loss_reduction='invalid_loss_reduction')
  with self.assertRaisesRegexp(
      ValueError, r'Invalid loss_reduction: none'):
    head_lib.multi_label_head(
        n_classes=3, loss_reduction=losses.Reduction.NONE)
Author: andrewharp, Project: tensorflow, Lines: 9, Source: head_test.py
Example 5: test_predict_two_heads_logits_tensor
def test_predict_two_heads_logits_tensor(self):
  """Tests predict with logits as Tensor."""
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head([head1, head2])
  logits = np.array(
      [[-1., 1., 2., -2., 2.], [-1.5, 1., -3., 2., -2.]], dtype=np.float32)
  expected_logits1 = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
  expected_logits2 = np.array([[2., -2., 2.], [-3., 2., -2.]],
                              dtype=np.float32)
  expected_probabilities = {
      'head1': _sigmoid(expected_logits1),
      'head2': _sigmoid(expected_logits2),
  }
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.PREDICT,
      logits=logits)
  self.assertItemsEqual(
      (_DEFAULT_SERVING_KEY, 'predict', 'head1', 'classification/head1',
       'predict/head1', 'head2', 'classification/head2', 'predict/head2'),
      spec.export_outputs.keys())
  # Assert predictions and export_outputs.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    predictions = sess.run(spec.predictions)
    self.assertAllClose(
        expected_logits1,
        predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
    self.assertAllClose(
        expected_logits2,
        predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
    self.assertAllClose(
        expected_probabilities['head1'],
        predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
    self.assertAllClose(
        expected_probabilities['head2'],
        predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
    self.assertAllClose(
        expected_probabilities['head1'],
        sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
    self.assertAllClose(
        expected_probabilities['head1'],
        sess.run(spec.export_outputs['head1'].scores))
    self.assertAllClose(
        expected_probabilities['head2'],
        sess.run(spec.export_outputs['head2'].scores))
Author: syed-ahmed, Project: tensorflow, Lines: 53, Source: multi_head_test.py
Example 6: test_train_create_loss_two_heads_with_weights
def test_train_create_loss_two_heads_with_weights(self):
  # Use different example weights for each head.
  weights1 = np.array([[1.], [2.]], dtype=np.float32)
  weights2 = np.array([[2.], [3.]])
  head1 = head_lib.multi_label_head(n_classes=2, name='head1',
                                    weight_column='weights1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2',
                                    weight_column='weights2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
      features={
          'x': np.array(((42,),), dtype=np.int32),
          'weights1': weights1,
          'weights2': weights2
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)
  tol = 1e-3
  with self.test_session():
    # loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
    # = [10, 7.5]
    # training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5
    # head-weighted unreduced_loss = 1 * [10, 7.5]
    self.assertAllClose(
        [[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
    # loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
    # = [20, 10]
    # training_loss = (2 * 20 + 3 * 10) / 2 = 35
    # head-weighted unreduced_loss = 2 * [20, 10]
    self.assertAllClose(
        [[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
    # head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5
    self.assertAllClose(82.5, training_loss.eval(), rtol=tol, atol=tol)
    # head-weighted example weights
    self.assertAllClose(
        [[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
    self.assertAllClose(
        [[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
Author: syed-ahmed, Project: tensorflow, Lines: 50, Source: multi_head_test.py
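As a sanity check on the arithmetic in the comments above, a short numpy sketch (illustrative only, not part of the test) reproduces the head-weighted training loss of 82.5:

import numpy as np

loss1 = np.array([10., 7.5])   # head1 per-example loss, averaged over 2 classes
loss2 = np.array([20., 10.])   # head2 per-example loss, averaged over 3 classes
w1 = np.array([1., 2.])        # example weights for head1
w2 = np.array([2., 3.])        # example weights for head2
head1_loss = np.sum(w1 * loss1) / 2.   # (1*10 + 2*7.5) / 2 = 12.5
head2_loss = np.sum(w2 * loss2) / 2.   # (2*20 + 3*10) / 2 = 35.0
print(1. * head1_loss + 2. * head2_loss)  # head weights 1. and 2. -> 82.5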
Example 7: test_eval_with_regularization_losses
def test_eval_with_regularization_losses(self):
  n_classes = 2
  head = head_lib.multi_label_head(
      n_classes, loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
  logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
  labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  regularization_losses = [1.5, 0.5]
  expected_regularization_loss = 2.
  # unregularized_loss = sum(
  #     labels * -log(sigmoid(logits)) +
  #     (1 - labels) * -log(1 - sigmoid(logits))) / batch_size
  expected_unregularized_loss = np.sum(
      _sigmoid_cross_entropy(labels=labels, logits=logits)) / 2.
  expected_regularized_loss = (
      expected_unregularized_loss + expected_regularization_loss)
  keys = metric_keys.MetricKeys
  expected_metrics = {
      keys.LOSS_MEAN: expected_unregularized_loss,
      keys.LOSS_REGULARIZATION: expected_regularization_loss,
      # auc and auc_pr cannot be reliably calculated for only 4 samples, but
      # this assert tests that the algorithm remains consistent.
      keys.AUC: 0.3333,
      keys.AUC_PR: 0.5972,
  }
  self._test_eval(
      head=head,
      logits=logits,
      labels=labels,
      expected_loss=expected_regularized_loss,
      expected_metrics=expected_metrics,
      regularization_losses=regularization_losses)
Author: QiangCai, Project: tensorflow, Lines: 31, Source: head_test.py
Example 8: test_eval_create_loss_large_logits
def test_eval_create_loss_large_logits(self):
  """Tests head.create_loss for eval mode and large logits."""
  n_classes = 2
  head = head_lib.multi_label_head(n_classes)
  logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
  labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  # loss = labels * -log(sigmoid(logits)) +
  #        (1 - labels) * -log(1 - sigmoid(logits))
  # For large logits, this is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #        (1 - labels) * (logits > 0) * logits
  expected_weighted_sum_loss = np.sum(
      np.array([[(10. + 10.) / 2.], [(15. + 0.) / 2.]], dtype=np.float32))
  actual_weighted_sum_loss = head.create_loss(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.EVAL,
      logits=logits,
      labels=labels)[0]
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    self.assertAllClose(
        expected_weighted_sum_loss,
        actual_weighted_sum_loss.eval(),
        atol=1e-4)
Author: dyoung418, Project: tensorflow, Lines: 25, Source: head_test.py
Example 9: test_eval
def test_eval(self):
  n_classes = 2
  head = head_lib.multi_label_head(n_classes)
  logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
  labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  # loss = labels * -log(sigmoid(logits)) +
  #        (1 - labels) * -log(1 - sigmoid(logits))
  # Sum over examples, divide by batch_size.
  expected_loss = 0.5 * np.sum(
      _sigmoid_cross_entropy(labels=labels, logits=logits))
  keys = metric_keys.MetricKeys
  expected_metrics = {
      # Average loss over examples.
      keys.LOSS_MEAN: expected_loss,
      # auc and auc_pr cannot be reliably calculated for only 4 samples, but
      # this assert tests that the algorithm remains consistent.
      keys.AUC: 0.3333,
      keys.AUC_PR: 0.7639,
  }
  self._test_eval(
      head=head,
      logits=logits,
      labels=labels,
      expected_loss=expected_loss,
      expected_metrics=expected_metrics)
Author: AndrewTwinz, Project: tensorflow, Lines: 25, Source: head_test.py
Example 10: test_train_with_optimizer
def test_train_with_optimizer(self):
  head = head_lib.multi_label_head(n_classes=2)
  logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
  labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  # For large logits, sigmoid cross entropy loss is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #        (1 - labels) * (logits > 0) * logits =>
  # expected_unweighted_loss = [[10., 10.], [15., 0.]]
  # Average over classes, sum over weights.
  expected_loss = 17.5
  expected_train_result = 'my_train_op'

  class _Optimizer(object):

    def minimize(self, loss, global_step):
      del global_step
      return string_ops.string_join(
          [constant_op.constant(expected_train_result),
           string_ops.as_string(loss, precision=3)])

  spec = head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      optimizer=_Optimizer())
  tol = 1e-3
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    loss, train_result = sess.run((spec.loss, spec.train_op))
    self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
    self.assertEqual(
        six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
        train_result)
Author: indiejoseph, Project: tensorflow, Lines: 35, Source: head_test.py
Example 11: test_predict
def test_predict(self):
  n_classes = 4
  head = head_lib.multi_label_head(n_classes)
  self.assertEqual(n_classes, head.logits_dimension)
  logits = np.array(
      [[0., 1., 2., -1.], [-1., -2., -3., 1.]], dtype=np.float32)
  expected_probabilities = _sigmoid(logits)
  spec = head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.PREDICT,
      logits=logits)
  self.assertItemsEqual(
      ('', _DEFAULT_SERVING_KEY), spec.export_outputs.keys())
  # Assert predictions and export_outputs.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    predictions = sess.run(spec.predictions)
    self.assertAllClose(logits,
                        predictions[prediction_keys.PredictionKeys.LOGITS])
    self.assertAllClose(
        expected_probabilities,
        predictions[prediction_keys.PredictionKeys.PROBABILITIES])
    self.assertAllClose(
        expected_probabilities,
        sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
Author: rajeev921, Project: tensorflow, Lines: 31, Source: head_test.py
Example 12: test_multi_dim_weighted_eval
def test_multi_dim_weighted_eval(self):
  """Logits and labels of shape [2, 2, 3], weights [2, 2]."""
  head = head_lib.multi_label_head(n_classes=3, weight_column='weights')
  logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
                     [[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
  labels = np.array([[[1, 0, 0], [1, 0, 0]],
                     [[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
  weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
  # loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
  #      = [[20/3, 10/3], [4, 8]]
  # weighted_sum_loss = 1*20/3 + 1.5*10/3 + 2*4 + 2.5*8 = 39.6667
  expected_loss = 39.6667
  keys = metric_keys.MetricKeys
  expected_metrics = {
      keys.LOSS_MEAN: expected_loss / np.sum(weights),
      # auc and auc_pr cannot be reliably calculated for only 4 samples, but
      # this assert tests that the algorithm remains consistent.
      keys.AUC: 0.4977,
      keys.AUC_PR: 0.6645,
  }
  self._test_eval(
      head=head,
      features={'weights': weights},
      logits=logits,
      labels=labels,
      expected_loss=expected_loss,
      expected_metrics=expected_metrics)
Author: andrewharp, Project: tensorflow, Lines: 28, Source: head_test.py
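The weighted-sum arithmetic in the comments above can be reproduced with a few lines of numpy (an illustrative check, outside the test; the same numbers reappear in Examples 17 and 18):

import numpy as np

per_example_loss = np.array([[20. / 3., 10. / 3.], [4., 8.]])  # class-averaged loss
weights = np.array([[1., 1.5], [2., 2.5]])
print(np.sum(weights * per_example_loss))  # ~39.6667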
Example 13: test_multi_dim_weights_wrong_outer_dim
def test_multi_dim_weights_wrong_outer_dim(self):
  """Logits and labels of shape [2, 2, 3], weights [2, 2, 3]."""
  head = head_lib.multi_label_head(n_classes=3, weight_column='weights')
  logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
                     [[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
  labels = np.array([[[1, 0, 0], [1, 0, 0]],
                     [[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
  weights = np.array([[[1., 1., 1.], [1.5, 1.5, 1.5]],
                      [[2., 2., 2.], [2.5, 2.5, 2.5]]], dtype=np.float32)
  weights_placeholder = array_ops.placeholder(dtype=dtypes.float32)
  def _train_op_fn(loss):
    del loss
    return control_flow_ops.no_op()
  spec = head.create_estimator_spec(
      features={'weights': weights_placeholder},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      train_op_fn=_train_op_fn)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r'\[logits_shape: \] \[2 2 3\] \[weights_shape: \] \[2 2 3\]'):
      spec.loss.eval({weights_placeholder: weights})
Author: andrewharp, Project: tensorflow, Lines: 27, Source: head_test.py
Example 14: test_eval_create_loss_labels_wrong_shape
def test_eval_create_loss_labels_wrong_shape(self):
  """Tests head.create_loss for eval mode when labels has the wrong shape."""
  n_classes = 2
  head = head_lib.multi_label_head(n_classes)
  logits = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
  labels_placeholder = array_ops.placeholder(dtype=dtypes.int64)
  actual_training_loss = head.create_loss(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.EVAL,
      logits=logits,
      labels=labels_placeholder)[0]
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r'\[expected_labels_shape: \] \[2 2\] \[labels_shape: \] \[2 1\]'):
      actual_training_loss.eval({
          labels_placeholder: np.array([[1], [1]], dtype=np.int64)
      })
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r'labels shape must be \[D0, D1, ... DN, 2\]\..*'
        r'\[Received shape: \] \[2\]'):
      actual_training_loss.eval({
          labels_placeholder: np.array([1, 1], dtype=np.int64)
      })
Author: andrewharp, Project: tensorflow, Lines: 27, Source: head_test.py
Example 15: test_train_create_loss_loss_reduction
def test_train_create_loss_loss_reduction(self):
  """Tests head.create_loss with loss_reduction."""
  n_classes = 2
  head = head_lib.multi_label_head(
      n_classes, weight_column='example_weights',
      loss_reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS)
  logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
  labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  weights = np.array([[1.], [2.]], dtype=np.float32)
  # loss = labels * -log(sigmoid(logits)) +
  #        (1 - labels) * -log(1 - sigmoid(logits))
  # For large logits, this is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #        (1 - labels) * (logits > 0) * logits
  expected_unreduced_loss = [[(10. + 10.) / 2.], [(15. + 0.) / 2.]]
  expected_weights = [[1.], [2.]]
  expected_training_loss = (1. * (10. + 10.) / 2. + 2. * (15. + 0.) / 2.) / 2.
  training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
      features={
          'x': np.array(((42,),), dtype=np.int32),
          'example_weights': weights
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    self.assertAllClose(
        expected_training_loss, training_loss.eval(), atol=1e-4)
    self.assertAllClose(
        expected_unreduced_loss, unreduced_loss.eval(), atol=1e-4)
    self.assertAllClose(expected_weights, actual_weights.eval())
Author: andrewharp, Project: tensorflow, Lines: 33, Source: head_test.py
Example 16: test_weight_should_not_impact_prediction
def test_weight_should_not_impact_prediction(self):
  n_classes = 4
  head = head_lib.multi_label_head(n_classes, weight_column='example_weights')
  self.assertEqual(n_classes, head.logits_dimension)
  logits = np.array(
      [[0., 1., 2., -1.], [-1., -2., -3., 1.]], dtype=np.float32)
  expected_probabilities = _sigmoid(logits)
  weights_2x1 = [[1.], [2.]]
  spec = head.create_estimator_spec(
      features={
          'x': np.array(((42,),), dtype=np.int32),
          'example_weights': weights_2x1,
      },
      mode=model_fn.ModeKeys.PREDICT,
      logits=logits)
  # Assert predictions and export_outputs.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    predictions = sess.run(spec.predictions)
    self.assertAllClose(logits,
                        predictions[prediction_keys.PredictionKeys.LOGITS])
    self.assertAllClose(
        expected_probabilities,
        predictions[prediction_keys.PredictionKeys.PROBABILITIES])
Author: andrewharp, Project: tensorflow, Lines: 28, Source: head_test.py
Example 17: test_multi_dim_weighted_train_create_loss
def test_multi_dim_weighted_train_create_loss(self):
  """Logits and labels of shape [2, 2, 3], weights [2, 2]."""
  head = head_lib.multi_label_head(n_classes=3, weight_column='weights')
  logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
                     [[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
  labels = np.array([[[1, 0, 0], [1, 0, 0]],
                     [[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
  weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
  # unreduced_loss =
  #     [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
  #     = [[20/3, 10/3], [4, 8]]
  expected_unreduced_loss = [[[20./3.], [10./3.]], [[4.], [8.]]]
  # weights are reshaped to [2, 2, 1] to match logits.
  expected_weights = [[[1.], [1.5]], [[2.], [2.5]]]
  # weighted_sum_loss = 1*20/3 + 1.5*10/3 + 2*4 + 2.5*8 = 39.6667
  expected_training_loss = 39.6667
  training_loss, unreduced_loss, actual_weights, _ = head.create_loss(
      features={'weights': weights},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)
  atol = 1.e-3
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    self.assertAllClose(
        expected_training_loss, training_loss.eval(), atol=atol)
    self.assertAllClose(
        expected_unreduced_loss, unreduced_loss.eval(), atol=atol)
    self.assertAllClose(expected_weights, actual_weights.eval())
Author: andrewharp, Project: tensorflow, Lines: 30, Source: head_test.py
Example 18: test_multi_dim_weighted_train
def test_multi_dim_weighted_train(self):
  """Logits and labels of shape [2, 2, 3], weights [2, 2]."""
  head = head_lib.multi_label_head(n_classes=3, weight_column='weights')
  logits = np.array([[[-10., 10., -10.], [10., -10., 10.]],
                     [[-12., 12., -12.], [12., -12., 12.]]], dtype=np.float32)
  labels = np.array([[[1, 0, 0], [1, 0, 0]],
                     [[0, 1, 1], [0, 1, 1]]], dtype=np.int64)
  weights = np.array([[1., 1.5], [2., 2.5]], dtype=np.float32)
  # loss = [[10 + 10 + 0, 0 + 0 + 10], [0 + 0 + 12, 12 + 12 + 0]] / 3
  #      = [[20/3, 10/3], [4, 8]]
  # weighted_sum_loss = 1*20/3 + 1.5*10/3 + 2*4 + 2.5*8 = 39.6667
  expected_loss = 39.6667
  expected_train_result = 'my_train_op'
  def _train_op_fn(loss):
    return string_ops.string_join(
        [constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)])
  spec = head.create_estimator_spec(
      features={'weights': weights},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      train_op_fn=_train_op_fn)
  atol = 1.e-3
  with self.test_session() as sess:
    _initialize_variables(self, monitored_session.Scaffold())
    loss, train_result = sess.run((spec.loss, spec.train_op))
    self.assertAllClose(expected_loss, loss, atol=atol)
    self.assertEqual(
        six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
        train_result)
Author: andrewharp, Project: tensorflow, Lines: 34, Source: head_test.py
Example 19: test_eval_with_label_vocabulary
def test_eval_with_label_vocabulary(self):
  n_classes = 2
  head = head_lib.multi_label_head(
      n_classes, label_vocabulary=['class0', 'class1'])
  logits = np.array([[-1., 1.], [-1.5, 1.5]], dtype=np.float32)
  # Equivalent to multi_hot = [[1, 0], [1, 1]]
  labels = sparse_tensor.SparseTensor(
      values=['class0', 'class0', 'class1'],
      indices=[[0, 0], [1, 0], [1, 1]],
      dense_shape=[2, 2])
  labels_multi_hot = np.array([[1, 0], [1, 1]], dtype=np.int64)
  # loss = labels * -log(sigmoid(logits)) +
  #        (1 - labels) * -log(1 - sigmoid(logits))
  # Sum over examples.
  expected_loss = (
      np.sum(_sigmoid_cross_entropy(labels=labels_multi_hot, logits=logits))
  )
  keys = metric_keys.MetricKeys
  expected_metrics = {
      # Average loss over examples.
      keys.LOSS_MEAN: expected_loss / 2,
      # auc and auc_pr cannot be reliably calculated for only 4 samples, but
      # this assert tests that the algorithm remains consistent.
      keys.AUC: 0.3333,
      keys.AUC_PR: 0.7639,
  }
  self._test_eval(
      head=head,
      logits=logits,
      labels=labels,
      expected_loss=expected_loss,
      expected_metrics=expected_metrics)
Author: andrewharp, Project: tensorflow, Lines: 32, Source: head_test.py
Example 20: test_eval_create_loss_sparse_labels
def test_eval_create_loss_sparse_labels(self):
  """Tests head.create_loss for eval mode and sparse labels."""
  n_classes = 2
  head = head_lib.multi_label_head(n_classes)
  logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
  labels = sparse_tensor.SparseTensor(
      values=[0, 0, 1],
      indices=[[0, 0], [1, 0], [1, 1]],
      dense_shape=[2, 2])
  expected_labels = np.array([[1, 0], [1, 1]], dtype=np.int64)
  # loss = labels * -log(sigmoid(logits)) +
  #        (1 - labels) * -log(1 - sigmoid(logits))
  # For large logits, this is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #        (1 - labels) * (logits > 0) * logits
  expected_unweighted_loss = np.array(
      [[10., 10.], [15., 0.]], dtype=np.float32)
  actual_unweighted_loss, actual_labels = head.create_loss(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.EVAL,
      logits=logits,
      labels=labels)
  with self.test_session():
    _initialize_variables(self, monitored_session.Scaffold())
    self.assertAllEqual(expected_labels, actual_labels.eval())
    self.assertAllClose(
        expected_unweighted_loss, actual_unweighted_loss.eval(), atol=1e-4)
Author: 1000sprites, Project: tensorflow, Lines: 28, Source: head_test.py
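To make the sparse-label convention concrete, a small numpy sketch (illustrative, outside the test) shows how the SparseTensor values above act as per-example class ids that expand to the multi-hot labels [[1, 0], [1, 1]], matching expected_labels in the test:

import numpy as np

indices = [[0, 0], [1, 0], [1, 1]]   # (example, position-within-example) pairs
values = [0, 0, 1]                   # class ids, one per sparse entry
multi_hot = np.zeros((2, 2), dtype=np.int64)
for (example, _), class_id in zip(indices, values):
  multi_hot[example, class_id] = 1
print(multi_hot)  # [[1 0]
                  #  [1 1]]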
Note: The tensorflow.contrib.estimator.python.estimator.head.multi_label_head examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code; do not repost without permission.