This article collects typical usage examples of the Python function tensorflow.contrib.estimator.python.estimator.multi_head.multi_head. If you are wondering what the multi_head function does, how to call it, or what working code looks like, the curated examples below should help.
A total of 17 code examples of the multi_head function are shown below, sorted by popularity by default.
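Before the test cases, here is a minimal usage sketch of how multi_head is typically wired up. It assumes the public tf.contrib.estimator aliases of TensorFlow 1.x; the feature column, hidden units, and DNNEstimator wiring are illustrative assumptions, not taken from the examples below.

# Minimal sketch (TensorFlow 1.x contrib API; estimator wiring is illustrative).
import tensorflow as tf

# Each head handles one task and must be given a unique name.
head1 = tf.contrib.estimator.multi_label_head(n_classes=2, name='head1')
head2 = tf.contrib.estimator.multi_label_head(n_classes=3, name='head2')

# multi_head merges the heads; optional head_weights scale each head's loss.
head = tf.contrib.estimator.multi_head([head1, head2], head_weights=[1., 2.])

# The combined head can be passed to an estimator that accepts a `head`
# argument, e.g. DNNEstimator. Labels are then fed as a dict keyed by head name.
estimator = tf.contrib.estimator.DNNEstimator(
    head=head,
    hidden_units=[16, 8],
    feature_columns=[tf.feature_column.numeric_column('x')])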
Example 1: test_head_weights_wrong_size
def test_head_weights_wrong_size(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  with self.assertRaisesRegexp(
      ValueError,
      r'heads and head_weights must have the same size\. '
      r'Given len\(heads\): 2. Given len\(head_weights\): 1\.'):
    multi_head_lib.multi_head([head1, head2], head_weights=[1.])
Author: syed-ahmed | Project: tensorflow | Code lines: 8 | Source: multi_head_test.py
Example 2: test_train_create_loss_logits_tensor_multi_dim
def test_train_create_loss_logits_tensor_multi_dim(self):
  """Tests create_loss with multi-dimensional logits of shape [2, 2, 5]."""
  head1 = head_lib.regression_head(label_dimension=2, name='head1')
  head2 = head_lib.regression_head(label_dimension=3, name='head2')
  multi_head = multi_head_lib.multi_head([head1, head2])
  logits = np.array(
      [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
       [[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],
      dtype=np.float32)
  labels = {
      'head1': np.array([[[1., 0.], [1., 0.]],
                         [[1.5, 1.5], [1.5, 1.5]]], dtype=np.float32),
      'head2': np.array([[[0., 1., 0.], [0., 1., 0.]],
                         [[2., 2., 0.], [2., 2., 0.]]], dtype=np.float32),
  }
  # Loss for the first head:
  # loss1 = ((1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +
  #          (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2) / 8
  #       = 3.5
  # Loss for the second head:
  # loss2 = ((0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +
  #          (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2) / 12
  #       = 6.167
  expected_training_loss = 3.5 + 6.167
  training_loss = multi_head.create_loss(
      features={},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)[0]
  tol = 1e-3
  with self.test_session():
    self.assertAllClose(
        expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
Author: syed-ahmed | Project: tensorflow | Code lines: 35 | Source: multi_head_test.py
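The expected loss values asserted in Example 2 can be cross-checked with plain NumPy, assuming each regression head averages the squared error over all of its label entries, as the comments indicate. This is a quick sanity check, not part of the original test.

import numpy as np

logits = np.array(
    [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
     [[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]])
labels1 = np.array([[[1., 0.], [1., 0.]], [[1.5, 1.5], [1.5, 1.5]]])
labels2 = np.array([[[0., 1., 0.], [0., 1., 0.]], [[2., 2., 0.], [2., 2., 0.]]])

# The 5 logits per example are split 2/3 between the heads.
loss1 = np.mean((labels1 - logits[..., :2]) ** 2)   # -> 3.5
loss2 = np.mean((labels2 - logits[..., 2:]) ** 2)   # -> 6.1667
print(loss1 + loss2)                                # -> 9.6667, i.e. 3.5 + 6.167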
Example 3: test_predict_two_heads_logits_tensor
def test_predict_two_heads_logits_tensor(self):
  """Tests predict with logits as Tensor."""
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head([head1, head2])
  logits = np.array(
      [[-1., 1., 2., -2., 2.], [-1.5, 1., -3., 2., -2.]], dtype=np.float32)
  expected_logits1 = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
  expected_logits2 = np.array([[2., -2., 2.], [-3., 2., -2.]],
                              dtype=np.float32)
  expected_probabilities = {
      'head1': _sigmoid(expected_logits1),
      'head2': _sigmoid(expected_logits2),
  }
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.PREDICT,
      logits=logits)
  self.assertItemsEqual(
      (_DEFAULT_SERVING_KEY, 'predict', 'head1', 'classification/head1',
       'predict/head1', 'head2', 'classification/head2', 'predict/head2'),
      spec.export_outputs.keys())
  # Assert predictions and export_outputs.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    predictions = sess.run(spec.predictions)
    self.assertAllClose(
        expected_logits1,
        predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
    self.assertAllClose(
        expected_logits2,
        predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
    self.assertAllClose(
        expected_probabilities['head1'],
        predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
    self.assertAllClose(
        expected_probabilities['head2'],
        predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
    self.assertAllClose(
        expected_probabilities['head1'],
        sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
    self.assertAllClose(
        expected_probabilities['head1'],
        sess.run(spec.export_outputs['head1'].scores))
    self.assertAllClose(
        expected_probabilities['head2'],
        sess.run(spec.export_outputs['head2'].scores))
Author: syed-ahmed | Project: tensorflow | Code lines: 53 | Source: multi_head_test.py
Example 4: test_predict_two_heads_logits_tensor_multi_dim
def test_predict_two_heads_logits_tensor_multi_dim(self):
  """Tests predict with multi-dimensional logits of shape [2, 2, 5]."""
  head1 = head_lib.regression_head(label_dimension=2, name='head1')
  head2 = head_lib.regression_head(label_dimension=3, name='head2')
  multi_head = multi_head_lib.multi_head([head1, head2])
  logits = np.array(
      [[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
       [[-1.5, 1., -3., 2., -2.], [-1.5, 1., -3., 2., -2.]]],
      dtype=np.float32)
  expected_logits1 = np.array(
      [[[-1., 1.], [-1., 1.]],
       [[-1.5, 1.], [-1.5, 1.]]],
      dtype=np.float32)
  expected_logits2 = np.array(
      [[[2., -2., 2.], [2., -2., 2.]],
       [[-3., 2., -2.], [-3., 2., -2.]]],
      dtype=np.float32)
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.PREDICT,
      logits=logits)
  self.assertItemsEqual(
      (_DEFAULT_SERVING_KEY, 'predict', 'head1', 'regression/head1',
       'predict/head1', 'head2', 'regression/head2', 'predict/head2'),
      spec.export_outputs.keys())
  # Assert predictions and export_outputs.
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    predictions = sess.run(spec.predictions)
    self.assertAllClose(
        expected_logits1,
        predictions[('head1', prediction_keys.PredictionKeys.PREDICTIONS)])
    self.assertAllClose(
        expected_logits2,
        predictions[('head2', prediction_keys.PredictionKeys.PREDICTIONS)])
    self.assertAllClose(
        expected_logits1,
        sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].value))
    self.assertAllClose(
        expected_logits1,
        sess.run(spec.export_outputs['head1'].value))
    self.assertAllClose(
        expected_logits2,
        sess.run(spec.export_outputs['head2'].value))
Author: syed-ahmed | Project: tensorflow | Code lines: 50 | Source: multi_head_test.py
Example 5: test_train_create_loss_two_heads_with_weights
def test_train_create_loss_two_heads_with_weights(self):
  # Use different example weighting for each head weighting.
  weights1 = np.array([[1.], [2.]], dtype=np.float32)
  weights2 = np.array([[2.], [3.]])
  head1 = head_lib.multi_label_head(n_classes=2, name='head1',
                                    weight_column='weights1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2',
                                    weight_column='weights2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
      features={
          'x': np.array(((42,),), dtype=np.int32),
          'weights1': weights1,
          'weights2': weights2
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)
  tol = 1e-3
  with self.test_session():
    # loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
    # = [10, 7.5]
    # training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5
    # head-weighted unreduced_loss = 1 * [10, 7.5]
    self.assertAllClose(
        [[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
    # loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
    # = [20, 10]
    # training_loss = (2 * 20 + 3 * 10) / 2 = 35
    # head-weighted unreduced_loss = 2 * [20, 10]
    self.assertAllClose(
        [[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
    # head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5
    self.assertAllClose(82.5, training_loss.eval(), rtol=tol, atol=tol)
    # head-weighted example weights
    self.assertAllClose(
        [[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
    self.assertAllClose(
        [[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
Author: syed-ahmed | Project: tensorflow | Code lines: 50 | Source: multi_head_test.py
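The arithmetic behind the expected training_loss of 82.5 in Example 5 can be reproduced with NumPy, using the per-example losses given in the comments and dividing each head's weighted sum by the batch size of 2, as those comments do. A sanity check only, not part of the original test.

import numpy as np

# Per-example, per-head losses taken from the comments above.
unreduced1 = np.array([10., 7.5])   # head1, before example weighting
unreduced2 = np.array([20., 10.])   # head2, before example weighting
weights1 = np.array([1., 2.])       # example weights for head1
weights2 = np.array([2., 3.])       # example weights for head2

loss1 = np.sum(weights1 * unreduced1) / 2   # -> 12.5
loss2 = np.sum(weights2 * unreduced2) / 2   # -> 35.0
print(1. * loss1 + 2. * loss2)              # head weights [1., 2.] -> 82.5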
Example 6: test_train_create_loss_one_head
def test_train_create_loss_one_head(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  multi_head = multi_head_lib.multi_head([head1])
  logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
  labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
  with self.assertRaisesRegexp(
      NotImplementedError,
      r'create_loss not yet implemented for MultiHead\.'):
    multi_head.create_loss(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
Author: Crazyonxh | Project: tensorflow | Code lines: 14 | Source: multi_head_test.py
Example 7: test_train_one_head
def test_train_one_head(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  multi_head = multi_head_lib.multi_head([head1])
  logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
  labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
  # For large logits, sigmoid cross entropy loss is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #     (1 - labels) * (logits > 0) * logits =>
  # expected_unweighted_loss = [[10., 10.], [15., 0.]]
  # Average over classes, sum over weights.
  expected_loss = 17.5
  expected_train_result = 'my_train_op'
  def _train_op_fn(loss):
    return string_ops.string_join(
        [constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)])
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      train_op_fn=_train_op_fn)
  self.assertIsNotNone(spec.loss)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)
  # Assert predictions, loss, train_op, and summaries.
  tol = 1e-3
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                spec.scaffold.summary_op))
    self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
    self.assertEqual(
        six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
        train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        metric_keys.MetricKeys.LOSS + '/head1': expected_loss,
        # Average loss over examples.
        metric_keys.MetricKeys.LOSS_MEAN + '/head1': expected_loss / 2,
    }, summary_str, tol)
Author: AbhinavJain13 | Project: tensorflow | Code lines: 48 | Source: multi_head_test.py
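The expected_loss of 17.5 in Example 7 follows from the exact sigmoid cross entropy, not just the large-logit approximation in the comments. A NumPy cross-check (sanity check only, not part of the original test):

import numpy as np

logits = np.array([[-10., 10.], [-15., 10.]])
labels = np.array([[1., 0.], [1., 1.]])
# Numerically stable sigmoid cross entropy, same formula as
# tf.nn.sigmoid_cross_entropy_with_logits.
xent = (np.maximum(logits, 0) - logits * labels +
        np.log1p(np.exp(-np.abs(logits))))
print(xent.round(3))            # ~[[10., 10.], [15., 0.]]
print(xent.mean(axis=1).sum())  # average over classes, sum over examples -> ~17.5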
Example 8: test_train_create_loss_one_head
def test_train_create_loss_one_head(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  multi_head = multi_head_lib.multi_head([head1])
  logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
  labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
  loss = multi_head.create_loss(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)[0]
  tol = 1e-3
  with self.test_session():
    # Unreduced loss of the head is [[(10 + 10) / 2], (15 + 0) / 2]
    # (averaged over classes, averaged over examples).
    self.assertAllClose(8.75, loss.eval(), rtol=tol, atol=tol)
Author: syed-ahmed | Project: tensorflow | Code lines: 16 | Source: multi_head_test.py
Example 9: test_train_create_loss_two_heads_with_weights
def test_train_create_loss_two_heads_with_weights(self):
  # Use different example weighting for each head weighting.
  weights1 = np.array([[1.], [2.]], dtype=np.float32)
  weights2 = np.array([[2.], [3.]])
  head1 = head_lib.multi_label_head(n_classes=2, name='head1',
                                    weight_column='weights1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2',
                                    weight_column='weights2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  weighted_sum_loss, example_weight_sum, _ = multi_head.create_loss(
      features={
          'x': np.array(((42,),), dtype=np.int32),
          'weights1': weights1,
          'weights2': weights2
      },
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels)
  tol = 1e-3
  with self.test_session():
    # loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
    # = [10, 7.5]
    # weighted_sum_loss = 1 * 10 + 2 * 7.5 = 25
    # loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
    # = [20, 10]
    # weighted_sum_loss = 2 * 20 + 3 * 10 = 70
    # head-weighted merge = 1 * 25 + 2 * 70 = 165
    self.assertAllClose(165, weighted_sum_loss.eval(), rtol=tol, atol=tol)
    # example_weight_sum = 1 * (1 + 2) + 2 * (2 + 3) = 13
    self.assertAllClose(13., example_weight_sum.eval(), rtol=tol, atol=tol)
Author: AbhinavJain13 | Project: tensorflow | Code lines: 41 | Source: multi_head_test.py
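The weighted_sum_loss of 165 and example_weight_sum of 13 in Example 9 can be reproduced with NumPy from the per-example losses given in the comments. A sanity check only, not part of the original test.

import numpy as np

loss1 = np.array([10., 7.5])     # head1 per-example loss (from the comments)
loss2 = np.array([20., 10.])     # head2 per-example loss (from the comments)
w1, w2 = np.array([1., 2.]), np.array([2., 3.])   # example weights
head_weights = [1., 2.]

weighted_sum_loss = (head_weights[0] * np.sum(w1 * loss1) +
                     head_weights[1] * np.sum(w2 * loss2))
example_weight_sum = (head_weights[0] * w1.sum() +
                      head_weights[1] * w2.sum())
print(weighted_sum_loss, example_weight_sum)      # -> 165.0 13.0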
Example 10: test_train_create_loss_two_heads_with_weights
def test_train_create_loss_two_heads_with_weights(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  with self.assertRaisesRegexp(
      NotImplementedError,
      r'create_loss not yet implemented for MultiHead\.'):
    multi_head.create_loss(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.TRAIN,
        logits=logits,
        labels=labels)
Author: Crazyonxh | Project: tensorflow | Code lines: 23 | Source: multi_head_test.py
Example 11: test_train_one_head_with_optimizer
def test_train_one_head_with_optimizer(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  multi_head = multi_head_lib.multi_head([head1])
  logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
  labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
  # For large logits, sigmoid cross entropy loss is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #     (1 - labels) * (logits > 0) * logits =>
  # expected_unweighted_loss = [[10., 10.], [15., 0.]]
  # loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
  expected_loss = 8.75
  expected_train_result = 'my_train_op'
  class _Optimizer(object):

    def minimize(self, loss, global_step):
      del global_step
      return string_ops.string_join(
          [constant_op.constant(expected_train_result),
           string_ops.as_string(loss, precision=3)])

  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      optimizer=_Optimizer())
  tol = 1e-3
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    loss, train_result = sess.run((spec.loss, spec.train_op))
    self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
    self.assertEqual(
        six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
        train_result)
Author: syed-ahmed | Project: tensorflow | Code lines: 37 | Source: multi_head_test.py
Example 12: test_eval_tpu
def test_eval_tpu(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  with self.assertRaisesRegexp(
      NotImplementedError,
      r'TPU evaluation is not implemented for multi_head\.'):
    multi_head._create_tpu_estimator_spec(
        features={'x': np.array(((42,),), dtype=np.int32)},
        mode=model_fn.ModeKeys.EVAL,
        logits=logits,
        labels=labels)
Author: ThunderQi | Project: tensorflow | Code lines: 24 | Source: multi_head_test.py
Example 13: test_eval_two_heads_with_weights
def test_eval_two_heads_with_weights(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  # For large logits, sigmoid cross entropy loss is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #     (1 - labels) * (logits > 0) * logits =>
  # head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
  # loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
  # head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
  # loss = ( (20 + 20 + 20) / 3 + (30 + 0 + 0) / 3 ) / 2 = 15
  expected_loss_head1 = 8.75
  expected_loss_head2 = 15.
  expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.EVAL,
      logits=logits,
      labels=labels)
  keys = metric_keys.MetricKeys
  expected_metrics = {
      keys.LOSS + '/head1': expected_loss_head1,
      keys.LOSS + '/head2': expected_loss_head2,
      # Average loss over examples.
      keys.LOSS_MEAN + '/head1': expected_loss_head1,
      keys.LOSS_MEAN + '/head2': expected_loss_head2,
      # auc and auc_pr cannot be reliably calculated for only 4-6 samples, but
      # this assert tests that the algorithm remains consistent.
      keys.AUC + '/head1': 0.1667,
      keys.AUC + '/head2': 0.3333,
      keys.AUC_PR + '/head1': 0.6667,
      keys.AUC_PR + '/head2': 0.5000,
  }
  # Assert spec contains expected tensors.
  self.assertIsNotNone(spec.loss)
  self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
  self.assertIsNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)
  # Assert predictions, loss, and metrics.
  tol = 1e-3
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNone(spec.scaffold.summary_op)
    value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
    update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
    loss, metrics = sess.run((spec.loss, update_ops))
    self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
    # Check results of both update (in `metrics`) and value ops.
    self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
    self.assertAllClose(
        expected_metrics, {k: value_ops[k].eval() for k in value_ops},
        rtol=tol,
        atol=tol)
Author: syed-ahmed | Project: tensorflow | Code lines: 69 | Source: multi_head_test.py
Example 14: test_head_name_missing
def test_head_name_missing(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3)
  with self.assertRaisesRegexp(
      ValueError, r'All given heads must have name specified\.'):
    multi_head_lib.multi_head([head1, head2])
Author: syed-ahmed | Project: tensorflow | Code lines: 6 | Source: multi_head_test.py
Example 15: test_no_heads
def test_no_heads(self):
  with self.assertRaisesRegexp(
      ValueError, r'Must specify heads\. Given: \[\]'):
    multi_head_lib.multi_head(heads=[])
Author: syed-ahmed | Project: tensorflow | Code lines: 4 | Source: multi_head_test.py
Example 16: test_train_two_heads_with_weights
def test_train_two_heads_with_weights(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head(
      [head1, head2], head_weights=[1., 2.])
  logits = {
      'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
      'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
                        dtype=np.float32),
  }
  labels = {
      'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
      'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
  }
  # For large logits, sigmoid cross entropy loss is approximated as:
  # loss = labels * (logits < 0) * (-logits) +
  #     (1 - labels) * (logits > 0) * logits =>
  # head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
  # loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
  # head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
  # loss = ( (20 + 20 + 20) / 3 + (30 + 0 + 0) / 3 ) / 2 = 15
  # Average over classes, weighted sum over batch and heads.
  expected_loss_head1 = 8.75
  expected_loss_head2 = 15.0
  expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
  expected_train_result = 'my_train_op'
  def _train_op_fn(loss):
    return string_ops.string_join(
        [constant_op.constant(expected_train_result),
         string_ops.as_string(loss, precision=3)])
  spec = multi_head.create_estimator_spec(
      features={'x': np.array(((42,),), dtype=np.int32)},
      mode=model_fn.ModeKeys.TRAIN,
      logits=logits,
      labels=labels,
      train_op_fn=_train_op_fn)
  self.assertIsNotNone(spec.loss)
  self.assertEqual({}, spec.eval_metric_ops)
  self.assertIsNotNone(spec.train_op)
  self.assertIsNone(spec.export_outputs)
  _assert_no_hooks(self, spec)
  # Assert predictions, loss, train_op, and summaries.
  tol = 1e-3
  with self.test_session() as sess:
    _initialize_variables(self, spec.scaffold)
    self.assertIsNotNone(spec.scaffold.summary_op)
    loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
                                                spec.scaffold.summary_op))
    self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
    self.assertEqual(
        six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
        train_result)
    _assert_simple_summaries(self, {
        metric_keys.MetricKeys.LOSS: expected_loss,
        metric_keys.MetricKeys.LOSS + '/head1': expected_loss_head1,
        metric_keys.MetricKeys.LOSS + '/head2': expected_loss_head2,
    }, summary_str, tol)
Author: syed-ahmed | Project: tensorflow | Code lines: 61 | Source: multi_head_test.py
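The expected_loss of 38.75 in Example 16 (1 * 8.75 + 2 * 15) can be cross-checked from the exact sigmoid cross entropy with NumPy. The head_loss helper below is defined here purely for illustration; it averages over classes and then over examples, as the comments describe. A sanity check only, not part of the original test.

import numpy as np

def head_loss(logits, labels):
  # Numerically stable sigmoid cross entropy, averaged over classes and examples.
  xent = (np.maximum(logits, 0) - logits * labels +
          np.log1p(np.exp(-np.abs(logits))))
  return xent.mean(axis=1).mean()

loss1 = head_loss(np.array([[-10., 10.], [-15., 10.]]),
                  np.array([[1., 0.], [1., 1.]]))          # ~8.75
loss2 = head_loss(np.array([[20., -20., 20.], [-30., 20., -20.]]),
                  np.array([[0., 1., 0.], [1., 1., 0.]]))  # ~15.0
print(1. * loss1 + 2. * loss2)                             # ~38.75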
Example 17: test_name
def test_name(self):
  head1 = head_lib.multi_label_head(n_classes=2, name='head1')
  head2 = head_lib.multi_label_head(n_classes=3, name='head2')
  multi_head = multi_head_lib.multi_head([head1, head2])
  self.assertEqual('head1_head2', multi_head.name)
Author: syed-ahmed | Project: tensorflow | Code lines: 5 | Source: multi_head_test.py
Note: The tensorflow.contrib.estimator.python.estimator.multi_head.multi_head examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The code snippets are taken from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.