This article collects and summarizes typical usage examples of the Python function tensorflow.contrib.learn.python.learn.estimators.head._regression_head. If you are wondering what _regression_head does or how to call it, the curated code examples below may help.
The following shows 20 code examples of the _regression_head function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
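Most of the test snippets below share one pattern: build a head with head_lib._regression_head(...), feed it labels and logits through head.head_ops(...) (or head.create_model_fn_ops(...) in newer revisions), and inspect the returned ModelFnOps. The snippets rely on helpers defined elsewhere in head_test.py, such as head_lib, _noop_train_op and the _assert_* utilities. The sketch below is not taken from any of the examples; it is a minimal, self-contained way to reproduce the same call on TensorFlow 1.x, with _noop_train_op written here as a stand-in for the test helper.

import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib


def _noop_train_op(unused_loss):
  # Stand-in for the head_test.py helper: a training op that does nothing.
  return tf.no_op()

head = head_lib._regression_head()  # pylint: disable=protected-access
with tf.Graph().as_default(), tf.Session() as sess:
  logits = tf.constant([[1.], [1.], [3.]])
  labels = tf.constant([[0.], [1.], [1.]])
  model_fn_ops = head.head_ops({}, labels,
                               tf.contrib.learn.ModeKeys.TRAIN,
                               _noop_train_op, logits=logits)
  print(sess.run(model_fn_ops.loss))  # Mean squared error; 5/3 for this data.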
Example 1: testRegression
def testRegression(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default(), tf.Session() as sess:
    prediction = tf.constant([[1.0], [1.0], [3.0]])
    labels = tf.constant([[0.0], [1.0], [1.0]])
    model_fn_ops = head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    self.assertAlmostEqual(5.0 / 3, sess.run(model_fn_ops.loss))
Contributor: yuikns, Project: tensorflow, Lines of code: 7, Source: head_test.py
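A quick note on the expected value (added here, not part of the original test): assuming the regression head computes a mean squared error, the per-example squared errors are (1 - 0)^2 = 1, (1 - 1)^2 = 0 and (3 - 1)^2 = 4, so the loss is (1 + 0 + 4) / 3 = 5/3. The same value appears in most of the unweighted examples below.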
Example 2: testRegression
def testRegression(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default(), tf.Session() as sess:
    prediction = tf.constant([[1.], [1.], [3.]])
    targets = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops({}, targets,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 None, logits=prediction)
    self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
Contributor: Qstar, Project: tensorflow, Lines of code: 9, Source: head_test.py
Example 3: testRegressionWithWeights
def testRegressionWithWeights(self):
  head = head_lib._regression_head(weight_column_name="label_weight")
  with tf.Graph().as_default(), tf.Session() as sess:
    features = {"label_weight": tf.constant([[2.0], [5.0], [0.0]])}
    prediction = tf.constant([[1.0], [1.0], [3.0]])
    labels = tf.constant([[0.0], [1.0], [1.0]])
    model_fn_ops = head.head_ops(
        features, labels, tf.contrib.learn.ModeKeys.TRAIN, _noop_train_op,
        logits=prediction)
    self.assertAlmostEqual(2.0 / 3, sess.run(model_fn_ops.loss), places=3)
Contributor: yuikns, Project: tensorflow, Lines of code: 10, Source: head_test.py
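Sanity check on the asserted value (added here, not part of the original test): with per-example weights 2, 5 and 0, the weighted squared errors sum to 2*1 + 5*0 + 0*4 = 2, and dividing by the batch size of 3 gives the 2/3 training loss.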
Example 4: testRegression
def testRegression(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default(), tf.Session():
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops({}, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    _assert_no_variables(self)
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: Hwhitetooth, Project: tensorflow, Lines of code: 10, Source: head_test.py
Example 5: testRegressionEvalMode
def testRegressionEvalMode(self):
  head = head_lib._regression_head()
  with ops.Graph().as_default(), session.Session():
    prediction = constant_op.constant([[1.], [1.], [3.]])
    labels = constant_op.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops(
        {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=prediction)
    self.assertIsNone(model_fn_ops.train_op)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: kadeng, Project: tensorflow, Lines of code: 11, Source: head_test.py
Example 6: testErrorInSparseTensorLabels
def testErrorInSparseTensorLabels(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default():
    prediction = tf.constant([[1.0], [1.0], [3.0]])
    labels = tf.SparseTensor(
        indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
        values=tf.constant([0.0, 1.0, 1.0]),
        shape=[3, 1])
    with self.assertRaisesRegexp(ValueError,
                                 "SparseTensor is not supported as labels."):
      head.head_ops({}, labels, tf.contrib.learn.ModeKeys.TRAIN,
                    _noop_train_op, logits=prediction)
Contributor: yuikns, Project: tensorflow, Lines of code: 11, Source: head_test.py
Example 7: testRegressionWithLabelName
def testRegressionWithLabelName(self):
  label_name = "my_label"
  head = head_lib._regression_head(label_name=label_name)
  with tf.Graph().as_default(), tf.Session():
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = {label_name: tf.constant([[0.], [1.], [1.]])}
    model_fn_ops = head.head_ops({}, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: kdavis-mozilla, Project: tensorflow, Lines of code: 12, Source: head_test.py
Example 8: testRegressionWithLogits
def testRegressionWithLogits(self):
  head = head_lib._regression_head()
  with ops.Graph().as_default(), session.Session():
    model_fn_ops = head.create_model_fn_ops(
        {},
        labels=((0.,), (1.,), (1.,)),
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((1.,), (1.,), (3.,)))
    _assert_summary_tags(self, ["loss"])
    _assert_no_variables(self)
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: ivankreso, Project: tensorflow, Lines of code: 12, Source: head_test.py
Example 9: testErrorInSparseTensorTarget
def testErrorInSparseTensorTarget(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default():
    prediction = tf.constant([[1.], [1.], [3.]])
    targets = tf.SparseTensor(
        indices=tf.constant([[0, 0], [1, 0], [2, 0]], dtype=tf.int64),
        values=tf.constant([0., 1., 1.]),
        shape=[3, 1])
    with self.assertRaisesRegexp(
        ValueError, "SparseTensor is not supported as a target"):
      head.head_ops({}, targets, tf.contrib.learn.ModeKeys.TRAIN, None,
                    logits=prediction)
Contributor: Qstar, Project: tensorflow, Lines of code: 12, Source: head_test.py
Example 10: testRegressionWithWeights
def testRegressionWithWeights(self):
  head = head_lib._regression_head(weight_column_name="label_weight")
  with ops.Graph().as_default(), session.Session():
    weights = ((2.,), (5.,), (0.,))
    model_fn_ops = head.create_model_fn_ops(
        features={"label_weight": weights},
        labels=((0.,), (1.,), (1.,)),
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((1.,), (1.,), (3.,)))
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    _assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
                    model_fn_ops)
Contributor: ivankreso, Project: tensorflow, Lines of code: 14, Source: head_test.py
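Note the two denominators in the final assertion: the expected training loss passed to _assert_metrics is 2. / len(weights) = 2/3 (the weighted error sum averaged over the number of examples), while the expected "loss" eval metric is 2. / np.sum(weights) = 2/7 (the weighted error sum divided by the total weight). The snippet also assumes numpy is imported as np elsewhere in head_test.py.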
Example 11: testRegressionWithLogitsInput
def testRegressionWithLogitsInput(self):
  head = head_lib._regression_head()
  with ops.Graph().as_default(), session.Session():
    model_fn_ops = head.create_model_fn_ops(
        {},
        labels=((0.,), (1.,), (1.,)),
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits_input=((0., 0.), (0., 0.), (0., 0.)))
    w = ("logits/weights:0", "logits/biases:0")
    _assert_variables(
        self, expected_global=w, expected_model=w, expected_trainable=w)
    variables.global_variables_initializer().run()
    _assert_summary_tags(self, ["loss"])
    _assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)
Contributor: ivankreso, Project: tensorflow, Lines of code: 15, Source: head_test.py
Example 12: testRegressionWithWeights
def testRegressionWithWeights(self):
  head = head_lib._regression_head(
      weight_column_name="label_weight")
  with tf.Graph().as_default(), tf.Session():
    weights = ((2.,), (5.,), (0.,))
    features = {"label_weight": tf.constant(weights)}
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops(features, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    _assert_no_variables(self)
    _assert_metrics(self, 2. / len(weights), {
        "loss": 2. / np.sum(weights)
    }, model_fn_ops)
Contributor: Hwhitetooth, Project: tensorflow, Lines of code: 15, Source: head_test.py
Example 13: testRegression
def testRegression(self):
  head = head_lib._regression_head()
  with tf.Graph().as_default(), tf.Session() as sess:
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops({}, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    self._assert_metrics(model_fn_ops)
    _assert_no_variables(self)
    self.assertAlmostEqual(5. / 3, sess.run(model_fn_ops.loss))
    model_fn_ops = head.head_ops({}, labels,
                                 tf.contrib.learn.ModeKeys.EVAL,
                                 _noop_train_op, logits=prediction)
    self.assertIsNone(model_fn_ops.train_op)
Contributor: RapidApplicationDevelopment, Project: tensorflow, Lines of code: 16, Source: head_test.py
Example 14: testRegressionWithCenteredBias
def testRegressionWithCenteredBias(self):
  head = head_lib._regression_head(enable_centered_bias=True)
  with tf.Graph().as_default(), tf.Session():
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops({}, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    _assert_variables(self, expected_global=(
        "centered_bias_weight:0",
        "centered_bias_weight/Adagrad:0",
    ), expected_trainable=(
        "centered_bias_weight:0",
    ))
    tf.global_variables_initializer().run()
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: Hwhitetooth, Project: tensorflow, Lines of code: 16, Source: head_test.py
Example 15: testErrorInSparseTensorLabels
def testErrorInSparseTensorLabels(self):
  head = head_lib._regression_head()
  with ops.Graph().as_default():
    prediction = constant_op.constant([[1.], [1.], [3.]])
    labels = sparse_tensor.SparseTensor(
        indices=constant_op.constant(
            [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
        values=constant_op.constant([0., 1., 1.]),
        dense_shape=[3, 1])
    with self.assertRaisesRegexp(ValueError,
                                 "SparseTensor is not supported as labels."):
      head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
Contributor: kadeng, Project: tensorflow, Lines of code: 17, Source: head_test.py
Example 16: testRegressionWithCenteredBias
def testRegressionWithCenteredBias(self):
  head = head_lib._regression_head(enable_centered_bias=True)
  with ops.Graph().as_default(), session.Session():
    model_fn_ops = head.create_model_fn_ops(
        {},
        labels=((0.,), (1.,), (1.,)),
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=_noop_train_op,
        logits=((1.,), (1.,), (3.,)))
    _assert_variables(
        self,
        expected_global=(
            "centered_bias_weight:0",
            "centered_bias_weight/Adagrad:0",),
        expected_trainable=("centered_bias_weight:0",))
    variables.global_variables_initializer().run()
    _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
    _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)
Contributor: ivankreso, Project: tensorflow, Lines of code: 18, Source: head_test.py
Example 17: testRegressionWithCenteredBias
def testRegressionWithCenteredBias(self):
  head = head_lib._regression_head(
      weight_column_name="label_weight", enable_centered_bias=True)
  with tf.Graph().as_default(), tf.Session() as sess:
    features = {"label_weight": tf.constant([[2.], [5.], [0.]])}
    prediction = tf.constant([[1.], [1.], [3.]])
    labels = tf.constant([[0.], [1.], [1.]])
    model_fn_ops = head.head_ops(features, labels,
                                 tf.contrib.learn.ModeKeys.TRAIN,
                                 _noop_train_op, logits=prediction)
    self._assert_metrics(model_fn_ops)
    _assert_variables(self, expected_global=(
        "centered_bias_weight:0",
        "centered_bias_weight/Adagrad:0",
    ), expected_trainable=(
        "centered_bias_weight:0",
    ))
    tf.global_variables_initializer().run()
    self.assertAlmostEqual(2. / 3, sess.run(model_fn_ops.loss), places=3)
Contributor: RapidApplicationDevelopment, Project: tensorflow, Lines of code: 19, Source: head_test.py
Example 18: __init__
def __init__(self,
             model_dir=None,
             label_dimension=1,
             weight_column_name=None,
             config=None,
             feature_engineering_fn=None):
  """Initializes a DebugRegressor instance.

  Args:
    model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into a estimator to
      continue training a previously saved model.
    label_dimension: Number of regression targets per example. This is the
      size of the last dimension of the labels and logits `Tensor` objects
      (typically, these have shape `[batch_size, label_dimension]`).
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    config: `RunConfig` object to configure the runtime settings.
    feature_engineering_fn: Feature engineering function. Takes features and
      labels which are the output of `input_fn` and returns
      features and labels which will be fed into the model.

  Returns:
    A `DebugRegressor` estimator.
  """
  params = {
      "head":
          head_lib._regression_head(  # pylint: disable=protected-access
              weight_column_name=weight_column_name,
              label_dimension=label_dimension,
              enable_centered_bias=True)
  }
  super(DebugRegressor, self).__init__(
      model_fn=debug_model_fn,
      model_dir=model_dir,
      config=config,
      params=params,
      feature_engineering_fn=feature_engineering_fn)
Contributor: eduardofv, Project: tensorflow, Lines of code: 40, Source: debug.py
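For context, here is a rough usage sketch of the estimator built on top of this head (my own illustration, not taken from debug.py; the feature name "x", the toy data, and the fit/evaluate calls are assumptions for TensorFlow 1.x tf.contrib.learn). DebugRegressor ignores its input features and fits only a baseline, which makes it handy as a sanity-check model:

import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import debug


def input_fn():
  # Toy data: 8 examples with a single float feature and a float label.
  features = {"x": tf.constant(np.random.rand(8, 1), dtype=tf.float32)}
  labels = tf.constant(np.random.rand(8, 1), dtype=tf.float32)
  return features, labels

regressor = debug.DebugRegressor(label_dimension=1)
regressor.fit(input_fn=input_fn, steps=10)
print(regressor.evaluate(input_fn=input_fn, steps=1))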
Example 19: __init__
def __init__(self,  # _joint_weights: pylint: disable=invalid-name
             feature_columns,
             model_dir=None,
             weight_column_name=None,
             optimizer=None,
             gradient_clip_norm=None,
             enable_centered_bias=False,
             label_dimension=1,
             _joint_weights=False,
             config=None,
             feature_engineering_fn=None):
  """Construct a `LinearRegressor` estimator object.

  Args:
    feature_columns: An iterable containing all the feature columns used by
      the model. All items in the set should be instances of classes derived
      from `FeatureColumn`.
    model_dir: Directory to save model parameters, graph, etc. This can
      also be used to load checkpoints from the directory into a estimator
      to continue training a previously saved model.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    optimizer: An instance of `tf.Optimizer` used to train the model. If
      `None`, will use an Ftrl optimizer.
    gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      `tf.clip_by_global_norm` for more details.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    label_dimension: Dimension of the label for multilabels. Defaults to 1.
    _joint_weights: If True use a single (possibly partitioned) variable to
      store the weights. It's faster, but requires all feature columns are
      sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
    config: `RunConfig` object to configure the runtime settings.
    feature_engineering_fn: Feature engineering function. Takes features and
      labels which are the output of `input_fn` and
      returns features and labels which will be fed
      into the model.

  Returns:
    A `LinearRegressor` estimator.
  """
  self._feature_columns = tuple(feature_columns or [])
  assert self._feature_columns
  self._optimizer = optimizer

  chief_hook = None
  if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
      enable_centered_bias):
    enable_centered_bias = False
    logging.warning("centered_bias is not supported with SDCA, "
                    "please disable it explicitly.")
  head = head_lib._regression_head(  # pylint: disable=protected-access
      weight_column_name=weight_column_name,
      label_dimension=label_dimension,
      enable_centered_bias=enable_centered_bias)
  params = {
      "head": head,
      "feature_columns": feature_columns,
      "optimizer": optimizer,
  }
  if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    assert label_dimension == 1, "SDCA only applies for label_dimension=1."
    assert not _joint_weights, ("_joint_weights is incompatible with"
                                " SDCAOptimizer.")
    model_fn = sdca_model_fn
    # The model_fn passes the model parameters to the chief_hook. We then use
    # the hook to update weights and shrink step only on the chief.
    chief_hook = _SdcaUpdateWeightsHook()
    params.update({
        "weight_column_name": weight_column_name,
        "update_weights_hook": chief_hook,
    })
  else:
    model_fn = _linear_model_fn
    params.update({
        "gradient_clip_norm": gradient_clip_norm,
        "joint_weights": _joint_weights,
    })

  super(LinearRegressor, self).__init__(
      model_fn=model_fn,
      model_dir=model_dir,
      config=config,
      params=params,
      feature_engineering_fn=feature_engineering_fn)
Contributor: AliMiraftab, Project: tensorflow, Lines of code: 90, Source: linear.py
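As a hedged usage sketch (again my own, not from linear.py): a LinearRegressor built this way is normally driven through fit/evaluate with an input_fn. The column name "x" and the toy data below are illustrative assumptions for TensorFlow 1.x tf.contrib.learn.

import tensorflow as tf

feature_columns = [tf.contrib.layers.real_valued_column("x")]
regressor = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns)


def input_fn():
  # y = 2x, so the learned weight should approach 2.
  features = {"x": tf.constant([[1.], [2.], [3.], [4.]])}
  labels = tf.constant([[2.], [4.], [6.], [8.]])
  return features, labels

regressor.fit(input_fn=input_fn, steps=100)
print(regressor.evaluate(input_fn=input_fn, steps=1))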
Example 20: __init__
def __init__(self,  # _joint_linear_weights pylint: disable=invalid-name
             model_dir=None,
             weight_column_name=None,
             linear_feature_columns=None,
             linear_optimizer=None,
             _joint_linear_weights=False,
             dnn_feature_columns=None,
             dnn_optimizer=None,
             dnn_hidden_units=None,
             dnn_activation_fn=nn.relu,
             dnn_dropout=None,
             gradient_clip_norm=None,
             enable_centered_bias=None,
             target_dimension=1,
             config=None,
             feature_engineering_fn=None):
  """Initializes a DNNLinearCombinedRegressor instance.

  Args:
    model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into a estimator
      to continue training a previously saved model.
    weight_column_name: A string defining feature column name representing
      weights. It is used to down weight or boost examples during training. It
      will be multiplied by the loss of the example.
    linear_feature_columns: An iterable containing all the feature columns
      used by linear part of the model. All items in the set must be
      instances of classes derived from `FeatureColumn`.
    linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the linear part of the model. If `None`, will use a FTRL optimizer.
    _joint_linear_weights: If True a single (possibly partitioned) variable
      will be used to store the linear model weights. It's faster, but
      requires that all columns are sparse and have the 'sum' combiner.
    dnn_feature_columns: An iterable containing all the feature columns used
      by deep part of the model. All items in the set must be instances of
      classes derived from `FeatureColumn`.
    dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the deep part of the model. If `None`, will use an Adagrad optimizer.
    dnn_hidden_units: List of hidden units per layer. All layers are fully
      connected.
    dnn_activation_fn: Activation function applied to each layer. If None,
      will use `tf.nn.relu`.
    dnn_dropout: When not None, the probability we will drop out
      a given coordinate.
    gradient_clip_norm: A float > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      tf.clip_by_global_norm for more details.
    enable_centered_bias: A bool. If True, estimator will learn a centered
      bias variable for each class. Rest of the model structure learns the
      residual after centered bias.
    target_dimension: TODO(zakaria): dimension of the target for multilabels.
    config: RunConfig object to configure the runtime settings.
    feature_engineering_fn: Feature engineering function. Takes features and
      targets which are the output of `input_fn` and
      returns features and targets which will be fed
      into the model.

  Raises:
    ValueError: If both linear_feature_columns and dnn_features_columns are
      empty at the same time.
  """
  if enable_centered_bias is None:
    enable_centered_bias = True
    _changing_default_center_bias()
  # pylint: disable=protected-access
  head = head_lib._regression_head(
      weight_column_name=weight_column_name,
      target_dimension=target_dimension,
      enable_centered_bias=enable_centered_bias)
  super(DNNLinearCombinedRegressor, self).__init__(
      model_dir=model_dir,
      linear_feature_columns=linear_feature_columns,
      linear_optimizer=linear_optimizer,
      _joint_linear_weights=_joint_linear_weights,
      dnn_feature_columns=dnn_feature_columns,
      dnn_optimizer=dnn_optimizer,
      dnn_hidden_units=dnn_hidden_units,
      dnn_activation_fn=dnn_activation_fn,
      dnn_dropout=dnn_dropout,
      gradient_clip_norm=gradient_clip_norm,
      head=head,
      config=config,
      feature_engineering_fn=feature_engineering_fn,
      default_prediction_key=head_lib.PedictionKey.SCORES)
Contributor: Qstar, Project: tensorflow, Lines of code: 84, Source: dnn_linear_combined.py
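To round this off, a minimal wide-and-deep usage sketch (my own illustration for TensorFlow 1.x tf.contrib.learn; the column names and toy data are assumptions, not taken from the snippet above):

import tensorflow as tf

wide_column = tf.contrib.layers.real_valued_column("wide_feature")
deep_column = tf.contrib.layers.real_valued_column("deep_feature")

regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
    linear_feature_columns=[wide_column],
    dnn_feature_columns=[deep_column],
    dnn_hidden_units=[16, 8])


def input_fn():
  features = {
      "wide_feature": tf.constant([[1.], [2.], [3.]]),
      "deep_feature": tf.constant([[0.5], [1.5], [2.5]]),
  }
  labels = tf.constant([[1.], [2.], [3.]])
  return features, labels

regressor.fit(input_fn=input_fn, steps=50)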
Note: The tensorflow.contrib.learn.python.learn.estimators.head._regression_head examples in this article were collected by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets are taken from open-source projects contributed by their authors, and copyright remains with the original authors; please consult each project's license before using or redistributing the code. Do not repost without permission.