This article collects typical usage examples of the Python function tensorflow.contrib.layers.weighted_sum_from_feature_columns. If you have been wondering what exactly weighted_sum_from_feature_columns does and how to call it, the curated examples below should help.
The following presents 18 code examples of weighted_sum_from_feature_columns, sorted by popularity by default.
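Before the examples, here is a minimal, self-contained sketch of what the function computes: given a dict mapping column names to tensors and a list of feature columns, it returns the weighted-sum logits, a dict mapping each column to its weight variables, and the bias variable. This is an illustrative sketch against the TensorFlow 1.x contrib API; the feature names and values are made up, not taken from the examples below.

import tensorflow as tf
from tensorflow.contrib import layers

# Hypothetical feature columns (names are illustrative only).
age = layers.real_valued_column("age")
country = layers.sparse_column_with_hash_bucket("country", hash_bucket_size=100)

features = {
    "age": tf.constant([[23.0], [45.0]]),
    "country": tf.SparseTensor(indices=[[0, 0], [1, 0]],
                               values=["US", "CA"],
                               dense_shape=[2, 1]),
}

# Returns (logits, {feature_column: [weight_vars]}, bias_var).
logits, columns_to_vars, bias = layers.weighted_sum_from_feature_columns(
    columns_to_tensors=features,
    feature_columns=[age, country],
    num_outputs=1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(logits))  # linear model output, shape [2, 1]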
Example 1: _get_train_ops
def _get_train_ops(self, features, targets):
  """See base class."""
  self._validate_linear_feature_columns(features)
  if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
    return super(LinearClassifier, self)._get_train_ops(features, targets)

  # SDCA currently supports binary classification only.
  if self._target_column.num_label_columns > 2:
    raise ValueError(
        "SDCA does not currently support multi-class classification.")
  global_step = contrib_variables.get_global_step()
  assert global_step

  logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=self._linear_feature_columns,
      num_outputs=self._target_column.num_label_columns,
      weight_collections=[self._linear_weight_collection],
      name="linear")
  with ops.control_dependencies([self._centered_bias()]):
    loss = self._loss(logits, targets, features)
  logging_ops.scalar_summary("loss", loss)

  train_ops = self._linear_optimizer.get_train_step(
      self._linear_feature_columns, self._target_column.weight_column_name,
      "logistic_loss", features, targets, columns_to_variables, global_step)
  return train_ops, loss
Developer: 363158858 | Project: tensorflow | Lines: 28 | Source: linear.py
Example 2: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  self._feature_columns = feature_columns
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=self._num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      self._scope,
      values=features.values(),
      partitioner=partitioner) as scope:
    if self._joint_weights:
      logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._get_feature_columns(),
          num_outputs=self._num_label_columns,
          weight_collections=[self._scope],
          scope=scope)
    else:
      logits, _, _ = layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._get_feature_columns(),
          num_outputs=self._num_label_columns,
          weight_collections=[self._scope],
          scope=scope)
  return logits
Developer: rahimkanji | Project: tensorflow | Lines: 25 | Source: composable_model.py
Example 3: _get_train_ops
def _get_train_ops(self, features, targets):
  """See base class."""
  if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
    return super(LinearRegressor, self)._get_train_ops(features, targets)
  assert not self._joint_weights, ("_joint_weights is incompatible with"
                                   " SDCAOptimizer.")
  global_step = contrib_variables.get_or_create_global_step()

  logits, columns_to_variables, bias = (
      layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._linear_feature_columns,
          num_outputs=self._head.logits_dimension,
          weight_collections=[self._linear_model.get_scope_name()],
          scope=self._linear_model.get_scope_name()))
  _add_bias_column(self._linear_feature_columns, features, bias, targets,
                   columns_to_variables)

  def _train_op_fn(unused_loss):
    sdca_model, train_op = self._linear_optimizer.get_train_step(
        columns_to_variables, self._weight_column_name,
        self._loss_type(), features, targets, global_step)
    return sdca_model.update_weights(train_op)

  model_fn_ops = self._head.head_ops(features, targets,
                                     estimator.ModeKeys.TRAIN, _train_op_fn,
                                     logits=logits)
  return model_fn_ops.training_op, model_fn_ops.loss
Developer: MrCrumpets | Project: tensorflow | Lines: 28 | Source: linear.py
Example 4: _get_train_ops
def _get_train_ops(self, features, targets):
  """See base class."""
  if not isinstance(self._linear_optimizer, sdca_optimizer.SDCAOptimizer):
    return super(LinearRegressor, self)._get_train_ops(features, targets)
  assert not self._joint_weights, ("_joint_weights is incompatible with"
                                   " SDCAOptimizer.")
  global_step = contrib_variables.get_or_create_global_step()

  logits, columns_to_variables, bias = (
      layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._linear_feature_columns,
          num_outputs=self._target_column.num_label_columns,
          weight_collections=[self._linear_model.get_scope_name()],
          scope=self._linear_model.get_scope_name()))
  with ops.control_dependencies([self._centered_bias()]):
    loss = self._target_column.loss(logits, targets, features)
  logging_ops.scalar_summary("loss", loss)

  _add_bias_column(self._linear_feature_columns, features, bias, targets,
                   columns_to_variables)

  train_op = self._linear_optimizer.get_train_step(
      columns_to_variables, self._target_column.weight_column_name,
      self._loss_type(), features, targets, global_step)
  return train_op, loss
Developer: KalraA | Project: tensorflow | Lines: 26 | Source: linear.py
Example 5: _linear_logits
def _linear_logits(self, features):
  logits, _, _ = layers.weighted_sum_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=self._get_linear_feature_columns(),
      num_outputs=self._num_label_columns(),
      weight_collections=[self._linear_weight_collection],
      name="linear")
  return logits
Developer: 01- | Project: tensorflow | Lines: 8 | Source: dnn_linear_combined.py
Example 6: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  features = self._get_feature_dict(features)
  self._feature_columns = feature_columns

  logits, _, _ = layers.weighted_sum_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=self._get_feature_columns(),
      num_outputs=self._num_label_columns,
      weight_collections=[self._weight_collection_name],
      name="linear")
  return logits
Developer: Brandon-Tai | Project: tensorflow | Lines: 12 | Source: dnn_linear_combined.py
Example 7: sdca_classifier_model_fn
def sdca_classifier_model_fn(features, targets, mode, params):
  """Estimator's linear model_fn."""
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  loss_type = params["loss_type"]
  enable_centered_bias = params.get("enable_centered_bias", True)

  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")

  loss_fn = {
      "logistic_loss": _log_loss_with_two_classes,
      "hinge_loss": _hinge_loss,
  }[loss_type]

  logits, columns_to_variables, bias = (
      layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=feature_columns,
          num_outputs=1))
  if enable_centered_bias:
    _add_bias_column(feature_columns, features, bias, targets,
                     columns_to_variables)

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = math_ops.reduce_mean(loss_fn(logits, targets), name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_op = None
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()
    train_op = optimizer.get_train_step(
        columns_to_variables, weight_column_name, loss_type, features,
        targets, global_step)

  predictions = {}
  predictions[_LOGISTIC] = math_ops.sigmoid(logits)
  logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = math_ops.argmax(logits, 1)

  return predictions, loss, train_op
Developer: apollos | Project: tensorflow | Lines: 45 | Source: linear.py
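Many of these snippets (examples 1, 3, 4, 7, 8, 11, 13, and 15) branch on whether the optimizer is an SDCAOptimizer. For reference, here is a hedged sketch of how such an optimizer is typically constructed; the "example_id" column name and the regularization values are illustrative, not taken from these examples:

import tensorflow as tf

# The input_fn must supply an "example_id" feature that uniquely
# identifies each training example (the name here is illustrative).
sdca = tf.contrib.linear_optimizer.SDCAOptimizer(
    example_id_column="example_id",
    num_loss_partitions=1,
    symmetric_l1_regularization=0.0,
    symmetric_l2_regularization=1.0)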
Example 8: _get_linear_train_and_loss_ops
def _get_linear_train_and_loss_ops(features, target, linear_feature_columns,
                                   target_column, linear_optimizer, loss_type,
                                   centered_bias, scope_name):
  """Returns train and loss ops for SDCAOptimizer."""
  global_step = contrib_variables.get_global_step()
  assert global_step

  logits, columns_to_variables, _ = layers.weighted_sum_from_feature_columns(
      columns_to_tensors=features,
      feature_columns=linear_feature_columns,
      num_outputs=target_column.num_label_columns,
      weight_collections=[scope_name],
      scope=scope_name)
  with ops.control_dependencies([centered_bias]):
    loss = target_column.loss(logits, target, features)
  logging_ops.scalar_summary("loss", loss)

  train_op = linear_optimizer.get_train_step(linear_feature_columns,
                                             target_column.weight_column_name,
                                             loss_type, features, target,
                                             columns_to_variables, global_step)
  return train_op, loss
Developer: JamesFysh | Project: tensorflow | Lines: 22 | Source: linear.py
Example 9: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Estimator's linear model_fn."""
  n_classes = params["n_classes"]
  weight_column_name = params["weight_column_name"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  enable_centered_bias = params.get("enable_centered_bias", True)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  num_label_columns = 1 if n_classes == 2 else n_classes
  loss_fn = _softmax_cross_entropy_loss
  if n_classes == 2:
    loss_fn = _log_loss_with_two_classes

  feat_values = (features.values() if isinstance(features, dict)
                 else [features])
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      feat_values, "linear", partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=["linear"],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=["linear"],
              scope=scope))

  if enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(num_label_columns))

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = loss_fn(logits, targets)
    if weight_column_name:
      weight_tensor = array_ops.reshape(
          math_ops.to_float(features[weight_column_name]), shape=(-1,))
      loss = _weighted_loss(loss, weight_tensor)
    else:
      loss = math_ops.reduce_mean(loss, name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_ops = []
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    train_ops.append(optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))
    if enable_centered_bias:
      train_ops.append(
          _centered_bias_step(targets, loss_fn, num_label_columns))

  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = math_ops.argmax(logits, 1)

  return predictions, loss, control_flow_ops.group(*train_ops)
Developer: KalraA | Project: tensorflow | Lines: 79 | Source: linear.py
Example 10: _linear_model_fn
def _linear_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training. If `None`, will use a FTRL optimizer.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = config.num_ps_replicas if config else 0
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      parent_scope, values=features.values(), partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))

  def _train_op_fn(loss):
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (_get_optimizer(optimizer).apply_gradients(
        zip(grads, my_vars), global_step=global_step))

  return head.head_ops(features, labels, mode, _train_op_fn, logits)
Developer: AliMiraftab | Project: tensorflow | Lines: 75 | Source: linear.py
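A model_fn like the one above is not called directly; it is handed to a tf.contrib.learn Estimator together with the params dict its docstring describes. A minimal wiring sketch, assuming head, feature_columns, and my_input_fn are built elsewhere (all three names are placeholders):

import tensorflow as tf

params = {
    "head": head,                      # a contrib.learn Head instance
    "feature_columns": feature_columns,
    "optimizer": "Ftrl",               # string, Optimizer, or callable
    "gradient_clip_norm": None,
    "joint_weights": False,
}
estimator = tf.contrib.learn.Estimator(model_fn=_linear_model_fn,
                                       params=params)
estimator.fit(input_fn=my_input_fn, steps=100)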
Example 11: sdca_model_fn
def sdca_model_fn(features, labels, mode, params):
  """A model_fn for linear models that use the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
        `_RegressionHead` or `_MultiClassHead`.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: An `SDCAOptimizer` instance.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
        model weights.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
    ValueError: If the type of head is neither `_BinarySvmHead`, nor
      `_RegressionHead` nor `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)

  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")

  # pylint: disable=protected-access
  if isinstance(head, head_lib._BinarySvmHead):
    loss_type = "hinge_loss"
  elif isinstance(
      head, (head_lib._MultiClassHead, head_lib._BinaryLogisticHead)):
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(head))
  # pylint: enable=protected-access

  parent_scope = "linear"
  with variable_scope.variable_op_scope(
      features.values(), parent_scope) as scope:
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))
    _add_bias_column(feature_columns, features, bias, labels,
                     columns_to_variables)

  def _train_op_fn(unused_loss):
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
                                                    weight_column_name,
                                                    loss_type, features,
                                                    labels, global_step)
    if update_weights_hook is not None:
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op

  model_fn_ops = head.head_ops(features, labels, mode, _train_op_fn, logits)
  if update_weights_hook is not None:
    return model_fn_ops._replace(
        training_chief_hooks=(model_fn_ops.training_chief_hooks +
                              [update_weights_hook]))
  return model_fn_ops
Developer: AliMiraftab | Project: tensorflow | Lines: 81 | Source: linear.py
Example 12: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Linear classifier model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    targets: `Tensor` of shape [batch_size, 1] or [batch_size] target labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * n_classes: number of target classes.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)
  head = params.get("head", None)
  if not head:
    # TODO(zakaria): Remove these params and make head mandatory
    head = head_lib._multi_class_head(  # pylint: disable=protected-access
        params.get("n_classes"),
        weight_column_name=params["weight_column_name"],
        enable_centered_bias=params.get("enable_centered_bias", False))

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      features.values(), parent_scope, partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))

  def _train_op_fn(loss):
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))

  return head.head_ops(features, targets, mode, _train_op_fn, logits)
Developer: 821760408-sp | Project: tensorflow | Lines: 88 | Source: linear.py
Example 13: sdca_model_fn
def sdca_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` with values in the set {0, 1}.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
        `_RegressionHead` or `_BinaryLogisticHead`.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * l1_regularization: Global (across all examples) L1-regularization
        parameter.
      * l2_regularization: Global (across all examples) L2-regularization
        parameter.
      * num_loss_partitions: Number of partitions of the global loss function
        optimized by `SDCAOptimizer`.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
        model weights.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If the type of head is not one of `_BinarySvmHead`,
      `_RegressionHead` or `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  example_id_column = params["example_id_column"]
  l1_regularization = params["l1_regularization"]
  l2_regularization = params["l2_regularization"]
  num_loss_partitions = params["num_loss_partitions"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)

  loss_type = None
  if isinstance(head, head_lib._BinarySvmHead):  # pylint: disable=protected-access
    loss_type = "hinge_loss"
  elif isinstance(head, head_lib._BinaryLogisticHead):  # pylint: disable=protected-access
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):  # pylint: disable=protected-access
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(type(head)))

  assert head.logits_dimension == 1, (
      "SDCA only applies to logits_dimension=1.")

  # Update num_loss_partitions based on number of workers.
  n_loss_partitions = num_loss_partitions or max(1, config.num_worker_replicas)
  optimizer = sdca_optimizer.SDCAOptimizer(
      example_id_column=example_id_column,
      num_loss_partitions=n_loss_partitions,
      symmetric_l1_regularization=l1_regularization,
      symmetric_l2_regularization=l2_regularization)

  parent_scope = "linear"
  with variable_scope.variable_op_scope(features.values(),
                                        parent_scope) as scope:
    features = features.copy()
    features.update(layers.transform_features(features, feature_columns))
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))
    _add_bias_column(feature_columns, features, bias, columns_to_variables)

  def _train_op_fn(unused_loss):
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(
        columns_to_variables, weight_column_name, loss_type, features, labels,
        global_step)
    if update_weights_hook is not None:
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op

  model_fn_ops = head.create_model_fn_ops(
      features=features,
      labels=labels,
      mode=mode,
      train_op_fn=_train_op_fn,
      logits=logits)
  if update_weights_hook is not None:
    return model_fn_ops._replace(training_chief_hooks=(
        model_fn_ops.training_chief_hooks + [update_weights_hook]))
  return model_fn_ops
Developer: AlbertXiebnu | Project: tensorflow | Lines: 99 | Source: sdca_estimator.py
Example 14: _dnn_linear_combined_model_fn
#......... part of the code omitted here .........
          partitioned_variables.min_max_variable_partitioner(
              max_partitions=num_ps_replicas))
    for layer_id, num_hidden_units in enumerate(dnn_hidden_units):
      with variable_scope.variable_scope(
          dnn_parent_scope + "/hiddenlayer_%d" % layer_id,
          values=[net],
          partitioner=hidden_layer_partitioner) as scope:
        net = layers.fully_connected(
            net,
            num_hidden_units,
            activation_fn=dnn_activation_fn,
            variables_collections=[dnn_parent_scope],
            scope=scope)
        if dnn_dropout is not None and mode == estimator.ModeKeys.TRAIN:
          net = layers.dropout(
              net,
              keep_prob=(1.0 - dnn_dropout))
      # TODO(b/31209633): Consider adding summary before dropout.
      _add_hidden_layer_summary(net, scope.name)

    with variable_scope.variable_scope(
        dnn_parent_scope + "/logits",
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      dnn_logits = layers.fully_connected(
          net,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[dnn_parent_scope],
          scope=scope)
    _add_hidden_layer_summary(dnn_logits, scope.name)

  # Build Linear logits.
  linear_parent_scope = "linear"
  if not linear_feature_columns:
    linear_logits = None
  else:
    linear_partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=num_ps_replicas,
        min_slice_size=64 << 20)
    with variable_scope.variable_scope(
        linear_parent_scope,
        values=features.values(),
        partitioner=linear_partitioner) as scope:
      if joint_linear_weights:
        linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=linear_feature_columns,
            num_outputs=head.logits_dimension,
            weight_collections=[linear_parent_scope],
            scope=scope)
      else:
        linear_logits, _, _ = layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=linear_feature_columns,
            num_outputs=head.logits_dimension,
            weight_collections=[linear_parent_scope],
            scope=scope)

  # Combine logits and build full model.
  if dnn_logits is not None and linear_logits is not None:
    logits = dnn_logits + linear_logits
  elif dnn_logits is not None:
    logits = dnn_logits
  else:
    logits = linear_logits

  def _make_training_op(training_loss):
    """Training op for the DNN linear combined model."""
    train_ops = []
    if dnn_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=contrib_variables.get_global_step(),
              learning_rate=_DNN_LEARNING_RATE,
              optimizer=_get_optimizer(dnn_optimizer),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(dnn_parent_scope),
              name=dnn_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[]))
    if linear_logits is not None:
      train_ops.append(
          optimizers.optimize_loss(
              loss=training_loss,
              global_step=contrib_variables.get_global_step(),
              learning_rate=_linear_learning_rate(len(linear_feature_columns)),
              optimizer=_get_optimizer(linear_optimizer),
              clip_gradients=gradient_clip_norm,
              variables=ops.get_collection(linear_parent_scope),
              name=linear_parent_scope,
              # Empty summaries, because head already logs "loss" summary.
              summaries=[]))
    return control_flow_ops.group(*train_ops)

  return head.head_ops(
      features, labels, mode, _make_training_op, logits=logits)
Developer: DavidNemeskey | Project: tensorflow | Lines: 101 | Source: dnn_linear_combined.py
Example 15: sdca_classifier_model_fn
def sdca_classifier_model_fn(features, targets, mode, params):
  """Linear classifier model_fn that uses the SDCA optimizer.

  Args:
    features: A dict of `Tensor` keyed by column name.
    targets: `Tensor` of shape [batch_size, 1] or [batch_size] target labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: An `SDCAOptimizer` instance.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * loss_type: A string. Must be either "logistic_loss" or "hinge_loss".
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
        model weights.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.

  Raises:
    ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  loss_type = params["loss_type"]
  update_weights_hook = params.get("update_weights_hook")

  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")

  loss_fn = {
      "logistic_loss": _log_loss_with_two_classes,
      "hinge_loss": _hinge_loss,
  }[loss_type]

  logits, columns_to_variables, bias = (
      layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=feature_columns,
          num_outputs=1))
  _add_bias_column(feature_columns, features, bias, targets,
                   columns_to_variables)

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = math_ops.reduce_mean(loss_fn(logits, targets), name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_op = None
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
                                                    weight_column_name,
                                                    loss_type, features,
                                                    targets, global_step)
    if update_weights_hook is not None:
      update_weights_hook.set_parameters(sdca_model, train_op)

  predictions = {}
  predictions[_LOGISTIC] = math_ops.sigmoid(logits)
  logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = math_ops.argmax(logits, 1)

  return predictions, loss, train_op
Developer: MrCrumpets | Project: tensorflow | Lines: 74 | Source: linear.py
Example 16: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Linear classifier model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    targets: `Tensor` of shape [batch_size, 1] or [batch_size] target labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * n_classes: number of target classes.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params["feature_columns"]
  n_classes = params["n_classes"]
  weight_column_name = params["weight_column_name"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  enable_centered_bias = params.get("enable_centered_bias", True)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  num_label_columns = 1 if n_classes == 2 else n_classes
  loss_fn = _softmax_cross_entropy_loss
  if n_classes == 2:
    loss_fn = _log_loss_with_two_classes

  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      features.values(), parent_scope, partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=[parent_scope],
              scope=scope))

  if enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(num_label_columns))

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = loss_fn(logits, targets)
    if weight_column_name:
      weight_tensor = array_ops.reshape(
          math_ops.to_float(features[weight_column_name]), shape=(-1,))
      loss = _weighted_loss(loss, weight_tensor)
    else:
      loss = math_ops.reduce_mean(loss, name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_ops = []
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    train_ops.append(optimizer.apply_gradients(
#......... part of the code omitted here .........
Developer: MrCrumpets | Project: tensorflow | Lines: 101 | Source: linear.py
Example 17: _dnn_linear_combined_model_fn
#......... part of the code omitted here .........
        net = layers.dropout(
            net,
            keep_prob=(1.0 - dnn_dropout))
      # TODO(b/31209633): Consider adding summary before dropout.
      _add_layer_summary(net, dnn_hidden_layer_scope.name)

    with variable_scope.variable_scope(
        "logits",
        values=(net,)) as dnn_logits_scope:
      dnn_logits = layers.fully_connected(
          net,
          head.logits_dimension,
          activation_fn=None,
          variables_collections=[dnn_parent_scope],
          scope=dnn_logits_scope)
    _add_layer_summary(dnn_logits, dnn_logits_scope.name)

  # Build Linear logits.
  linear_parent_scope = "linear"
  if not linear_feature_columns:
    linear_logits = None
  else:
    linear_partitioner = partitioned_variables.min_max_variable_partitioner(
        max_partitions=num_ps_replicas,
        min_slice_size=64 << 20)
    with variable_scope.variable_scope(
        linear_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=linear_partitioner) as scope:
      if all(isinstance(fc, feature_column_lib._FeatureColumn)  # pylint: disable=protected-access
             for fc in linear_feature_columns):
        if joint_linear_weights:
          linear_logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=linear_feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[linear_parent_scope],
              scope=scope)
        else:
          linear_logits, _, _ = layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
#......... part of the code omitted here .........