This article collects typical usage examples of the Python function tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner. If you have been wondering how min_max_variable_partitioner is used in practice, what its concrete call patterns look like, or where to find working examples, the curated code samples below should help.
A total of 20 code examples of the min_max_variable_partitioner function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
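Before diving into the examples, here is a minimal, self-contained sketch of what min_max_variable_partitioner does, written against the TF 1.x graph-mode API used throughout this page. The variable name, shape, and sizes are illustrative assumptions, not taken from any example below; `_get_variable_list()` is the same private helper the examples themselves use.

import tensorflow as tf
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope

# A [1024, 64] float32 variable occupies 1024 * 64 * 4 bytes = 256 KiB.
# Requiring at least 64 KiB per slice along axis 0 allows at most
# 256 KiB / 64 KiB = 4 shards; max_partitions caps the count from above.
partitioner = partitioned_variables.min_max_variable_partitioner(
    max_partitions=8, axis=0, min_slice_size=64 << 10)
with variable_scope.variable_scope("demo", partitioner=partitioner):
  v = variable_scope.get_variable(
      "weights", shape=[1024, 64], dtype=tf.float32)
# v is a PartitionedVariable made of min(8, 4) = 4 concrete shards.
print(len(v._get_variable_list()))  # -> 4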
Example 1: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  self._feature_columns = feature_columns

  input_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas,
          min_slice_size=64 << 20))
  with variable_scope.variable_scope(
      self._scope + "/input_from_feature_columns",
      values=features.values(),
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
        features,
        self._get_feature_columns(),
        weight_collections=[self._scope],
        trainable=self._trainable,
        scope=scope)

  hidden_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._num_ps_replicas))
  for layer_id, num_hidden_units in enumerate(self._hidden_units):
    with variable_scope.variable_scope(
        self._scope + "/hiddenlayer_%d" % layer_id,
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._activation_fn,
          variables_collections=[self._scope],
          trainable=self._trainable,
          scope=scope)
      if self._dropout is not None and is_training:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - self._dropout))
    self._add_hidden_layer_summary(net, scope.name)

  with variable_scope.variable_scope(
      self._scope + "/logits",
      values=[net],
      partitioner=hidden_layer_partitioner) as scope:
    logits = layers.fully_connected(
        net,
        self._num_label_columns,
        activation_fn=None,
        variables_collections=[self._scope],
        trainable=self._trainable,
        scope=scope)
  self._add_hidden_layer_summary(logits, "logits")
  return logits
Author: ComeOnGetMe | Project: tensorflow | Lines: 53 | Source: composable_model.py
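A constant worth decoding before the remaining examples: `64 << 20` is a bit-shift spelling of 64 * 2**20 bytes, i.e. 64 MiB. Example 1 therefore partitions the input layer only when each shard would hold at least 64 MiB of parameters, while the hidden layers fall back to the partitioner's default min_slice_size (256 KiB in the TF 1.x signature). A two-line check:

print(64 << 20)          # 67108864
print(64 * 1024 * 1024)  # 67108864, the same 64 MiB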
Example 2: testInitFromPartitionVar
def testInitFromPartitionVar(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1 = _create_partition_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()
      with variable_scope.variable_scope("some_other_scope"):
        my2 = variable_scope.get_variable(
            name="var1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my2_var_list = my2._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "scope/var1": "some_scope/my1",
          "scope/": "some_other_scope/"})

      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
      my2_values = session.run(my2_var_list)
      self.assertAllEqual(my2_values, v1)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"scope/var1": my1_var_list,})

      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
Author: AliMiraftab | Project: tensorflow | Lines: 53 | Source: checkpoint_utils_test.py
Example 3: _test_logits
def _test_logits(self, mode, rnn_units, logits_dimension, features_fn,
                 sequence_feature_columns, context_feature_columns,
                 expected_logits):
  """Tests that the expected logits are calculated."""
  with ops.Graph().as_default():
    # Global step needed for MonitoredSession, which is in turn used to
    # explicitly set variable weights through a checkpoint.
    training_util.create_global_step()
    # Use a variable scope here with 'rnn', emulating the rnn model_fn, so
    # the checkpoint naming is shared.
    with variable_scope.variable_scope('rnn'):
      # Note max_partitions=0: the partitioner never returns fewer than one
      # shard, so this is clamped to a single partition and the input layer
      # is effectively unpartitioned in this test.
      input_layer_partitioner = (
          partitioned_variables.min_max_variable_partitioner(
              max_partitions=0, min_slice_size=64 << 20))
      logit_fn = rnn._rnn_logit_fn_builder(
          output_units=logits_dimension,
          rnn_cell_fn=rnn._make_rnn_cell_fn(rnn_units),
          sequence_feature_columns=sequence_feature_columns,
          context_feature_columns=context_feature_columns,
          input_layer_partitioner=input_layer_partitioner)
      # Features are constructed within this function, otherwise the Tensors
      # containing the features would be defined outside this graph.
      logits = logit_fn(features=features_fn(), mode=mode)
      with monitored_session.MonitoredTrainingSession(
          checkpoint_dir=self._model_dir) as sess:
        self.assertAllClose(expected_logits, sess.run(logits), atol=1e-4)
Author: ThunderQi | Project: tensorflow | Lines: 26 | Source: rnn_test.py
Example 4: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  self._feature_columns = feature_columns
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=self._num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      self._scope,
      values=features.values(),
      partitioner=partitioner) as scope:
    if self._joint_weights:
      logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._get_feature_columns(),
          num_outputs=self._num_label_columns,
          weight_collections=[self._scope],
          scope=scope)
    else:
      logits, _, _ = layers.weighted_sum_from_feature_columns(
          columns_to_tensors=features,
          feature_columns=self._get_feature_columns(),
          num_outputs=self._num_label_columns,
          weight_collections=[self._scope],
          scope=scope)
  return logits
Author: rahimkanji | Project: tensorflow | Lines: 25 | Source: composable_model.py
Example 5: _linear_model_fn
def _linear_model_fn(features, labels, mode, head, feature_columns, optimizer,
                     partitioner, config):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape `[batch_size, logits_dimension]`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `Head` instance.
    feature_columns: An iterable containing all the feature columns used by
      the model.
    optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training. If `None`, will use a FTRL optimizer.
    partitioner: Partitioner for variables.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: mode or params are invalid, or features has the wrong type.
  """
  if not isinstance(features, dict):
    raise ValueError('features should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(features)))

  optimizer = optimizers.get_optimizer_instance(
      optimizer or _get_default_optimizer(feature_columns),
      learning_rate=_LEARNING_RATE)
  num_ps_replicas = config.num_ps_replicas if config else 0

  partitioner = partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))

  with variable_scope.variable_scope(
      'linear',
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):

    logit_fn = _linear_logit_fn_builder(
        units=head.logits_dimension, feature_columns=feature_columns)
    logits = logit_fn(features=features)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizer.minimize(
          loss,
          global_step=training_util.get_global_step())

    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
Author: ChengYuXiang | Project: tensorflow | Lines: 58 | Source: linear.py
Example 6: _linear_model_fn
def _linear_model_fn(features, labels, mode, params, config):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: Dict of `Tensor`.
    labels: `Tensor` of shape `[batch_size, logits_dimension]`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training. If `None`, will use a FTRL optimizer.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If mode or params are invalid.
  """
  head = params['head']
  feature_columns = tuple(params['feature_columns'])
  optimizer = optimizers.get_optimizer_instance(
      params.get('optimizer') or _get_default_optimizer(feature_columns),
      learning_rate=_LEARNING_RATE)
  num_ps_replicas = config.num_ps_replicas if config else 0

  partitioner = params.get('partitioner') or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))

  with variable_scope.variable_scope(
      'linear',
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):

    logits = feature_column_lib.linear_model(
        features=features,
        feature_columns=feature_columns,
        units=head.logits_dimension)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizer.minimize(
          loss,
          global_step=training_util.get_global_step())

    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
Author: adityaatluri | Project: tensorflow | Lines: 57 | Source: linear.py
Example 7: _testMinMaxVariablePartitioner
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
                                   var_name, var_shape, expected_axis_shards,
                                   expected_partitions):
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
  with variable_scope.variable_scope("root", partitioner=partitioner):
    v0 = variable_scope.get_variable(
        var_name, dtype=dtypes.float32, shape=var_shape)
    v0_list = v0._get_variable_list()
    v0_part = v0._get_partitions()
    self.assertEqual(len(v0_list), expected_axis_shards)
    self.assertAllEqual(v0_part, expected_partitions)
Author: AnishShah | Project: tensorflow | Lines: 12 | Source: partitioned_variables_test.py
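To make the helper above concrete, here is one hedged invocation with the expectations worked out by hand (the numbers are computed here, not copied from the original test file): a [2048, 1024] float32 variable occupies 2048 * 1024 * 4 bytes = 8 MiB, so a 256 KiB minimum slice along axis 0 allows exactly 8 MiB / 256 KiB = 32 shards, and max_partitions=100 does not bind.

self._testMinMaxVariablePartitioner(
    max_partitions=100, axis=0, min_slice_size=256 << 10,
    var_name="v0", var_shape=[2048, 1024],
    expected_axis_shards=32, expected_partitions=[32, 1])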
Example 8: _EmbeddingParamsAsPartitionedVariable
def _EmbeddingParamsAsPartitionedVariable(num_shards,
                                          vocab_size,
                                          dtype=dtypes.float32,
                                          shape=None):
  p, params, feed_dict = _EmbeddingParams(
      num_shards, vocab_size, dtype=dtype, shape=shape)
  shape = shape or [10]
  partitioned_variable = variable_scope.get_variable(
      "p",
      shape=[vocab_size] + shape,
      initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_shards, min_slice_size=1))
  return p, partitioned_variable, params, feed_dict
Author: AliMiraftab | Project: tensorflow | Lines: 14 | Source: embedding_ops_test.py
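The helper above hands back the embedding table as a PartitionedVariable so the embedding tests can exercise sharded lookups. A minimal hedged sketch of the same idea, with illustrative names and sizes (setting min_slice_size=1 makes the byte-size bound vacuous, so max_partitions alone decides the shard count):

import tensorflow as tf
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope

params = variable_scope.get_variable(
    "embedding", shape=[1000, 10],
    partitioner=partitioned_variables.min_max_variable_partitioner(
        max_partitions=4, min_slice_size=1))  # -> 4 shards of 250 rows each
ids = tf.constant([0, 7, 42])
# embedding_lookup accepts a PartitionedVariable and gathers across shards.
embedded = embedding_ops.embedding_lookup(params, ids)  # shape [3, 10]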
Example 9: build_model
def build_model(self, features, feature_columns, is_training):
  """See base class."""
  features = self._get_feature_dict(features)
  self._feature_columns = feature_columns

  net = layers.input_from_feature_columns(
      features,
      self._get_feature_columns(),
      weight_collections=[self._weight_collection_name])
  for layer_id, num_hidden_units in enumerate(self._hidden_units):
    with variable_scope.variable_op_scope(
        [net], "hiddenlayer_%d" % layer_id,
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=self._config.num_ps_replicas)) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=self._activation_fn,
          variables_collections=[self._weight_collection_name],
          scope=scope)
      if self._dropout is not None and is_training:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - self._dropout))
    self._add_hidden_layer_summary(net, scope.name)
  with variable_scope.variable_op_scope(
      [net], "dnn_logits",
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=self._config.num_ps_replicas)) as scope:
    logits = layers.fully_connected(
        net,
        self._num_label_columns,
        activation_fn=None,
        variables_collections=[self._weight_collection_name],
        scope=scope)
  self._add_hidden_layer_summary(logits, "dnn_logits")
  return logits
Author: Brandon-Tai | Project: tensorflow | Lines: 37 | Source: dnn_linear_combined.py
Example 10: _dnn_logits
def _dnn_logits(self, features, is_training=False):
    net = layers.input_from_feature_columns(
        features, self._get_dnn_feature_columns(), weight_collections=[self._dnn_weight_collection]
    )
    for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
        with variable_scope.variable_op_scope(
            [net],
            "hiddenlayer_%d" % layer_id,
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=self._config.num_ps_replicas
            ),
        ) as scope:
            net = layers.fully_connected(
                net,
                num_hidden_units,
                activation_fn=self._dnn_activation_fn,
                variables_collections=[self._dnn_weight_collection],
                scope=scope,
            )
            if self._dnn_dropout is not None and is_training:
                net = layers.dropout(net, keep_prob=(1.0 - self._dnn_dropout))
        self._add_hidden_layer_summary(net, scope.name)
    with variable_scope.variable_op_scope(
        [net],
        "dnn_logit",
        partitioner=partitioned_variables.min_max_variable_partitioner(max_partitions=self._config.num_ps_replicas),
    ) as scope:
        logit = layers.fully_connected(
            net,
            self._target_column.num_label_columns,
            activation_fn=None,
            variables_collections=[self._dnn_weight_collection],
            scope=scope,
        )
    self._add_hidden_layer_summary(logit, "dnn_logit")
    return logit
Author: 285219011 | Project: liuwenfeng | Lines: 36 | Source: dnn_linear_combined.py
Example 11: _create_partition_checkpoints
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  # The variable is created under "scope" so its checkpoint name is
  # "scope/var1", matching the mappings used by Examples 2 and 14 above.
  with variable_scope.variable_scope("scope"):
    v1 = variable_scope.get_variable(
        name="var1",
        shape=[100, 100],
        initializer=init_ops.truncated_normal_initializer(0.5),
        partitioner=partitioned_variables.min_max_variable_partitioner(
            max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
Author: willdzeng | Project: tensorflow | Lines: 18 | Source: checkpoint_utils_test.py
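Because the Saver records partition metadata, the five shards created above should be stored under the single logical name scope/var1 with its full [100, 100] shape, which is what lets Example 14 restore the same values under a different partitioning. A hedged way to confirm this against a checkpoint produced by the helper:

from tensorflow.python.training import checkpoint_utils

# Prints (name, shape) pairs; a partitioned variable is expected to show up
# once with its full shape rather than as five per-shard entries.
for name, shape in checkpoint_utils.list_variables(checkpoint_dir):
  print(name, shape)  # e.g. scope/var1 [100, 100]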
Example 12: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Estimator's linear model_fn."""
  n_classes = params["n_classes"]
  weight_column_name = params["weight_column_name"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  enable_centered_bias = params.get("enable_centered_bias", True)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  num_label_columns = 1 if n_classes == 2 else n_classes
  loss_fn = _softmax_cross_entropy_loss
  if n_classes == 2:
    loss_fn = _log_loss_with_two_classes

  feat_values = (features.values() if isinstance(features, dict)
                 else [features])
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      feat_values, "linear", partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=["linear"],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=["linear"],
              scope=scope))

  if enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(num_label_columns))

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = loss_fn(logits, targets)
    if weight_column_name:
      weight_tensor = array_ops.reshape(
          math_ops.to_float(features[weight_column_name]), shape=(-1,))
      loss = _weighted_loss(loss, weight_tensor)
    else:
      loss = math_ops.reduce_mean(loss, name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_ops = []
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()

    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    train_ops.append(optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))
    if enable_centered_bias:
      train_ops.append(
          _centered_bias_step(targets, loss_fn, num_label_columns))

  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = math_ops.argmax(logits, 1)

  return predictions, loss, control_flow_ops.group(*train_ops)
Author: KalraA | Project: tensorflow | Lines: 79 | Source: linear.py
Example 13: _linear_model_fn
def _linear_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use a gradient-based optimizer.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training. If `None`, will use a FTRL optimizer.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    A `ModelFnOps` instance.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or _get_default_optimizer(feature_columns)
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = config.num_ps_replicas if config else 0
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)

  with variable_scope.variable_scope(
      parent_scope, values=features.values(), partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))

  def _train_op_fn(loss):
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (_get_optimizer(optimizer).apply_gradients(
        zip(grads, my_vars), global_step=global_step))

  return head.head_ops(features, labels, mode, _train_op_fn, logits)
Author: AliMiraftab | Project: tensorflow | Lines: 75 | Source: linear.py
Example 14: testInitFromPartitionVar
def testInitFromPartitionVar(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1 = _create_partition_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.zeros_initializer(),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()
      # Create another variable with different partitions than the variable in
      # the checkpoint.
      with variable_scope.variable_scope("some_other_scope"):
        my2 = variable_scope.get_variable(
            name="var1",
            shape=[100, 100],
            initializer=init_ops.zeros_initializer(),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=16 << 10))
        my2_var_list = my2._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "scope/var1": "some_scope/my1",
          "scope/": "some_other_scope/"})

      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
      my2_values = session.run(my2_var_list)
      # Verify we created different number of partitions.
      self.assertNotEquals(len(my2_values), len(v1))
      # Verify the values were correctly initialized in spite of different
      # partitions.
      full_my2_values = np.concatenate(my2_values, axis=0)
      full_v1_values = np.concatenate(v1, axis=0)
      self.assertAllEqual(full_my2_values, full_v1_values)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"scope/var1": my1_var_list,})

      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
Author: QiangCai | Project: tensorflow | Lines: 61 | Source: checkpoint_utils_test.py
Example 15: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Linear classifier model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    targets: `Tensor` of shape [batch_size, 1] or [batch_size] target labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * n_classes: number of target classes.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params["feature_columns"]
  n_classes = params["n_classes"]
  weight_column_name = params["weight_column_name"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  enable_centered_bias = params.get("enable_centered_bias", True)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  num_label_columns = 1 if n_classes == 2 else n_classes
  loss_fn = _softmax_cross_entropy_loss
  if n_classes == 2:
    loss_fn = _log_loss_with_two_classes

  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      features.values(), parent_scope, partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=num_label_columns,
              weight_collections=[parent_scope],
              scope=scope))

  if enable_centered_bias:
    logits = nn.bias_add(logits, _centered_bias(num_label_columns))

  loss = None
  if mode != estimator.ModeKeys.INFER:
    loss = loss_fn(logits, targets)
    if weight_column_name:
      weight_tensor = array_ops.reshape(
          math_ops.to_float(features[weight_column_name]), shape=(-1,))
      loss = _weighted_loss(loss, weight_tensor)
    else:
      loss = math_ops.reduce_mean(loss, name="loss")
    logging_ops.scalar_summary("loss", loss)

  train_ops = []
  if mode == estimator.ModeKeys.TRAIN:
    global_step = contrib_variables.get_global_step()

    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    train_ops.append(optimizer.apply_gradients(
        #......... rest of the code omitted .........
Author: MrCrumpets | Project: tensorflow | Lines: 101 | Source: linear.py
Example 16: _linear_classifier_model_fn
def _linear_classifier_model_fn(features, targets, mode, params):
  """Linear classifier model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    targets: `Tensor` of shape [batch_size, 1] or [batch_size] target labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * n_classes: number of target classes.
      * weight_column_name: A string defining the weight feature column, or
        None if there are no weights.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio.
      * enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.

  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  num_ps_replicas = params.get("num_ps_replicas", 0)
  joint_weights = params.get("joint_weights", False)
  head = params.get("head", None)
  if not head:
    # TODO(zakaria): Remove these params and make head mandatory
    head = head_lib._multi_class_head(  # pylint: disable=protected-access
        params.get("n_classes"),
        weight_column_name=params["weight_column_name"],
        enable_centered_bias=params.get("enable_centered_bias", False))

  if not isinstance(features, dict):
    features = {"": features}

  parent_scope = "linear"
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_op_scope(
      features.values(), parent_scope, partitioner=partitioner) as scope:
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))

  def _train_op_fn(loss):
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))

  return head.head_ops(features, targets, mode, _train_op_fn, logits)
Author: 821760408-sp | Project: tensorflow | Lines: 88 | Source: linear.py
Example 17: _dnn_linear_combined_model_fn
def _dnn_linear_combined_model_fn(features,
                                  labels,
                                  mode,
                                  head,
                                  linear_feature_columns=None,
                                  linear_optimizer='Ftrl',
                                  dnn_feature_columns=None,
                                  dnn_optimizer='Adagrad',
                                  dnn_hidden_units=None,
                                  dnn_activation_fn=nn.relu,
                                  dnn_dropout=None,
                                  input_layer_partitioner=None,
                                  config=None):
  """Deep Neural Net and Linear combined model_fn.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `Head` instance.
    linear_feature_columns: An iterable containing all the feature columns used
      by the Linear model.
    linear_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the Linear model. Defaults to the Ftrl
      optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      the DNN model.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN model. Defaults to the Adagrad
      optimizer.
    dnn_hidden_units: List of hidden units per DNN layer.
    dnn_activation_fn: Activation function applied to each DNN layer. If
      `None`, will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability we will drop out a given DNN
      coordinate.
    input_layer_partitioner: Partitioner for input layer.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If both `linear_feature_columns` and `dnn_features_columns`
      are empty at the same time, or `input_layer_partitioner` is missing,
      or features has the wrong type.
  """
  if not isinstance(features, dict):
    raise ValueError('features should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(features)))
  if not linear_feature_columns and not dnn_feature_columns:
    raise ValueError(
        'Either linear_feature_columns or dnn_feature_columns must be defined.')

  num_ps_replicas = config.num_ps_replicas if config else 0
  input_layer_partitioner = input_layer_partitioner or (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))

  # Build DNN Logits.
  dnn_parent_scope = 'dnn'

  if not dnn_feature_columns:
    dnn_logits = None
  else:
    dnn_optimizer = optimizers.get_optimizer_instance(
        dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
    _check_no_sync_replicas_optimizer(dnn_optimizer)
    if not dnn_hidden_units:
      raise ValueError(
          'dnn_hidden_units must be defined when dnn_feature_columns is '
          'specified.')
    dnn_partitioner = (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas))
    with variable_scope.variable_scope(
        dnn_parent_scope,
        values=tuple(six.itervalues(features)),
        partitioner=dnn_partitioner):

      dnn_logit_fn = dnn._dnn_logit_fn_builder(  # pylint: disable=protected-access
          units=head.logits_dimension,
          hidden_units=dnn_hidden_units,
          feature_columns=dnn_feature_columns,
          activation_fn=dnn_activation_fn,
          dropout=dnn_dropout,
          input_layer_partitioner=input_layer_partitioner)
      dnn_logits = dnn_logit_fn(features=features, mode=mode)

  linear_parent_scope = 'linear'

  if not linear_feature_columns:
    linear_logits = None
  else:
    linear_optimizer = optimizers.get_optimizer_instance(
        linear_optimizer,
        learning_rate=_linear_learning_rate(len(linear_feature_columns)))
    _check_no_sync_replicas_optimizer(linear_optimizer)
    #......... rest of the code omitted .........
Author: LiuCKind | Project: tensorflow | Lines: 101 | Source: dnn_linear_combined.py
Example 18: _dnn_model_fn
def _dnn_model_fn(features, labels, mode, params):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
#......... rest of the example is truncated in the source page .........