This article collects typical usage examples of the Python function tensorflow.contrib.layers.python.layers.optimizers.optimize_loss. If you are wondering what optimize_loss actually does, how to call it, or where to find it used in real code, the curated examples below should help.
The following presents 20 code examples of the optimize_loss function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
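Before the examples, a minimal sketch of the basic call may help orient the reader. It assumes a TensorFlow 1.x environment where tf.contrib is still available; the toy loss and the names x and w are illustrative and do not come from any example below.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import optimizers as optimizers_lib

# A toy scalar loss over one trainable variable (illustrative only).
x = tf.placeholder(tf.float32, [])
w = tf.get_variable("w", [], initializer=tf.constant_initializer(10.0))
loss = tf.abs(w * x)

# optimize_loss bundles gradient computation, optional clipping and decay,
# the optimizer's apply step, and the global-step increment into one op.
train_op = optimizers_lib.optimize_loss(
    loss,
    global_step=tf.train.get_or_create_global_step(),
    learning_rate=0.1,
    optimizer="SGD")  # a string name, an Optimizer class, or an instance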
Example 1: testBadSummaries
def testBadSummaries(self):
  with ops.Graph().as_default() as g, self.test_session(graph=g):
    _, _, loss, global_step = _setup_model()
    with self.assertRaises(ValueError):
      optimizers_lib.optimize_loss(
          loss, global_step, learning_rate=0.1, optimizer="SGD",
          summaries=["loss", "bad_summary"])
Author: Jackhuang945 | Project: tensorflow | Lines: 7 | Source: optimizers_test.py
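Which summary names are legal is defined by the optimizers_lib.OPTIMIZER_SUMMARIES list (in contrib versions of this era, roughly "learning_rate", "loss", "gradients", and "gradient_norm"); any name outside that list, such as "bad_summary" here, is rejected with ValueError.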
Example 2: _make_training_op
def _make_training_op(training_loss):
  """Training op for the DNN linear combined model."""
  train_ops = []
  if dnn_logits is not None:
    train_ops.append(
        optimizers.optimize_loss(
            loss=training_loss,
            global_step=contrib_variables.get_global_step(),
            learning_rate=_DNN_LEARNING_RATE,
            optimizer=_get_optimizer(dnn_optimizer),
            gradient_multipliers=_extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                embedding_lr_multipliers, dnn_parent_scope,
                dnn_input_scope.name),
            clip_gradients=gradient_clip_norm,
            variables=ops.get_collection(dnn_parent_scope),
            name=dnn_parent_scope,
            # Empty summaries, because head already logs "loss" summary.
            summaries=[]))
  if linear_logits is not None:
    train_ops.append(
        optimizers.optimize_loss(
            loss=training_loss,
            global_step=contrib_variables.get_global_step(),
            learning_rate=_linear_learning_rate(len(linear_feature_columns)),
            optimizer=_get_optimizer(linear_optimizer),
            clip_gradients=gradient_clip_norm,
            variables=ops.get_collection(linear_parent_scope),
            name=linear_parent_scope,
            # Empty summaries, because head already logs "loss" summary.
            summaries=[]))
  return control_flow_ops.group(*train_ops)
Author: Jackhuang945 | Project: tensorflow | Lines: 32 | Source: dnn_linear_combined.py
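A design detail worth noting: both optimize_loss calls receive the same global step, so each combined training step increments it twice. Example 14 below shows the later revision of this function, which disables the per-optimizer increment and advances the step exactly once.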
Example 3: testWrongOptimizer
def testWrongOptimizer(self):
  optimizers = ["blah", variables.Variable, object(), lambda x: None]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g):
        _, _, loss, global_step = _setup_model()
        with self.assertRaises(ValueError):
          optimizers_lib.optimize_loss(
              loss, global_step, learning_rate=0.1, optimizer=optimizer)
Author: Jackhuang945 | Project: tensorflow | Lines: 9 | Source: optimizers_test.py
Example 4: testInvalidLoss
def testInvalidLoss(self):
  with ops.Graph().as_default() as g, self.test_session(graph=g):
    _, _, _, global_step = _setup_model()
    with self.assertRaises(ValueError):
      optimizers_lib.optimize_loss(
          None, global_step, learning_rate=0.1, optimizer="SGD")
    with self.assertRaises(ValueError):
      optimizers_lib.optimize_loss(
          [[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
Author: Jackhuang945 | Project: tensorflow | Lines: 9 | Source: optimizers_test.py
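In other words, the loss argument must be an actual scalar Tensor; neither None nor a plain Python list passes optimize_loss's input validation.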
Example 5: testIgnoreVariablesWithNoGradients
def testIgnoreVariablesWithNoGradients(self):
  _, _, loss, global_step = _setup_model()
  unused_variable = variable_scope.get_variable("ignore_me", [])
  optimizers_lib.optimize_loss(
      loss,
      global_step,
      learning_rate=0.1,
      optimizer="SGD",
      gradient_noise_scale=10.0,
      gradient_multipliers={unused_variable: 1.},
      clip_gradients=10.0)
Author: Jackhuang945 | Project: tensorflow | Lines: 13 | Source: optimizers_test.py
Example 6: _dynamic_rnn_model_fn
def _dynamic_rnn_model_fn(features, labels, mode):
  """The model to be passed to an `Estimator`."""
  with ops.name_scope(name):
    initial_state = features.get(initial_state_key)
    sequence_length = features.get(sequence_length_key)
    sequence_input = build_sequence_input(features,
                                          sequence_feature_columns,
                                          context_feature_columns)
    if mode == model_fn.ModeKeys.TRAIN:
      cell_for_mode = apply_dropout(
          cell, input_keep_probability, output_keep_probability)
    else:
      cell_for_mode = cell
    rnn_activations, final_state = construct_rnn(
        initial_state,
        sequence_input,
        cell_for_mode,
        target_column.num_label_columns,
        dtype=dtype,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)
    loss = None  # Created below for modes TRAIN and EVAL.
    if prediction_type == PredictionType.MULTIPLE_VALUE:
      prediction_dict = _multi_value_predictions(
          rnn_activations, target_column, predict_probabilities)
      if mode != model_fn.ModeKeys.INFER:
        loss = _multi_value_loss(
            rnn_activations, labels, sequence_length, target_column, features)
    elif prediction_type == PredictionType.SINGLE_VALUE:
      prediction_dict = _single_value_predictions(
          rnn_activations, sequence_length, target_column,
          predict_probabilities)
      if mode != model_fn.ModeKeys.INFER:
        loss = _single_value_loss(
            rnn_activations, labels, sequence_length, target_column, features)
    prediction_dict[RNNKeys.FINAL_STATE_KEY] = final_state
    eval_metric_ops = None
    if mode != model_fn.ModeKeys.INFER:
      eval_metric_ops = _get_eval_metric_ops(
          problem_type, prediction_type, sequence_length, prediction_dict,
          labels)
    train_op = None
    if mode == model_fn.ModeKeys.TRAIN:
      train_op = optimizers.optimize_loss(
          loss=loss,
          global_step=None,  # Get it internally.
          learning_rate=learning_rate,
          optimizer=optimizer,
          clip_gradients=gradient_clipping_norm,
          summaries=optimizers.OPTIMIZER_SUMMARIES)
    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops)
Author: BloodD | Project: tensorflow | Lines: 59 | Source: dynamic_rnn_estimator.py
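Two details here: global_step=None tells optimize_loss to look up the graph's existing global step itself (per the inline comment), and summaries=optimizers.OPTIMIZER_SUMMARIES requests every available optimizer summary rather than a hand-picked subset.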
Example 7: linear_model_fn_with_model_fn_ops
def linear_model_fn_with_model_fn_ops(features, labels, mode):
  """Same as linear_model_fn, but returns `ModelFnOps`."""
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return model_fn.ModelFnOps(
      mode=mode, predictions=prediction, loss=loss, train_op=train_op)
Author: Immexxx | Project: tensorflow | Lines: 9 | Source: estimator_test.py
Example 8: _train_op_fn
def _train_op_fn(loss):
  """Returns the op to optimize the loss."""
  return optimizers.optimize_loss(
      loss=loss,
      global_step=contrib_variables.get_global_step(),
      learning_rate=learning_rate,
      optimizer=optimizer,
      name=parent_scope,
      # Empty summaries to prevent optimizers from logging the training_loss.
      summaries=[])
Author: soswow | Project: Various-JS-and-Python | Lines: 10 | Source: machine_learning.py
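Closures of this shape are typically handed to a Head object from tf.contrib.learn, which builds the loss and calls the closure back to create the training op; since the head logs the "loss" summary itself, summaries=[] avoids duplicating it. (The exact wiring, such as a train_op_fn argument on the head's model-fn builder, varies between contrib versions.)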
Example 9: linear_model_fn
def linear_model_fn(features, labels, mode):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  if isinstance(features, dict):
    (_, features), = features.items()
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return prediction, loss, train_op
Author: Immexxx | Project: tensorflow | Lines: 11 | Source: estimator_test.py
Example 10: logistic_model_no_mode_fn
def logistic_model_no_mode_fn(features, labels):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  labels = array_ops.one_hot(labels, 3, 1, 0)
  prediction, loss = (models.logistic_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return {
      'class': math_ops.argmax(prediction, 1),
      'prob': prediction
  }, loss, train_op
Author: Immexxx | Project: tensorflow | Lines: 11 | Source: estimator_test.py
Example 11: _train_op_fn
def _train_op_fn(loss):
  """Returns the op to optimize the loss."""
  return optimizers.optimize_loss(
      loss=loss,
      global_step=contrib_variables.get_global_step(),
      learning_rate=_LEARNING_RATE,
      optimizer=_get_optimizer(optimizer),
      clip_gradients=gradient_clip_norm,
      name=parent_scope,
      # Empty summaries to prevent optimizers from logging the training_loss.
      summaries=[])
Author: HKUST-SING | Project: tensorflow | Lines: 11 | Source: dnn.py
Example 12: _dnn_train_op_fn
def _dnn_train_op_fn(loss):
  """Returns the op to optimize the loss."""
  return optimizers.optimize_loss(
      loss=loss,
      global_step=training_util.get_global_step(),
      learning_rate=_DNN_LEARNING_RATE,
      optimizer=_get_optimizer(dnn_optimizer),
      name=dnn_parent_scope,
      variables=ops.get_collection(
          ops.GraphKeys.TRAINABLE_VARIABLES, scope=dnn_parent_scope),
      # Empty summaries to prevent optimizers from logging training_loss.
      summaries=[])
Author: StephenOman | Project: tensorflow | Lines: 12 | Source: dnn_tree_combined_estimator.py
Example 13: testInvalidGlobalStep
def testInvalidGlobalStep(self):
  with ops.Graph().as_default() as g, self.test_session(graph=g):
    x = array_ops.placeholder(dtypes.float32, [])
    var = variable_scope.get_variable(
        "test", [], initializer=init_ops.constant_initializer(10))
    loss = math_ops.abs(var * x)
    with self.assertRaises(AttributeError):
      optimizers_lib.optimize_loss(
          loss,
          global_step=constant_op.constant(
              43, dtype=dtypes.int64),
          learning_rate=0.1,
          optimizer="SGD")
    with self.assertRaises(TypeError):
      optimizers_lib.optimize_loss(
          loss,
          global_step=variable_scope.get_variable(
              "global_step", [],
              trainable=False,
              dtype=dtypes.float64,
              initializer=init_ops.constant_initializer(
                  0.0, dtype=dtypes.float64)),
          learning_rate=0.1,
          optimizer="SGD")
    with self.assertRaises(ValueError):
      optimizers_lib.optimize_loss(
          loss,
          global_step=variable_scope.get_variable(
              "global_step", [1],
              trainable=False,
              dtype=dtypes.int64,
              initializer=init_ops.constant_initializer(
                  [0], dtype=dtypes.int64)),
          learning_rate=0.1,
          optimizer="SGD")
Author: Jackhuang945 | Project: tensorflow | Lines: 35 | Source: optimizers_test.py
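The three failures pin down the contract: global_step must be a scalar integer Variable. A constant has no assign method (AttributeError), a float64 variable has the wrong dtype (TypeError), and a shape-[1] variable is not a scalar (ValueError). A minimal sketch of a valid global step, assuming TensorFlow 1.x:

import tensorflow as tf

# A valid global step: scalar shape [], integer dtype, not trainable.
global_step = tf.get_variable(
    "global_step", [],
    dtype=tf.int64,
    trainable=False,
    initializer=tf.constant_initializer(0, dtype=tf.int64))

# The stock helper builds an equivalent variable:
# global_step = tf.train.create_global_step()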
Example 14: _make_training_op
def _make_training_op(training_loss):
  """Training op for the DNN linear combined model."""
  train_ops = []
  global_step = training_util.get_global_step()
  if dnn_logits is not None:
    train_ops.append(
        optimizers.optimize_loss(
            loss=training_loss,
            global_step=global_step,
            learning_rate=_DNN_LEARNING_RATE,
            optimizer=dnn_optimizer,
            gradient_multipliers=_extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                embedding_lr_multipliers, dnn_parent_scope,
                dnn_input_scope.name),
            clip_gradients=gradient_clip_norm,
            variables=ops.get_collection(dnn_parent_scope),
            name=dnn_parent_scope,
            # Empty summaries, because head already logs "loss" summary.
            summaries=[],
            increment_global_step=not fix_global_step_increment_bug))
  if linear_logits is not None:
    train_ops.append(
        optimizers.optimize_loss(
            loss=training_loss,
            global_step=global_step,
            learning_rate=_linear_learning_rate(len(linear_feature_columns)),
            optimizer=linear_optimizer,
            clip_gradients=gradient_clip_norm,
            variables=ops.get_collection(linear_parent_scope),
            name=linear_parent_scope,
            # Empty summaries, because head already logs "loss" summary.
            summaries=[],
            increment_global_step=not fix_global_step_increment_bug))
  train_op = control_flow_ops.group(*train_ops)
  if fix_global_step_increment_bug:
    with ops.control_dependencies([train_op]):
      with ops.colocate_with(global_step):
        return state_ops.assign_add(global_step, 1).op
  return train_op
Author: Ajaycs99 | Project: tensorflow | Lines: 40 | Source: dnn_linear_combined.py
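This is the corrected counterpart of Example 2: when fix_global_step_increment_bug is set, both optimize_loss calls skip their internal increment (increment_global_step=False), and a single assign_add, sequenced after the grouped train op and colocated with the global-step variable, advances the counter exactly once per training step.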
Example 15: linear_model_params_fn
def linear_model_params_fn(features, labels, mode, params):
  features = extract(features, 'input')
  labels = extract(labels, 'labels')
  assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
                  model_fn.ModeKeys.INFER)
  prediction, loss = (models.linear_regression_zero_init(features, labels))
  train_op = optimizers.optimize_loss(
      loss,
      variables.get_global_step(),
      optimizer='Adagrad',
      learning_rate=params['learning_rate'])
  return prediction, loss, train_op
Author: Immexxx | Project: tensorflow | Lines: 13 | Source: estimator_test.py
Example 16: testNoGlobalStepWithDecay
def testNoGlobalStepWithDecay(self):
  optimizers = [
      "SGD", gradient_descent.GradientDescentOptimizer,
      gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
  ]
  for optimizer in optimizers:
    with ops.Graph().as_default() as g, self.test_session(graph=g):
      x = array_ops.placeholder(dtypes.float32, [])
      var = variable_scope.get_variable(
          "test", [], initializer=init_ops.constant_initializer(10))
      loss = math_ops.abs(var * x)
      update_var = variable_scope.get_variable(
          "update", [], initializer=init_ops.constant_initializer(10))
      update_op = state_ops.assign(update_var, 20)
      with self.assertRaisesRegexp(
          ValueError, "global_step is required for learning_rate_decay_fn"):
        optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
Author: Jackhuang945 | Project: tensorflow | Lines: 23 | Source: optimizers_test.py
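The error is raised by design: a learning_rate_decay_fn is invoked as learning_rate_decay_fn(learning_rate, global_step), so with no global step there is nothing to decay against, and optimize_loss rejects the combination regardless of whether the optimizer is given as a string, a class, or an instance.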
Example 17: testGradientClip
def testGradientClip(self):
  with self.test_session() as session:
    x, var, loss, global_step = _setup_model()
    train = optimizers_lib.optimize_loss(
        loss,
        global_step,
        learning_rate=0.1,
        optimizer="SGD",
        clip_gradients=0.1)
    variables.global_variables_initializer().run()
    session.run(train, feed_dict={x: 5})
    var_value, global_step_value = session.run([var, global_step])
    self.assertAlmostEqual(var_value, 9.98999, 4)
    self.assertEqual(global_step_value, 1)
Author: Jackhuang945 | Project: tensorflow | Lines: 14 | Source: optimizers_test.py
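The asserted value is just the clipping arithmetic worked through: with x = 5 the raw gradient of loss = |var * x| with respect to var is 5, clipping to a global norm of 0.1 rescales it to 0.1, and one SGD step moves var from 10 to 10 - 0.1 * 0.1 = 9.99, which matches 9.98999 to the four decimal places checked.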
Example 18: _dynamic_rnn_model_fn
def _dynamic_rnn_model_fn(features, labels, mode):
  """The model to be passed to an `Estimator`."""
  with ops.name_scope(name):
    initial_state = features.get(initial_state_key)
    sequence_length = features.get(sequence_length_key)
    sequence_input = build_sequence_input(features,
                                          sequence_feature_columns,
                                          context_feature_columns)
    if mode == model_fn.ModeKeys.TRAIN:
      cell_for_mode = apply_dropout(
          cell, input_keep_probability, output_keep_probability)
    else:
      cell_for_mode = cell
    rnn_activations, final_state = construct_rnn(
        initial_state,
        sequence_input,
        cell_for_mode,
        target_column.num_label_columns,
        dtype=dtype,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)
    if prediction_type == PredictionType.MULTIPLE_VALUE:
      prediction_dict = _multi_value_predictions(
          rnn_activations, target_column, predict_probabilities)
      loss = _multi_value_loss(
          rnn_activations, labels, sequence_length, target_column, features)
    elif prediction_type == PredictionType.SINGLE_VALUE:
      prediction_dict = _single_value_predictions(
          rnn_activations, sequence_length, target_column,
          predict_probabilities)
      loss = _single_value_loss(
          rnn_activations, labels, sequence_length, target_column, features)
    # TODO(roumposg): Return eval_metric_ops here, instead of default_metrics.
    default_metrics = _get_default_metrics(
        problem_type, prediction_type, sequence_length)
    prediction_dict[RNNKeys.FINAL_STATE_KEY] = final_state
    eval_metric_ops = estimator._make_metrics_ops(  # pylint: disable=protected-access
        default_metrics, features, labels, prediction_dict)
    train_op = optimizers.optimize_loss(
        loss=loss,
        global_step=None,
        learning_rate=learning_rate,
        optimizer=optimizer,
        clip_gradients=gradient_clipping_norm,
        summaries=optimizers.OPTIMIZER_SUMMARIES)
    return model_fn.ModelFnOps(mode=mode,
                               predictions=prediction_dict,
                               loss=loss,
                               train_op=train_op,
                               eval_metric_ops=eval_metric_ops)
Author: HKUST-SING | Project: tensorflow | Lines: 50 | Source: dynamic_rnn_estimator.py
Example 19: _train_op_fn
def _train_op_fn(loss):
  """Returns the op to optimize the loss."""
  return optimizers.optimize_loss(
      loss=loss,
      global_step=contrib_variables.get_global_step(),
      learning_rate=_LEARNING_RATE,
      optimizer=_get_optimizer(optimizer),
      gradient_multipliers=(
          dnn_linear_combined._extract_embedding_lr_multipliers(  # pylint: disable=protected-access
              embedding_lr_multipliers, parent_scope, input_layer_scope)),
      clip_gradients=gradient_clip_norm,
      name=parent_scope,
      # Empty summaries to prevent optimizers from logging the training_loss.
      summaries=[])
Author: kadeng | Project: tensorflow | Lines: 14 | Source: dnn.py
Example 20: _logistic_regression_model_fn
def _logistic_regression_model_fn(features, labels, mode):
  _ = mode
  logits = layers.linear(
      features,
      1,
      weights_initializer=init_ops.zeros_initializer(),
      # Intentionally uses really awful initial values so that
      # AUC/precision/recall/etc will change meaningfully even on a toy dataset.
      biases_initializer=init_ops.constant_initializer(-10.0))
  predictions = math_ops.sigmoid(logits)
  loss = loss_ops.sigmoid_cross_entropy(logits, labels)
  train_op = optimizers.optimize_loss(
      loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
  return predictions, loss, train_op
Author: AlbertXiebnu | Project: tensorflow | Lines: 14 | Source: logistic_regressor_test.py
Note: The tensorflow.contrib.layers.python.layers.optimizers.optimize_loss examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's License before using or redistributing the code; do not reproduce without permission.