This article collects typical usage examples of the tensorflow.python.ops.math_ops.subtract function in Python. If you are unsure what subtract does, how to call it, or want working examples, the curated code samples below should help.
The following presents 20 code examples of the subtract function, sorted by popularity by default.
Example 1: hinge_loss
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES):
  """Adds a hinge loss to the training procedure.

  WARNING: `weights` also supports dimensions of 1, but the broadcasting does
  not work as advertised; you'll wind up with a weighted sum instead of a
  weighted mean for any but the last dimension. This will be cleaned up soon,
  so please do not rely on the current behavior for anything but the shapes
  documented for `weights` below.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      `[batch_size]`, or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` of the loss value.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Author: AliMiraftab | Project: tensorflow | Lines: 35 | Source: losses_impl.py
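To make the {0, 1} → {-1, 1} label conversion and the hinge computation concrete, here is a minimal NumPy sketch (not from the original source; the same conversion recurs in Examples 2, 5, and 18 below):

import numpy as np

labels = np.array([0.0, 1.0, 1.0])    # binary {0, 1} labels
logits = np.array([-2.0, 0.5, 3.0])   # raw scores
signed = 2 * labels - 1               # convert to {-1, 1}
losses = np.maximum(0.0, 1 - signed * logits)  # hinge: max(0, 1 - y * logit)
print(losses)  # [0., 0.5, 0.] -- only the under-confident prediction is penalized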
Example 2: hinge_loss
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a
      positive (resp. negative) binary prediction.
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
      Internally the {0, 1} labels are converted to {-1, 1} when calculating
      the hinge loss.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    An unweighted `Tensor` of the same shape as `logits` and `labels`,
    representing the loss values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
Author: ThunderQi | Project: tensorflow | Lines: 28 | Source: loss_ops.py
Example 3: huber_loss
def huber_loss(y_true, y_pred, delta=1.0):
  """Computes Huber loss value.

  For each value x in `error = y_true - y_pred`, the following is calculated:

  ```
  0.5 * x^2                  if |x| <= d
  0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  error = math_ops.subtract(y_pred, y_true)
  abs_error = math_ops.abs(error)
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  return math_ops.add(
      math_ops.multiply(
          ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
          math_ops.multiply(quadratic, quadratic)),
      math_ops.multiply(delta, linear))
Author: adit-chandra | Project: tensorflow | Lines: 31 | Source: losses.py
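As a sanity check on the minimum/subtract decomposition above, a small NumPy sketch (assumed, not part of the original source) evaluates the piecewise formula with delta = 1.0:

import numpy as np

delta = 1.0
error = np.array([0.5, -0.5, 3.0])
abs_error = np.abs(error)
quadratic = np.minimum(abs_error, delta)   # |x| inside the threshold, d outside
linear = abs_error - quadratic             # 0 inside, |x| - d outside
loss = 0.5 * quadratic**2 + delta * linear
print(loss)  # [0.125, 0.125, 2.5] -- matches 0.5*x^2 and 0.5*d^2 + d*(|x|-d)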
Example 4: test_multiple_outputs
def test_multiple_outputs(self):
  #   -         +
  #  / \y0   y1/ \
  # x    split    z
  #       |
  #       y       (nodes are ops; edges are going up)
  g = ops.Graph()
  with g.as_default():
    x = array_ops.placeholder(dtypes.float32, shape=[1], name='x')
    y = array_ops.placeholder(dtypes.float32, shape=[2], name='y')
    y0, y1 = array_ops.split(y, num_or_size_splits=2, axis=0)
    z = array_ops.placeholder(dtypes.float32, shape=[1], name='z')
    math_ops.add(x, y0)
    math_ops.subtract(y1, z)

  y1_pattern = graph_matcher.OpTypePattern('*')
  minus_pattern = graph_matcher.OpTypePattern('Sub', inputs=[y1_pattern, '*'])
  matcher = graph_matcher.GraphMatcher(minus_pattern)

  match_results = list(matcher.match_graph(g))
  self.assertEqual(1, len(match_results))
  match_result = match_results[0]

  # y0 and y1 are both outputs of the single Split op.
  self.assertEqual(y0.op, y1.op)
  self.assertEqual(match_result.get_op(y1_pattern), y1.op)
  self.assertEqual(match_result.get_tensor(y1_pattern), y1)
Author: AndrewTwinz | Project: tensorflow | Lines: 26 | Source: graph_matcher_test.py
Example 5: hinge_loss
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    logits: The logits, a float tensor.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Author: piyushjaiswal98 | Project: tensorflow | Lines: 34 | Source: losses_impl.py
Example 6: unregularized_loss
def unregularized_loss(self, examples):
  """Add operations to compute the loss (without the regularization loss).

  Args:
    examples: Examples to compute unregularized loss on.

  Returns:
    An Operation that computes mean (unregularized) loss for given set of
    examples.

  Raises:
    ValueError: if examples are not well defined.
  """
  self._assertSpecified([
      'example_labels', 'example_weights', 'sparse_features', 'dense_features'
  ], examples)
  self._assertList(['sparse_features', 'dense_features'], examples)
  with name_scope('sdca/unregularized_loss'):
    predictions = math_ops.cast(
        self._linear_predictions(examples), dtypes.float64)
    labels = math_ops.cast(
        internal_convert_to_tensor(examples['example_labels']),
        dtypes.float64)
    weights = math_ops.cast(
        internal_convert_to_tensor(examples['example_weights']),
        dtypes.float64)

    if self._options['loss_type'] == 'logistic_loss':
      return math_ops.reduce_sum(math_ops.multiply(
          sigmoid_cross_entropy_with_logits(labels=labels,
                                            logits=predictions),
          weights)) / math_ops.reduce_sum(weights)

    if self._options['loss_type'] == 'poisson_loss':
      return math_ops.reduce_sum(math_ops.multiply(
          log_poisson_loss(targets=labels, log_input=predictions),
          weights)) / math_ops.reduce_sum(weights)

    if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
      # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need
      # to first convert 0/1 labels into -1/1 labels.
      all_ones = array_ops.ones_like(predictions)
      adjusted_labels = math_ops.subtract(2 * labels, all_ones)
      # Tensor that contains (unweighted) error (hinge loss) per example.
      error = nn_ops.relu(
          math_ops.subtract(all_ones,
                            math_ops.multiply(adjusted_labels, predictions)))
      weighted_error = math_ops.multiply(error, weights)
      return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
          weights)

    # squared loss
    err = math_ops.subtract(labels, predictions)
    weighted_squared_err = math_ops.multiply(math_ops.square(err), weights)
    # SDCA squared loss function is sum(err^2) / (2*sum(weights))
    return (math_ops.reduce_sum(weighted_squared_err) /
            (2.0 * math_ops.reduce_sum(weights)))
Author: Ajaycs99 | Project: tensorflow | Lines: 59 | Source: sdca_ops.py
Example 7: testStripUnusedMultipleInputs
def testStripUnusedMultipleInputs(self):
  input_graph_name = "input_graph.pb"
  output_graph_name = "output_graph.pb"

  # We'll create an input graph that multiplies two input nodes.
  with ops.Graph().as_default():
    constant_node1 = constant_op.constant(1.0, name="constant_node1")
    constant_node2 = constant_op.constant(2.0, name="constant_node2")
    input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1")
    input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2")
    output_node = math_ops.multiply(
        input_node1, input_node2, name="output_node")
    math_ops.add(output_node, 2.0, name="later_node")
    sess = session.Session()
    output = sess.run(output_node)
    self.assertNear(6.0, output, 0.00001)
    graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)

  # We save out the graph to disk, and then call the const conversion
  # routine.
  input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
  input_binary = False
  input_node_names = "input_node1,input_node2"
  input_node_types = [
      dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum
  ]
  output_binary = True
  output_node_names = "output_node"
  output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)

  strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
                                           output_graph_path, output_binary,
                                           input_node_names,
                                           output_node_names,
                                           input_node_types)

  # Now we make sure the variable is now a constant, and that the graph still
  # produces the expected result.
  with ops.Graph().as_default():
    output_graph_def = graph_pb2.GraphDef()
    with open(output_graph_path, "rb") as f:
      output_graph_def.ParseFromString(f.read())
      _ = importer.import_graph_def(output_graph_def, name="")

    self.assertEqual(3, len(output_graph_def.node))
    for node in output_graph_def.node:
      self.assertNotEqual("Add", node.op)
      self.assertNotEqual("Sub", node.op)
      if node.name == input_node_names:
        self.assertTrue("shape" in node.attr)

    with session.Session() as sess:
      input_node1 = sess.graph.get_tensor_by_name("input_node1:0")
      input_node2 = sess.graph.get_tensor_by_name("input_node2:0")
      output_node = sess.graph.get_tensor_by_name("output_node:0")
      output = sess.run(output_node,
                        feed_dict={input_node1: [10.0],
                                   input_node2: [-5.0]})
      self.assertNear(-50.0, output, 0.00001)
Author: 1000sprites | Project: tensorflow | Lines: 59 | Source: strip_unused_test.py
Example 8: inner_loss
def inner_loss(y_true, y_pred):
  delta = math_ops.abs(math_ops.subtract(y_pred, y_true))
  losses = math_ops.square(delta)
  if clip > 0.0:
    losses = tf.where(delta < clip, 0.5 * losses, delta - 0.5)
  return losses
Author: AlexMikhalev | Project: polyaxon | Lines: 7 | Source: losses.py
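Note that `clip` is a free variable captured from the enclosing function. Below is a standalone NumPy sketch (hypothetical wrapper, not from the source) of the same piecewise rule; with clip = 1.0 it coincides with the Huber loss of Example 3:

import numpy as np

def clipped_squared_loss(y_true, y_pred, clip=1.0):
    delta = np.abs(y_pred - y_true)
    losses = np.square(delta)
    if clip > 0.0:
        # quadratic inside the clip region, linear (|x| - 0.5) outside
        losses = np.where(delta < clip, 0.5 * losses, delta - 0.5)
    return losses

print(clipped_squared_loss(np.array([0.0, 0.0]), np.array([0.5, 3.0])))
# [0.125, 2.5] -- same values as the Huber example above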
Example 9: BackwardLoopBody
def BackwardLoopBody(*args):
  """Backward loop body function."""
  t, dev_t = args[0], args[1]
  (theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state1,
   d_inputs, d_acc_state) = _Pack(args[2:], bakloop_sig)

  # The input recurrent state for time step t is previous time step's
  # output, or the original state0 when on time step 0.
  state_from_acc = _Index(acc_state, math_ops.maximum(0, t - 1))
  state0 = functional_ops.If(
      math_ops.equal(t, array_ops.constant(0, dtypes.int32)),
      _Flatten([state_from_acc, orig_state0]), ReturnOrigState0,
      ReturnAccState)
  state0 = nest.pack_sequence_as(orig_state0, state0)

  # The external inputs for time step t.
  inputs_t = _Index(inputs, t)
  # The extras for time step t.
  extras_t = _Index(acc_extras, t)

  d_state1 = _Add(_Index(d_acc_state, t), d_state1)
  (d_theta_t, d_state0, d_inputs_t) = _Pack(
      Bak(*_Flatten([theta, state0, inputs_t, extras_t, d_state1])),
      [self._theta, self._state, self._inputs])
  d_theta = _Add(d_theta, d_theta_t)
  d_inputs = _Update(d_inputs, d_inputs_t, dev_t)

  return [math_ops.subtract(dev_t, 1)] + _Flatten([
      theta, orig_state0, inputs, acc_state, acc_extras, d_theta, d_state0,
      d_inputs, d_acc_state
  ])
Author: AnishShah | Project: tensorflow | Lines: 30 | Source: recurrent.py
Example 10: _Update_global_variables
def _Update_global_variables():
  local_vars = [v for g, v in grads_and_vars if g is not None]
  global_center_vars = [self._global_map[var] for var in local_vars]
  local_center_vars = [self._local_map[var] for var in local_vars]
  local_center_vars_update = []
  for lvar, var in zip(local_center_vars, global_center_vars):
    local_center_vars_update.append(lvar.assign(var))
  update_ops = []
  differences = []
  with ops.control_dependencies(local_center_vars_update):
    for v, lv in zip(local_vars, local_center_vars):
      with ops.device(v.device):
        differences.append(math_ops.subtract(v, lv))
    for lvar, diff in zip(local_vars, differences):
      with ops.device(lvar.device):
        update_ops.append(
            state_ops.assign_sub(lvar,
                                 math_ops.multiply(self._moving_rate,
                                                   diff)))
    for var, diff in zip(global_center_vars, differences):
      with ops.device(var.device):
        update_ops.append(
            state_ops.assign_add(var,
                                 math_ops.multiply(self._moving_rate,
                                                   diff)))
    if global_step:
      with ops.colocate_with(global_step):
        update_ops.append(state_ops.assign_add(global_step, 1))
    variable_update = control_flow_ops.group(*(update_ops))
  return variable_update
Author: ahmedsaiduk | Project: tensorflow | Lines: 30 | Source: elastic_average_optimizer.py
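The update loops implement a symmetric pull: each local variable moves toward the global center by `moving_rate * diff`, and the center moves toward the local variable by the same amount. A scalar sketch (hypothetical values, not from the optimizer):

moving_rate = 0.1
local_var, global_center = 5.0, 1.0
diff = local_var - global_center      # math_ops.subtract(v, lv)
local_var -= moving_rate * diff       # assign_sub: 5.0 -> 4.6
global_center += moving_rate * diff   # assign_add: 1.0 -> 1.4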
Example 11: huber_loss
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.WEIGHTED_SUM_BY_NONZERO_WEIGHTS):
  """Adds a Huber Loss term to the training procedure.

  For each value x in `error = labels - predictions`, the following is
  calculated:

  ```
  0.5 * x^2                  if |x| <= d
  0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size `[batch_size]`, then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    delta: `float`, the point where the Huber loss function changes from a
      quadratic to linear.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "huber_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is
    # already a nonzero contribution to the gradient from the quadratic term.
    linear = (abs_error - quadratic)
    losses = 0.5 * quadratic**2 + delta * linear
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Author: LUTAN | Project: tensorflow | Lines: 60 | Source: losses_impl.py
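The identity asserted in the comment above -- `abs_error - minimum(abs_error, delta)` equals `maximum(abs_error - delta, 0)` in value, differing only in its gradient at the boundary -- can be checked with a short NumPy sketch (not from the source):

import numpy as np

delta = 1.0
abs_error = np.array([0.3, 1.0, 2.5])
linear_a = abs_error - np.minimum(abs_error, delta)
linear_b = np.maximum(abs_error - delta, 0.0)
print(np.allclose(linear_a, linear_b))  # True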
Example 12: huber_loss
def huber_loss(labels, predictions, weight=1.0, k=1.0, scope=None):
  """Defines a Huber loss. See https://en.wikipedia.org/wiki/Huber_loss

  Huber loss:
    f(x) = 0.5 * x^2            if |x| <= k
           k * |x| - 0.5 * k^2  if |x| > k

  Args:
    labels: the ground truth output tensor.
    predictions: the predicted output tensor to regularize.
    weight: coefficient for the loss; must not be None.
    k: value of k in the Huber loss.
    scope: Optional scope for op_scope.

  Returns:
    The Huber loss op.

  Adapted from:
  http://concise-bio.readthedocs.io/en/latest/_modules/concise/tf_helper.html
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, labels]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    diff = math_ops.subtract(predictions, labels)
    abs_diff = tf.abs(diff)
    losses = tf.where(abs_diff < k,
                      0.5 * tf.square(diff),
                      k * abs_diff - 0.5 * k ** 2)
    return tf.losses.compute_weighted_loss(losses, weight)
Author: PJunhyuk | Project: people-counting-pose | Lines: 30 | Source: losses.py
Example 13: normalize_moments
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculates the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics:
      the (possibly shifted) squared sum of the data to compute the variance
      over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.name_scope(name, "normalize",
                      [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
    return (mean, variance)
Author: pcm17 | Project: tensorflow | Lines: 28 | Source: nn_impl.py
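A quick NumPy verification (assumed setup, not from the source) that the sufficient-statistics formulas recover the usual mean and variance -- mean = sum(x)/n plus the shift, and variance = E[x^2] - E[x]^2 computed on the shifted data:

import numpy as np

x = np.array([1.0, 2.0, 4.0])
shift = x[0]                            # any shift value works algebraically
counts = float(x.size)
mean_ss = np.sum(x - shift)             # (shifted) sum of elements
variance_ss = np.sum((x - shift) ** 2)  # (shifted) squared sum
shifted_mean = mean_ss / counts
mean = shifted_mean + shift
variance = variance_ss / counts - shifted_mean ** 2
print(mean, variance)                   # 2.333... 1.555...
print(np.mean(x), np.var(x))            # same values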
Example 14: exact_laplacian_kernel
def exact_laplacian_kernel(x, y, stddev):
  """Computes exact Laplacian kernel value(s) for tensors x and y using stddev.

  The Laplacian kernel for vectors u, v is defined as follows:

    K(u, v) = exp(-||u-v|| / stddev)

  where the norm is the l1-norm. x, y can be either vectors or matrices. If
  they are vectors, they must have the same dimension. If they are matrices,
  they must have the same number of columns. In the latter case, the method
  returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row
  from x and v is a row from y.

  Args:
    x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
    stddev: The width of the Laplacian kernel.

  Returns:
    A single value (scalar) with shape (1, 1) if x, y are vectors, or a
    matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian
    kernel) for all (u, v) pairs where u, v are rows from x and y
    respectively.

  Raises:
    InvalidShapeError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  diff_l1_norm = math_ops.reduce_sum(
      math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2)
  return math_ops.exp(-diff_l1_norm / stddev)
Author: rmlarsen | Project: tensorflow | Lines: 28 | Source: kernelized_utils.py
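A small NumPy sketch (not from the source) of the same computation for two matrices, broadcasting rows of x against rows of y to produce the (m, n) kernel matrix:

import numpy as np

def laplacian_kernel(x, y, stddev):
    # x: [m, dim], y: [n, dim] -> [m, n], K(u, v) = exp(-||u - v||_1 / stddev)
    diff_l1 = np.abs(x[:, None, :] - y[None, :, :]).sum(axis=2)
    return np.exp(-diff_l1 / stddev)

x = np.array([[0.0, 0.0], [1.0, 1.0]])
y = np.array([[1.0, 0.0]])
print(laplacian_kernel(x, y, stddev=1.0))  # [[exp(-1)], [exp(-1)]]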
Example 15: mean_squared_error
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size] or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope=scope)
Author: Immexxx | Project: tensorflow | Lines: 32 | Source: loss_ops.py
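To illustrate the weighting semantics described in the docstring, a NumPy sketch (assumed reduction: a weighted mean, sum(w * l) / sum(w); the exact reduction inside compute_weighted_loss may differ):

import numpy as np

predictions = np.array([1.0, 2.0, 4.0])
labels = np.array([1.0, 1.0, 1.0])
weights = np.array([1.0, 0.0, 1.0])       # per-sample coefficients
losses = np.square(predictions - labels)  # [0., 1., 9.]
print(np.sum(losses * weights) / np.sum(weights))  # 4.5; sample 2 is masked out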
Example 16: _model_fn
def _model_fn(features, labels, mode):
  _ = labels
  x = features['x']
  y = features['y']
  with ops.name_scope('outputs'):
    predictions = {'sum': math_ops.add(x, y, name='sum'),
                   'product': math_ops.multiply(x, y, name='product'),
                   'difference': math_ops.subtract(x, y, name='difference')}
  if core:
    export_outputs = {k: export_output.PredictOutput({k: v})
                      for k, v in predictions.items()}
    export_outputs[signature_constants.
                   DEFAULT_SERVING_SIGNATURE_DEF_KEY] = export_outputs['sum']
    return model_fn.EstimatorSpec(mode=mode,
                                  predictions=predictions,
                                  export_outputs=export_outputs,
                                  loss=constant_op.constant(0),
                                  train_op=control_flow_ops.no_op())
  else:
    output_alternatives = {k: (constants.ProblemType.UNSPECIFIED, {k: v})
                           for k, v in predictions.items()}
    return contrib_model_fn.ModelFnOps(
        mode=mode,
        predictions=predictions,
        output_alternatives=output_alternatives,
        loss=constant_op.constant(0),
        train_op=control_flow_ops.no_op())
Author: 1000sprites | Project: tensorflow | Lines: 27 | Source: testing_common.py
Example 17: mean_squared_error
def mean_squared_error(labels, predictions, weights=1.0, scope=None,
                       loss_collection=ops.GraphKeys.LOSSES):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "mean_squared_error",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    losses = math_ops.square(math_ops.subtract(predictions, labels))
    return compute_weighted_loss(losses, weights, scope, loss_collection)
Author: Immexxx | Project: tensorflow | Lines: 35 | Source: losses_impl.py
Example 18: hinge_loss
def hinge_loss(labels, logits, weights=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Adds a hinge loss to the training procedure.

  Args:
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
      Internally the {0, 1} labels are converted to {-1, 1} when calculating
      the hinge loss.
    logits: The logits, a float tensor. Note that logits are assumed to be
      unbounded and 0-centered. A value > 0 (resp. < 0) is considered a
      positive (resp. negative) binary prediction.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions
      must be either `1`, or the same as the corresponding `losses`
      dimension).
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
    shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match or
      if `labels` or `logits` is None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
    logits = math_ops.to_float(logits)
    labels = math_ops.to_float(labels)
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    losses = nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Author: bunbutter | Project: tensorflow | Lines: 47 | Source: losses_impl.py
Example 19: _FoldFusedBatchNorms
def _FoldFusedBatchNorms(graph):
  """Finds fused batch norm layers and folds them into preceding layers.

  Folding only affects the following layers: Conv2D, fully connected,
  depthwise convolution.

  Args:
    graph: Graph to walk and modify.

  Raises:
    ValueError: When batch norm folding fails.
  """
  for match in _FindFusedBatchNorms(graph):
    scope, sep, _ = match.layer_op.name.rpartition('/')
    # Make sure new ops are added to `graph` and put on the same device as
    # `bn_op`. The '/' (i.e. `sep`) ensures that we reuse the existing scope
    # named `scope`. Otherwise, TF creates a unique scope whose name starts
    # with `scope`.
    with graph.as_default(), graph.name_scope(scope + sep), ops.device(
        match.bn_op.device):
      with graph.name_scope(scope + sep + 'BatchNorm_Fold' + sep):
        # new weights = old weights * gamma / sqrt(variance + epsilon)
        # new biases = -mean * gamma / sqrt(variance + epsilon) + beta
        multiplier_tensor = match.gamma_tensor * math_ops.rsqrt(
            match.variance_tensor + match.bn_op.get_attr('epsilon'))
        bias_tensor = math_ops.subtract(
            match.beta_tensor,
            match.mean_tensor * multiplier_tensor,
            name='bias')

        # The shape of depthwise weights is different, so we need to reshape
        # the multiplier_tensor to ensure that the scaled_weight_tensor has
        # the expected shape.
        if match.layer_op.type == 'DepthwiseConv2dNative':
          new_shape = [
              match.weight_tensor.get_shape().as_list()[2],
              match.weight_tensor.get_shape().as_list()[3]
          ]
          multiplier_tensor = array_ops.reshape(
              multiplier_tensor, new_shape, name='scale_reshape')

        # TODO(suharshs): This naming of the following ops needs to carefully
        # follow the naming expected by quantize.py. Generalize the quantize
        # code to not require these delicate naming conventions.
        scaled_weight_tensor = math_ops.multiply(
            match.weight_tensor, multiplier_tensor, name='mul_fold')
        new_layer_tensor = _CloneWithNewOperands(
            match.layer_op, match.input_tensor, scaled_weight_tensor)
        bias_add_tensor = math_ops.add(
            new_layer_tensor, bias_tensor, name='add_fold')

        nodes_modified_count = graph_editor.reroute_ts(bias_add_tensor,
                                                       match.output_tensor)
        if nodes_modified_count != 1:
          raise ValueError(
              'Unexpected inputs to op: %s' % match.output_tensor.name)
Author: AbhinavJain13 | Project: tensorflow | Lines: 58 | Source: fold_batch_norms.py
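The folding identity -- gamma * (w*x - mean) / sqrt(variance + eps) + beta equals folded_w * x + folded_b -- can be checked with a scalar NumPy sketch (hypothetical values, not from the source):

import numpy as np

x, w = 2.0, 3.0                               # input and layer weight
gamma, beta, mean, variance, eps = 1.5, 0.2, 4.0, 2.0, 1e-3

bn_out = gamma * (w * x - mean) / np.sqrt(variance + eps) + beta

multiplier = gamma / np.sqrt(variance + eps)  # gamma * rsqrt(variance + eps)
folded_w = w * multiplier                     # 'mul_fold'
folded_b = beta - mean * multiplier           # 'bias'
print(np.isclose(bn_out, folded_w * x + folded_b))  # True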
Example 20: _AtanhGrad
def _AtanhGrad(op, grad):
  """Returns grad * 1 / (1 - x^2)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    x2 = math_ops.square(x)
    one = constant_op.constant(1, dtype=grad.dtype)
    inv = math_ops.reciprocal(math_ops.subtract(one, x2))
    return grad * inv
Author: neuroradiology | Project: tensorflow | Lines: 9 | Source: math_grad.py
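A quick finite-difference check (not from the source) that d/dx atanh(x) = 1 / (1 - x^2), the factor this gradient function multiplies into `grad`:

import numpy as np

x, h = 0.3, 1e-6
numeric = (np.arctanh(x + h) - np.arctanh(x - h)) / (2 * h)
analytic = 1.0 / (1.0 - x ** 2)
print(np.isclose(numeric, analytic))  # True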
Note: The tensorflow.python.ops.math_ops.subtract examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Before redistributing or using the code, please consult the corresponding project's License; do not reproduce without permission.