本文整理汇总了Python中tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits函数的典型用法代码示例。如果您正苦于以下问题:Python sparse_softmax_cross_entropy_with_logits函数的具体用法?Python sparse_softmax_cross_entropy_with_logits怎么用?Python sparse_softmax_cross_entropy_with_logits使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sparse_softmax_cross_entropy_with_logits函数的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: sparse_softmax_cross_entropy
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
  """Computes a weighted sparse softmax cross-entropy loss.

  Wraps `tf.nn.sparse_softmax_cross_entropy_with_logits`: `labels` is
  flattened to rank 1 before the op is applied, and the per-example losses
  are combined via `compute_weighted_loss`. A scalar `weights` simply scales
  the loss; a `[batch_size]` tensor weights each sample individually.

  Args:
    logits: `[batch_size, num_classes]` unscaled network outputs.
    labels: `[batch_size, 1]` or `[batch_size]` labels of dtype `int32` or
      `int64` in the range `[0, num_classes)`.
    weights: Scalar, `[batch_size]`, or `[batch_size, 1]` loss coefficients.
    scope: Optional name scope for the operations computing the loss.

  Returns:
    A scalar `Tensor` representing the mean loss value.

  Raises:
    ValueError: If the shapes of `logits`, `labels`, and `weights` are
      incompatible, or if `weights` is None.
  """
  scope_values = [logits, labels, weights]
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      scope_values) as scope:
    # Flatten labels to [batch_size], as required by the underlying op.
    batch_size = array_ops.shape(labels)[0]
    labels = array_ops.reshape(labels, shape=[batch_size])
    per_example = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    return compute_weighted_loss(per_example, weights, scope=scope)
开发者ID:Immexxx,项目名称:tensorflow,代码行数:31,代码来源:loss_ops.py
示例2: _softmax_cross_entropy_loss
def _softmax_cross_entropy_loss(logits, target):
  """Returns per-example sparse softmax cross-entropy for `logits`/`target`.

  `target` may be `[batch_size]` or `[batch_size, 1]`; a runtime assert
  rejects anything of higher rank before it is flattened to rank 1.
  """
  rank_check = control_flow_ops.Assert(
      math_ops.less_equal(array_ops.rank(target), 2),
      ["target's shape should be either [batch_size, 1] or [batch_size]"])
  # The reshape carries the assert as a control dependency, so the shape
  # check executes before the loss is computed.
  with ops.control_dependencies([rank_check]):
    target = array_ops.reshape(target, shape=[array_ops.shape(target)[0]])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, target)
开发者ID:KalraA,项目名称:tensorflow,代码行数:7,代码来源:linear.py
示例3: _softmax_cross_entropy_loss
def _softmax_cross_entropy_loss(logits, labels):
  """Returns the per-example sparse softmax cross-entropy loss.

  Validates that `labels` carries integer class IDs and drops a trailing
  singleton dimension so the underlying op receives `[batch_size]` labels.
  """
  # Classification targets must be integer class IDs.
  if not labels.dtype.is_integer:
    raise ValueError(
        "Labels dtype should be integer Instead got %s." % labels.dtype)
  # The op requires rank-1 [batch_size] labels; squeeze a [batch_size, 1] axis.
  squeezed = labels
  if len(squeezed.get_shape()) == 2:
    squeezed = array_ops.squeeze(squeezed, squeeze_dims=[1])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, squeezed)
开发者ID:ComeOnGetMe,项目名称:tensorflow,代码行数:9,代码来源:head.py
示例4: model
def model():
    """Builds the bidirectional-GRU classification graph.

    Creates the input placeholders, a bidirectional GRU encoder followed by a
    two-layer batch-normalized MLP head, loss/accuracy metrics, and a
    gradient-clipped Adam training op. Relies on module-level hyperparameters
    (`num_features`, `num_classes`, `learning_rate`, `clip_norm`) and layer
    helpers (`dropout`, `fully_connected`, `batch_norm`, `relu`) defined
    elsewhere in the file.

    Returns:
        Tuple of (X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss,
        accuracy, train_op, global_step).
    """
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        # Inputs: [batch, num_features] floats and [batch] int32 class labels.
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        # Trailing channel axis so the RNN sees [batch, num_features, 1].
        X_expand = tf.expand_dims(X_pl, axis=2)
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        cell_fw = tf.nn.rnn_cell.GRUCell(205)
        cell_bw = tf.nn.rnn_cell.GRUCell(205)
        # Full-length sequences: sum of ones over the feature axis gives
        # num_features for every example.
        seq_len = tf.reduce_sum(tf.ones(tf.shape(X_pl), dtype=tf.int32), axis=1)
        _, enc_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_fw,
            cell_bw=cell_bw, inputs=X_expand, sequence_length=seq_len,
            dtype=tf.float32)
        # Concatenate forward/backward final states (legacy tf.concat
        # signature: axis comes first).
        enc_states = tf.concat(1, enc_states)
        enc_states_drop = dropout(enc_states, is_training=is_training_pl)
        # Head: two blocks of linear -> batch norm -> ReLU (dropout after l1).
        l1 = fully_connected(enc_states_drop, 200, activation_fn=None)
        l1 = batch_norm(l1, is_training=is_training_pl)
        l1_relu = relu(l1)
        l1_dropout = dropout(l1_relu, is_training=is_training_pl)
        l2 = fully_connected(l1_dropout, 200, activation_fn=None)
        l2 = batch_norm(l2, is_training=is_training_pl)
        l2_relu = relu(l2)
        l_out = fully_connected(l2_relu, num_outputs=num_classes, activation_fn=None)
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()
    with tf.variable_scope('metrics'):
        # Per-example cross-entropy (legacy positional signature:
        # logits, labels), then mean over the batch.
        loss = sparse_softmax_cross_entropy_with_logits(l_out, t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())
    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        # Clip by global norm to stabilize recurrent training.
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)
        tf.summary.scalar('train/global_gradient_norm', global_norm)
        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)
    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
开发者ID:alrojo,项目名称:EEG_DauwelsLab,代码行数:57,代码来源:rnn_big2.py
示例5: _softmax_cross_entropy_loss
def _softmax_cross_entropy_loss(logits, target):
  """Returns the per-example sparse softmax cross-entropy loss vector.

  Validates that `target` holds integer class IDs and flattens a
  `[batch_size, 1]` target to `[batch_size]` before calling the op.
  """
  # Classification requires integer targets.
  if not target.dtype.is_integer:
    raise ValueError("Target's dtype should be integer "
                     "Instead got %s." % target.dtype)
  flat_target = target
  if len(flat_target.get_shape()) == 2:
    # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
    flat_target = array_ops.squeeze(flat_target, squeeze_dims=[1])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, flat_target)
开发者ID:caikehe,项目名称:tensorflow,代码行数:10,代码来源:head.py
示例6: _softmax_cross_entropy_loss
def _softmax_cross_entropy_loss(logits, target):
  """Returns the per-example sparse softmax cross-entropy loss vector.

  Accepts only int32/int64-compatible `target` dtypes, squeezing a trailing
  singleton dimension so the op receives `[batch_size]` targets.
  """
  # Check that we got int32/int64 for classification.
  compatible_32 = target.dtype.is_compatible_with(dtypes.int32)
  compatible_64 = target.dtype.is_compatible_with(dtypes.int64)
  if not (compatible_32 or compatible_64):
    raise ValueError("Target's dtype should be int32, int64 or compatible. "
                     "Instead got %s." % target.dtype)
  # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
  if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, target)
开发者ID:sathishreddy,项目名称:tensorflow,代码行数:10,代码来源:target_column.py
示例7: sparse_softmax_cross_entropy
def sparse_softmax_cross_entropy(
    labels, logits, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Sparse softmax cross-entropy with optional per-sample weighting.

  Thin wrapper over `tf.nn.sparse_softmax_cross_entropy_with_logits` that
  squeezes compatible dimensions, applies `weights`, and reduces according
  to `reduction`. A scalar `weights` scales the loss; a `[batch_size]`
  tensor weights each sample individually.

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank
      of `labels` and result) and dtype `int32` or `int64`. Each entry must
      be an index in `[0, num_classes)`; other values raise on CPU and yield
      `NaN` loss/gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`
      or `float64`.
    weights: Coefficients for the loss; scalar or broadcastable to `labels`
      (same rank, each dimension either 1 or matching).
    scope: Optional name scope for the loss operations.
    loss_collection: Collection the loss is added to.
    reduction: Type of reduction to apply to the loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`; same shape as
    `labels` for `Reduction.NONE`, otherwise a scalar.

  Raises:
    ValueError: If the shapes of `logits`, `labels`, and `weights` are
      incompatible, or if any of them are None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("labels must not be None.")
  if logits is None:
    raise ValueError("logits must not be None.")
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # `labels` holds class IDs while `logits` carries one score per class,
    # so the expected rank difference is exactly 1.
    labels, logits, weights = _remove_squeezable_dimensions(
        labels, logits, weights, expected_rank_diff=1)
    raw_losses = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    return compute_weighted_loss(
        raw_losses, weights, scope, loss_collection, reduction=reduction)
开发者ID:bunbutter,项目名称:tensorflow,代码行数:55,代码来源:losses_impl.py
示例8: model
def model():
    """Builds the single-hidden-layer MLP classification graph.

    Creates input placeholders, one dropout-regularized fully connected
    hidden layer, the softmax output, loss/accuracy metrics, and a
    gradient-clipped Adam training op. Relies on module-level
    hyperparameters (`num_features`, `num_classes`, `learning_rate`,
    `clip_norm`) and on layer helpers (`dropout`, `fully_connected`,
    `batch_norm`, `relu`) defined elsewhere in the file.

    Returns:
        Tuple of (X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss,
        accuracy, train_op, global_step).
    """
    tf.set_random_seed(1)
    print("building model ...")
    with tf.variable_scope('train'):
        print("building model ...")
        # Inputs: [batch, num_features] floats and [batch] int32 class labels.
        X_pl = tf.placeholder(tf.float32, [None, num_features])
        print("X_pl", X_pl.get_shape())
        t_pl = tf.placeholder(tf.int32, [None,])
        print("t_pl", t_pl.get_shape())
        is_training_pl = tf.placeholder(tf.bool)
        # NOTE(review): X_bn is computed but the hidden layer below consumes
        # X_pl, so the batch-normalized input appears unused — confirm intent.
        X_bn = batch_norm(X_pl, is_training=is_training_pl)
        print("X_bn", X_bn.get_shape())
        l1 = fully_connected(X_pl, num_outputs=100, activation_fn=relu)#, normalizer_fn=batch_norm)
        print("l1", l1.get_shape())
        l1_drop = dropout(l1, is_training=is_training_pl)
        print("l1_drop", l1_drop.get_shape())
        l_out = fully_connected(l1_drop, num_outputs=num_classes, activation_fn=None)
        print("l_out", l_out.get_shape())
        l_out_softmax = tf.nn.softmax(l_out)
        tf.contrib.layers.summarize_variables()
    with tf.variable_scope('metrics'):
        # Per-example cross-entropy (legacy positional signature:
        # logits, labels), then mean over the batch.
        loss = sparse_softmax_cross_entropy_with_logits(l_out, t_pl)
        print("loss", loss.get_shape())
        loss = tf.reduce_mean(loss)
        print("loss", loss.get_shape())
        tf.summary.scalar('train/loss', loss)
        argmax = tf.to_int32(tf.argmax(l_out, 1))
        print("argmax", argmax.get_shape())
        correct = tf.to_float(tf.equal(argmax, t_pl))
        print("correct,", correct.get_shape())
        accuracy = tf.reduce_mean(correct)
        print("accuracy", accuracy.get_shape())
    with tf.variable_scope('optimizer'):
        print("building optimizer ...")
        global_step = tf.Variable(0, name='global_step', trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        gradients, variables = zip(*grads_and_vars)
        # Clip by global norm before applying updates.
        clipped_gradients, global_norm = (
            tf.clip_by_global_norm(gradients, clip_norm))
        clipped_grads_and_vars = zip(clipped_gradients, variables)
        tf.summary.scalar('train/global_gradient_norm', global_norm)
        train_op = optimizer.apply_gradients(clipped_grads_and_vars, global_step=global_step)
    return X_pl, t_pl, is_training_pl, l_out, l_out_softmax, loss, accuracy, train_op, global_step
开发者ID:alrojo,项目名称:EEG_DauwelsLab,代码行数:49,代码来源:mlp.py
示例9: deprecated_flipped_sparse_softmax_cross_entropy_with_logits
def deprecated_flipped_sparse_softmax_cross_entropy_with_logits(logits,
                                                                labels,
                                                                name=None):
  """Sparse softmax cross entropy with the legacy (logits, labels) order.

  Identical to `tf.nn.sparse_softmax_cross_entropy_with_logits` except that
  arguments arrive in the flipped, pre-1.0 order; the call is simply
  forwarded with keyword arguments.

  Measures the probability error in discrete classification tasks where the
  classes are mutually exclusive (each entry belongs to exactly one class).
  Soft classes are not allowed: `labels` must give a single class index for
  each row of `logits`. For a full probability distribution per entry, use
  `softmax_cross_entropy_with_logits` instead.

  **WARNING:** Expects unscaled logits — the op applies softmax internally
  for efficiency, so passing the output of `softmax` produces incorrect
  results.

  The common case is `logits` of shape `[batch_size, num_classes]` with
  `labels` of shape `[batch_size]`; higher dimensions are supported too.

  Args:
    logits: Unscaled log probabilities of rank `r` and shape
      `[d_0, d_1, ..., d_{r-2}, num_classes]`, dtype `float32` or `float64`.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]`, dtype `int32` or
      `int64`, each entry an index in `[0, num_classes)`. Other values raise
      on CPU and yield `NaN` loss/gradient rows on GPU.
    name: Optional name for the operation.

  Returns:
    A `Tensor` shaped like `labels`, typed like `logits`, holding the
    softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (rank must be >= 1) or if the rank of
      labels is not rank(logits) - 1.
  """
  return nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name=name)
开发者ID:AbhinavJain13,项目名称:tensorflow,代码行数:48,代码来源:cross_entropy.py
示例10: _loss
def _loss(self, logits, target, weight_tensor):
  """Builds the scalar training loss for this head.

  Uses squared error when `self._n_classes < 2`, sigmoid cross-entropy for
  binary classification, and sparse softmax cross-entropy otherwise. With
  `weight_tensor` set, returns the weighted loss sum divided by the total
  weight; otherwise the unweighted mean.
  """
  if self._n_classes < 2:
    loss_vec = math_ops.square(logits - math_ops.to_float(target))
  elif self._n_classes == 2:
    loss_vec = nn.sigmoid_cross_entropy_with_logits(
        logits, math_ops.to_float(target))
  else:
    flat_target = array_ops.reshape(target, [-1])
    loss_vec = nn.sparse_softmax_cross_entropy_with_logits(logits, flat_target)
  if weight_tensor is None:
    return math_ops.reduce_mean(loss_vec, name="loss")
  flat_losses = array_ops.reshape(loss_vec, shape=(-1,))
  flat_weights = array_ops.reshape(weight_tensor, shape=(-1,))
  weighted = math_ops.mul(flat_losses, flat_weights)
  # Normalize by the total weight rather than the batch size.
  return math_ops.div(
      math_ops.reduce_sum(weighted),
      math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
      name="loss")
开发者ID:ninotoshi,项目名称:tensorflow,代码行数:16,代码来源:dnn_linear_combined.py
示例11: sparse_softmax_cross_entropy
def sparse_softmax_cross_entropy(
    labels, logits, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.WEIGHTED_SUM_BY_NONZERO_WEIGHTS):
  """Sparse softmax cross-entropy with optional per-sample weighting.

  Thin wrapper over `tf.nn.sparse_softmax_cross_entropy_with_logits` that
  squeezes compatible dimensions, applies `weights`, and reduces according
  to `reduction`. A scalar `weights` scales the loss; a `[batch_size]`
  tensor weights each sample individually.

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank
      of `labels` and result) and dtype `int32` or `int64`. Each entry must
      be an index in `[0, num_classes)`; other values raise on CPU and yield
      `NaN` loss/gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]`, dtype `float32` or `float64`.
    weights: Coefficients for the loss; scalar or of the same rank as
      `labels`.
    scope: Optional name scope for the loss operations.
    loss_collection: Collection the loss is added to.
    reduction: Type of reduction to apply to the loss.

  Returns:
    A scalar `Tensor` holding the weighted loss.

  Raises:
    ValueError: If the shapes of logits, labels, and weight are
      incompatible, or if `weights` is None.
  """
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # `labels` holds class IDs while `logits` carries one score per class,
    # so the expected rank difference is exactly 1.
    labels, logits, weights = _remove_squeezable_dimensions(
        labels, logits, weights, expected_rank_diff=1)
    raw_losses = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    return compute_weighted_loss(
        raw_losses, weights, scope, loss_collection, reduction=reduction)
开发者ID:hailingc,项目名称:tensorflow,代码行数:44,代码来源:losses_impl.py
示例12: _loss_vec
def _loss_vec(self, logits, target):
  """Returns the per-example loss vector for binary or multiclass heads."""
  if self._n_classes == 2:
    # sigmoid_cross_entropy_with_logits requires [batch_size, 1] target.
    if len(target.get_shape()) == 1:
      target = array_ops.expand_dims(target, dim=[1])
    return nn.sigmoid_cross_entropy_with_logits(
        logits, math_ops.to_float(target))
  # Check that we got int32/int64 for classification.
  dtype_ok = (target.dtype.is_compatible_with(dtypes.int64) or
              target.dtype.is_compatible_with(dtypes.int32))
  if not dtype_ok:
    raise ValueError("Target's dtype should be int32, int64 or compatible. "
                     "Instead got %s." % target.dtype)
  # sparse_softmax_cross_entropy_with_logits requires [batch_size] target.
  if len(target.get_shape()) == 2:
    target = array_ops.squeeze(target, squeeze_dims=[1])
  return nn.sparse_softmax_cross_entropy_with_logits(logits, target)
开发者ID:Ambier,项目名称:tensorflow,代码行数:19,代码来源:dnn_linear_combined.py
示例13: sparse_softmax_cross_entropy
def sparse_softmax_cross_entropy(labels, logits, weights=1.0, scope=None,
                                 loss_collection=ops.GraphKeys.LOSSES):
  """Sparse softmax cross-entropy with optional per-sample weighting.

  Wraps `tf.nn.sparse_softmax_cross_entropy_with_logits`: `labels` is
  flattened to rank 1, the per-example losses are reshaped to
  `[batch_size, 1]` to line up with `weights`, and the result is reduced by
  `compute_weighted_loss`. A scalar `weights` scales the loss; a
  `[batch_size]` tensor weights each sample individually.

  WARNING: `weights` also supports dimensions of 1, but the broadcasting
  does not work as advertised — you get a weighted sum instead of a
  weighted mean for any but the last dimension. Do not rely on the current
  behavior beyond the shapes documented for `weights` below.

  Args:
    labels: `[batch_size, 1]` or `[batch_size]` target labels of dtype
      `int32` or `int64` in the range `[0, num_classes)`.
    logits: `[batch_size, num_classes]` unscaled network outputs.
    weights: Loss coefficients of shape `[batch_size]` or `[batch_size, 1]`.
    scope: Optional name scope for the loss operations.
    loss_collection: Collection the loss is added to.

  Returns:
    A scalar `Tensor` representing the mean loss value.

  Raises:
    ValueError: If the shapes of logits, labels, and weight are
      incompatible, or if `weights` is None.
  """
  scope_values = [logits, labels, weights]
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      scope_values) as scope:
    # Flatten labels to [batch_size], as required by the underlying op.
    batch_size = array_ops.shape(labels)[0]
    labels = array_ops.reshape(labels, shape=[batch_size])
    xent = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    # Reshape losses to [batch_size, 1] to be consistent with weights.
    xent = array_ops.reshape(xent, shape=[array_ops.shape(xent)[0], 1])
    return compute_weighted_loss(xent, weights, scope, loss_collection)
开发者ID:AliMiraftab,项目名称:tensorflow,代码行数:41,代码来源:losses_impl.py
注:本文中的tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论