This article collects and summarizes typical usage examples of the Python function tensorflow.python.ops.sparse_ops.sparse_to_dense. If you have been wondering what exactly sparse_to_dense does, how to call it, or what real usage looks like, the curated code examples below may help.
It presents 20 code examples of the sparse_to_dense function, sorted by popularity by default.
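
Before the examples, here is a minimal sketch of the call pattern they all share, assuming a TF 1.x environment where these tensorflow.python internals resolve (they are not a stable public API; tf.sparse.to_dense is the closest public equivalent in later releases):

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import sparse_ops

# Scatter 1.0 at positions (0, 0) and (1, 2) of a [2, 3] tensor; 0.0 elsewhere.
indices = constant_op.constant([[0, 0], [1, 2]], dtype=dtypes.int64)
dense = sparse_ops.sparse_to_dense(indices, [2, 3], 1.0, 0.0)

with session.Session() as sess:
  print(sess.run(dense))  # [[1. 0. 0.]
                          #  [0. 0. 1.]]
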
Example 1: testShapeInferenceKnownShape

def testShapeInferenceKnownShape(self):
  with self.session(use_gpu=False):
    indices = array_ops.placeholder(dtypes.int64)
    shape = [4, 5, 6]
    output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
    self.assertEqual(output.get_shape(), [4, 5, 6])
    shape = array_ops.placeholder(dtypes.int64, shape=(3,))
    output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
    self.assertEqual(output.get_shape().as_list(), [None, None, None])

Author: JonathanRaiman | Project: tensorflow | Lines: 11 | Source: sparse_to_dense_op_py_test.py
Example 2: testSparseExpandDims

def testSparseExpandDims(self):
  for rank in range(1, 4):
    # Create a dummy input. When rank=3, shape=[2, 4, 6].
    shape = np.arange(1, rank + 1) * 2
    before = np.arange(np.prod(shape)).reshape(shape)
    # Make entries sparse.
    before *= np.random.binomial(1, .2, before.shape)
    dense_shape = before.shape
    indices = np.array(np.where(before)).T
    values = before[before != 0]

    # Try every possible valid value of axis.
    for axis in range(-rank - 1, rank):
      expected_after = np.expand_dims(before, axis)

      for axis_as_tensor in [False, True]:
        dense_shape_t = constant_op.constant(dense_shape, dtype=dtypes.int64)
        indices_t = constant_op.constant(indices)
        values_t = constant_op.constant(values)
        before_t = sparse_tensor.SparseTensor(
            indices=indices_t, values=values_t, dense_shape=dense_shape_t)

        if axis_as_tensor:
          axis = constant_op.constant(axis)

        s = sparse_ops.sparse_expand_dims(before_t, axis)
        d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
        self.assertAllEqual(self.evaluate(d), expected_after)

Author: AnishShah | Project: tensorflow | Lines: 29 | Source: sparse_ops_test.py
Example 3: one_hot_mask

def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at
  each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with ops.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = math_ops.to_int32(array_ops.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = array_ops.reshape(math_ops.range(0, sparse_size, 1), [-1, 1])
    concated = array_ops.concat([indices, sparse_labels], 1)
    dense_result = sparse_ops.sparse_to_dense(concated,
                                              [sparse_size, num_classes], 1.0,
                                              0.0)
    result = array_ops.reshape(dense_result, [height, width, num_classes])
    return result

Author: 1000sprites | Project: tensorflow | Lines: 27 | Source: misc.py
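
The key move in one_hot_mask is pairing each flattened pixel index with its label id and scattering 1.0 at those (pixel, class) coordinates. A NumPy-only sketch of the same construction (variable names here are illustrative, not from the original module):

import numpy as np

labels = np.array([2, 0, 1])                  # one class id per pixel, flattened
num_classes = 3
rows = np.arange(labels.size)                 # plays the role of the range() indices above
one_hot = np.zeros((labels.size, num_classes), dtype=np.float32)
one_hot[rows, labels] = 1.0                   # same effect as the sparse_to_dense call
# one_hot == [[0, 0, 1], [1, 0, 0], [0, 1, 0]]
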
Example 4: _TopKGrad

def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t. the input of
    TopK, and the second being the gradient w.r.t. the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros([], dtype=dtypes.int32)]

Author: Jackhuang945 | Project: tensorflow | Lines: 34 | Source: nn_grad.py
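
The index arithmetic above linearizes per-row top-k indices into positions in the flattened input, so a single sparse_to_dense call can scatter the incoming gradient. A small NumPy walk-through with made-up values:

import numpy as np

ind_2d = np.array([[3, 1],                    # top-2 indices within row 0
                   [0, 2]])                   # top-2 indices within row 1
in_lastdim = 4                                # size of the input's last dimension
outerdim = ind_2d.shape[0]
offsets = np.arange(0, outerdim * in_lastdim, in_lastdim)[:, None]  # [[0], [4]]
ind = (ind_2d + offsets).reshape(-1)          # [3, 1, 4, 6]: flat scatter targets
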
Example 5: _check

def _check(self, result_tensor, result_np, input_sp_t):
  self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
  self.assertAllEqual(input_sp_t.shape.eval(), result_tensor.shape.eval())
  res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
                                             result_tensor.shape,
                                             result_tensor.values).eval()
  self.assertAllEqual(res_densified, result_np)

Author: 0-T-0 | Project: tensorflow | Lines: 8 | Source: sparse_ops_test.py
Example 6: test_one

def test_one(n, m, as_tensors):
  expected = np.eye(n, m)
  if as_tensors:
    m = constant_op.constant(m)
    n = constant_op.constant(n)
  s = sparse_ops.sparse_eye(n, m)
  d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
  self.assertAllEqual(self.evaluate(d), expected)

Author: AnishShah | Project: tensorflow | Lines: 8 | Source: sparse_ops_test.py
Example 7: _check

def _check(self, result_tensor, result_np, input_sp_t):
  self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
  self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
  self.assertAllEqual(input_sp_t.indices, result_tensor.indices)
  self.assertAllEqual(input_sp_t.dense_shape, result_tensor.dense_shape)
  res_densified = sparse_ops.sparse_to_dense(
      result_tensor.indices, result_tensor.dense_shape, result_tensor.values)
  self.assertAllEqual(result_np, res_densified)

Author: Wajih-O | Project: tensorflow | Lines: 9 | Source: sparse_ops_test.py
Example 8: sufficient_statistics

def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted. See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: A `Tensor` containing the value by which to shift the data for
      numerical stability, or `None` if no shift is to be performed. A shift
      close to the true mean provides the most numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:
    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is None.
  """
  with ops.op_scope([x, axes, shift], name, "sufficient_statistics"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if x_shape.is_fully_defined():
      counts = 1
      m_shape = []
      for d in xrange(x_shape.ndims):
        dim = x_shape[d].value
        if d in set(axes):
          counts *= dim
          dim = 1
        m_shape.append(dim)
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_shape = array_ops.shape(x)
      select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape),
                                               True, False)
      m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape),
                                x_shape)
      counts = math_ops.cast(
          math_ops.reduce_prod(x_shape / m_shape),
          x.dtype,
          name="count")
    if shift is not None:
      shift = ops.convert_to_tensor(shift, name="shift")
      m_ss = math_ops.sub(x, shift)
      v_ss = math_ops.squared_difference(x, shift)
    else:  # no shift.
      m_ss = x
      v_ss = math_ops.square(x)
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
    return counts, m_ss, v_ss, shift

Author: 285219011 | Project: hello-world | Lines: 56 | Source: nn.py
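
For intuition about what the four returned statistics buy you, here is a NumPy sketch (with made-up values) of how the mean and variance are recovered from counts, m_ss, v_ss and the shift, and why shifting helps when values sit far from zero:

import numpy as np

x = np.array([1e6 + 1.0, 1e6 + 2.0, 1e6 + 3.0])
shift = x[0]                                 # any value near the true mean works
counts = x.size
m_ss = np.sum(x - shift)                     # shifted sum -> 3.0
v_ss = np.sum((x - shift) ** 2)              # shifted sum of squares -> 5.0
mean = m_ss / counts + shift                 # 1e6 + 2.0
variance = v_ss / counts - (m_ss / counts) ** 2   # 2/3, stable despite the 1e6 offset
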
Example 9: sequence_loss_by_example

def sequence_loss_by_example(logits, targets, weights, num_decoder_symbols,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: list of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: list of 1D batch-sized int32 Tensors of the same length as logits.
    weights: list of 1D batch-sized float-Tensors of the same length as logits.
    num_decoder_symbols: integer, number of decoder symbols (output classes).
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: the log-perplexity for each sequence.

  Raises:
    ValueError: if len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    batch_size = array_ops.shape(targets[0])[0]
    log_perp_list = []
    length = batch_size * num_decoder_symbols
    for i in xrange(len(logits)):
      if softmax_loss_function is None:
        # TODO(lukaszkaiser): There is no SparseCrossEntropy in TensorFlow, so
        # we need to first cast targets into a dense representation, and as
        # SparseToDense does not accept batched inputs, we need to do this by
        # re-indexing and re-sizing. When TensorFlow adds SparseCrossEntropy,
        # rewrite this method.
        indices = targets[i] + num_decoder_symbols * math_ops.range(batch_size)
        with ops.device("/cpu:0"):  # Sparse-to-dense must be on CPU for now.
          dense = sparse_ops.sparse_to_dense(
              indices, array_ops.expand_dims(length, 0), 1.0, 0.0)
        target = array_ops.reshape(dense, [-1, num_decoder_symbols])
        crossent = nn_ops.softmax_cross_entropy_with_logits(
            logits[i], target, name="SequenceLoss/CrossEntropy{0}".format(i))
      else:
        crossent = softmax_loss_function(logits[i], targets[i])
      log_perp_list.append(crossent * weights[i])
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps

Author: hlt-mt | Project: tensorflow | Lines: 54 | Source: seq2seq.py
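
The re-indexing described in the TODO comment is worth spelling out: each example's class id is offset by num_decoder_symbols times its row number, so one flat sparse_to_dense call yields a whole batch of one-hot rows. In NumPy terms (illustrative values):

import numpy as np

targets_i = np.array([2, 0])                          # class id per example
num_decoder_symbols = 3
batch_size = targets_i.size
indices = targets_i + num_decoder_symbols * np.arange(batch_size)  # [2, 3]
dense = np.zeros(batch_size * num_decoder_symbols, dtype=np.float32)
dense[indices] = 1.0
target = dense.reshape(-1, num_decoder_symbols)       # [[0, 0, 1], [1, 0, 0]]
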
Example 10: _SparseToDense

def _SparseToDense(sparse_indices,
                   output_size,
                   sparse_values,
                   default_value,
                   validate_indices=True):
  return sparse_ops.sparse_to_dense(
      sparse_indices,
      output_size,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices)

Author: JonathanRaiman | Project: tensorflow | Lines: 11 | Source: sparse_to_dense_op_py_test.py
Example 11: _SparseToDense

def _SparseToDense(sparse_indices,
                   output_size,
                   sparse_values,
                   default_value,
                   validate_indices=True):
  feed_sparse_indices = array_ops.placeholder(dtypes.int32)
  feed_dict = {feed_sparse_indices: sparse_indices}
  return sparse_ops.sparse_to_dense(
      feed_sparse_indices,
      output_size,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices).eval(feed_dict=feed_dict)

Author: Albert-Z-Guo | Project: tensorflow | Lines: 13 | Source: sparse_to_dense_op_test.py
Example 12: _sparse_vs_dense_xent_benchmark_dense

def _sparse_vs_dense_xent_benchmark_dense(labels, logits):
  labels = tf.identity(labels)
  logits = tf.identity(logits)
  with tf.device("/cpu:0"):  # Sparse-to-dense must be on CPU
    batch_size = tf.shape(logits)[0]
    num_entries = tf.shape(logits)[1]
    length = batch_size * num_entries
    labels += num_entries * tf.range(batch_size)
    target = sparse_ops.sparse_to_dense(labels, tf.pack([length]), 1.0, 0.0)
  target = tf.reshape(target, tf.pack([-1, num_entries]))
  crossent = tf.nn.softmax_cross_entropy_with_logits(
      logits, target, name="SequenceLoss/CrossEntropy")
  crossent_sum = tf.reduce_sum(crossent)
  grads = tf.gradients([crossent_sum], [logits])[0]
  return (crossent_sum, grads)

Author: pronobis | Project: tensorflow | Lines: 15 | Source: sparse_xent_op_test.py
Example 13: _apply_transform

def _apply_transform(self, input_tensors):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  s = input_tensors[0]
  # pylint: disable=not-callable
  return self.return_type(sparse_ops.sparse_to_dense(
      s.indices, s.shape, s.values, default_value=self.default_value))

Author: Aruhs | Project: tensorflow | Lines: 15 | Source: densify.py
Example 14: _zero_out_float_grad

def _zero_out_float_grad(op, grad):
  """The gradients for `zero_out_float`.

  Args:
    op: The `zero_out_float` `Operation` that we are differentiating, which we
      can use to find the inputs and outputs of the original op.
    grad: Gradient with respect to the output of the `zero_out_float` op.

  Returns:
    Gradients with respect to the input of `zero_out_float`.
  """
  to_zero = op.inputs[0]
  shape = array_ops.shape(to_zero)
  index = array_ops.zeros_like(shape)
  first_grad = array_ops.reshape(grad, [-1])[0]
  to_zero_grad = sparse_ops.sparse_to_dense([index], shape, first_grad, 0)
  return [to_zero_grad]  # List of one Tensor, since we have one input

Author: niurouli | Project: SWEM | Lines: 17 | Source: py_zero_out.py
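
Since zero_out_float keeps only the first element of its input, only the first element of grad flows back, landing at index (0, ..., 0). A NumPy sketch of the equivalent scatter (hypothetical values):

import numpy as np

grad = np.array([[0.1, 0.2], [0.3, 0.4]])     # hypothetical upstream gradient
shape = grad.shape
first_grad = grad.reshape(-1)[0]              # 0.1
to_zero_grad = np.zeros(shape, dtype=grad.dtype)
to_zero_grad[(0,) * len(shape)] = first_grad  # only position (0, 0) gets gradient
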
Example 15: _compute_sampled_logits

#......... part of the code omitted here .........
  labels = math_ops.cast(labels, dtypes.int64)
  labels_flat = array_ops.reshape(labels, [-1])

  # Sample the negative labels.
  #   sampled shape: [num_sampled] tensor
  #   true_expected_count shape = [batch_size, 1] tensor
  #   sampled_expected_count shape = [num_sampled] tensor
  if sampled_values is None:
    sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
        true_classes=labels,
        num_true=num_true,
        num_sampled=num_sampled,
        unique=True,
        range_max=num_classes)
  # NOTE: pylint cannot tell that 'sampled_values' is a sequence
  # pylint: disable=unpacking-non-sequence
  sampled, true_expected_count, sampled_expected_count = sampled_values
  # pylint: enable=unpacking-non-sequence

  # labels_flat is a [batch_size * num_true] tensor
  # sampled is a [num_sampled] int tensor
  all_ids = array_ops.concat(0, [labels_flat, sampled])

  # weights shape is [num_classes, dim]
  all_w = embedding_ops.embedding_lookup(
      weights, all_ids, partition_strategy=partition_strategy)
  all_b = embedding_ops.embedding_lookup(biases, all_ids)
  # true_w shape is [batch_size * num_true, dim]
  # true_b is a [batch_size * num_true] tensor
  true_w = array_ops.slice(
      all_w, [0, 0], array_ops.pack([array_ops.shape(labels_flat)[0], -1]))
  true_b = array_ops.slice(all_b, [0], array_ops.shape(labels_flat))

  # inputs shape is [batch_size, dim]
  # true_w shape is [batch_size * num_true, dim]
  # row_wise_dots is [batch_size, num_true, dim]
  dim = array_ops.shape(true_w)[1:2]
  new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])
  row_wise_dots = math_ops.mul(
      array_ops.expand_dims(inputs, 1),
      array_ops.reshape(true_w, new_true_w_shape))
  # We want the row-wise dot plus biases which yields a
  # [batch_size, num_true] tensor of true_logits.
  dots_as_matrix = array_ops.reshape(row_wise_dots,
                                     array_ops.concat(0, [[-1], dim]))
  true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
  true_b = array_ops.reshape(true_b, [-1, num_true])
  true_logits += true_b

  # Lookup weights and biases for sampled labels.
  #   sampled_w shape is [num_sampled, dim]
  #   sampled_b is a [num_sampled] float tensor
  sampled_w = array_ops.slice(
      all_w, array_ops.pack([array_ops.shape(labels_flat)[0], 0]), [-1, -1])
  sampled_b = array_ops.slice(all_b, array_ops.shape(labels_flat), [-1])

  # inputs has shape [batch_size, dim]
  # sampled_w has shape [num_sampled, dim]
  # sampled_b has shape [num_sampled]
  # Apply X*W'+B, which yields [batch_size, num_sampled]
  sampled_logits = math_ops.matmul(inputs,
                                   sampled_w,
                                   transpose_b=True) + sampled_b

  if remove_accidental_hits:
    acc_hits = candidate_sampling_ops.compute_accidental_hits(
        labels, sampled, num_true=num_true)
    acc_indices, acc_ids, acc_weights = acc_hits

    # This is how SparseToDense expects the indices.
    acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
    acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(
        acc_ids, dtypes.int32), [-1, 1])
    sparse_indices = array_ops.concat(
        1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices")
    # Create sampled_logits_shape = [batch_size, num_sampled]
    sampled_logits_shape = array_ops.concat(
        0,
        [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])
    if sampled_logits.dtype != acc_weights.dtype:
      acc_weights = math_ops.cast(acc_weights, sampled_logits.dtype)
    sampled_logits += sparse_ops.sparse_to_dense(
        sparse_indices, sampled_logits_shape, acc_weights,
        default_value=0.0, validate_indices=False)

  if subtract_log_q:
    # Subtract log of Q(l), prior probability that l appears in sampled.
    true_logits -= math_ops.log(true_expected_count)
    sampled_logits -= math_ops.log(sampled_expected_count)

  # Construct output logits and labels. The true labels/logits start at col 0.
  out_logits = array_ops.concat(1, [true_logits, sampled_logits])
  # true_logits is a float tensor, ones_like(true_logits) is a float tensor
  # of ones. We then divide by num_true to ensure the per-example labels sum
  # to 1.0, i.e. form a proper probability distribution.
  out_labels = array_ops.concat(
      1, [array_ops.ones_like(true_logits) / num_true,
          array_ops.zeros_like(sampled_logits)])

  return out_logits, out_labels

Author: BersaKAIN | Project: tensorflow | Lines: 101 | Source: nn.py
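
In the remove_accidental_hits branch, sparse_to_dense acts as a scatter-add: each (example row, sampled column) pair that collides with a true label receives its acc_weights value (a large negative number, per compute_accidental_hits), effectively masking that logit. A NumPy sketch of the update, with hypothetical values:

import numpy as np

batch_size, num_sampled = 2, 3
sampled_logits = np.zeros((batch_size, num_sampled))
acc_indices = np.array([0])        # example row of each accidental hit
acc_ids = np.array([1])            # position within `sampled` that collided
acc_weights = np.array([-1e9])     # large negative weight masks the logit
sampled_logits[acc_indices, acc_ids] += acc_weights
# sampled_logits[0, 1] is now -1e9; softmax assigns it ~zero probability
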
Example 16: _flat_map_fn

def _flat_map_fn(x):
  return dataset_ops.Dataset.from_tensor_slices(
      sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

Author: AbhinavJain13 | Project: tensorflow | Lines: 3 | Source: flat_map_dataset_op_test.py
Example 17: sufficient_statistics

def sufficient_statistics(x, axes, shift=False, keep_dims=False, name=None):
  """Calculate the sufficient statistics for the mean and variance of `x`.

  These sufficient statistics are computed using the one pass algorithm on
  an input that's optionally shifted using the value of the 1st element in `x`.
  See:
  https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data

  Unfortunately, in some cases using a random individual sample as the shift
  value leads experimentally to very poor numerical stability, so it is
  disabled by default. The one-pass approach might have to be revised
  accordingly.

  Args:
    x: A `Tensor`.
    axes: Array of ints. Axes along which to compute mean and variance.
    shift: If true, shift the data to provide more numerically stable results.
    keep_dims: produce statistics with the same dimensionality as the input.
    name: Name used to scope the operations that compute the sufficient stats.

  Returns:
    Four `Tensor` objects of the same type as `x`:
    * the count (number of elements to average over).
    * the (possibly shifted) sum of the elements in the array.
    * the (possibly shifted) sum of squares of the elements in the array.
    * the shift by which the mean must be corrected or None if `shift` is False.
  """
  with ops.op_scope([x, axes], name, "sufficient_statistics"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if x_shape.is_fully_defined():
      counts = 1
      m_shape = []
      for d in xrange(x_shape.ndims):
        dim = x_shape[d].value
        if d in set(axes):
          counts *= dim
          dim = 1
        m_shape.append(dim)
      counts = constant_op.constant(counts, dtype=x.dtype)
    else:  # shape needs to be inferred at runtime.
      x_shape = array_ops.shape(x)
      select_axes = sparse_ops.sparse_to_dense(axes, array_ops.shape(x_shape),
                                               True, False)
      m_shape = math_ops.select(select_axes, array_ops.ones_like(x_shape),
                                x_shape)
      counts = math_ops.cast(
          math_ops.reduce_prod(x_shape / m_shape),
          x.dtype,
          name="count")
    if shift:
      shift_value = array_ops.slice(x, array_ops.zeros_like(m_shape), m_shape)
      m_ss = math_ops.sub(x, shift_value)
      v_ss = math_ops.squared_difference(x, shift_value)
      if keep_dims:
        shift_value = array_ops.identity(shift_value, name="shift")
      else:
        shift_value = array_ops.squeeze(shift_value,
                                        squeeze_dims=axes,
                                        name="shift")
    else:  # not shift.
      m_ss = x
      v_ss = math_ops.square(x)
      shift_value = None
    m_ss = math_ops.reduce_sum(m_ss, axes, keep_dims=keep_dims, name="mean_ss")
    v_ss = math_ops.reduce_sum(v_ss, axes, keep_dims=keep_dims, name="var_ss")
    return counts, m_ss, v_ss, shift_value

Author: 01- | Project: tensorflow | Lines: 65 | Source: nn.py
Example 18: _compute_sampled_logits

#......... part of the code omitted here .........
  Returns:
    out_logits, out_labels: tensors with shape [batch_size, num_true +
      num_sampled] for passing to either SigmoidCrossEntropyWithLogits (NCE)
      or SoftmaxCrossEntropyWithLogits (sampled softmax).
  """
  with ops.op_scope(
      [weights, biases, inputs, labels], name, "compute_sampled_logits"):
    if labels.dtype != types.int64:
      labels = math_ops.cast(labels, types.int64)
    labels_flat = array_ops.reshape(labels, [-1])

    # Sample the negative labels.
    #   sampled shape: num_sampled vector
    #   true_expected_count shape = [batch_size, 1]
    #   sampled_expected_count shape = num_sampled vector
    if sampled_values is None:
      sampled_values = candidate_sampling_ops.log_uniform_candidate_sampler(
          true_classes=labels,
          num_true=num_true,
          num_sampled=num_sampled,
          unique=True,
          range_max=num_classes)
    # NOTE: pylint cannot tell that 'sampled_values' is a sequence
    # pylint: disable=unpacking-non-sequence
    sampled, true_expected_count, sampled_expected_count = sampled_values
    # pylint: enable=unpacking-non-sequence

    # weights shape is [num_classes, dim]
    # labels_flat is a [batch_size * num_true] vector
    # true_w shape is [batch_size * num_true, dim]
    # true_b is a [batch_size * num_true] vector
    true_w = embedding_ops.embedding_lookup(weights, labels_flat)
    true_b = embedding_ops.embedding_lookup(biases, labels_flat)

    # inputs shape is [batch_size, dim]
    # true_w shape is [batch_size * num_true, dim]
    # row_wise_dots is [batch_size, num_true, dim]
    dim = array_ops.shape(true_w)[1:2]
    new_true_w_shape = array_ops.concat(0, [[-1, num_true], dim])
    row_wise_dots = math_ops.mul(
        array_ops.expand_dims(inputs, 1),
        array_ops.reshape(true_w, new_true_w_shape))
    # We want the row-wise dot plus biases which yields a
    # [batch_size, num_true] tensor of true_logits.
    dots_as_matrix = array_ops.reshape(row_wise_dots,
                                       array_ops.concat(0, [[-1], dim]))
    true_logits = array_ops.reshape(_sum_rows(dots_as_matrix), [-1, num_true])
    true_b = array_ops.reshape(true_b, [-1, num_true])
    true_logits += true_b

    # Lookup weights and biases for sampled labels.
    #   sampled is a num_sampled int vector
    #   sampled_w shape is [num_sampled, dim]
    #   sampled_b is a num_sampled float vector
    sampled_w = embedding_ops.embedding_lookup(weights, sampled)
    sampled_b = embedding_ops.embedding_lookup(biases, sampled)

    # inputs has shape [batch_size, dim]
    # sampled_w has shape [num_sampled, dim]
    # sampled_b has shape [num_sampled]
    # Apply X*W'+B, which yields [batch_size, num_sampled]
    sampled_logits = math_ops.matmul(inputs,
                                     sampled_w,
                                     transpose_b=True) + sampled_b

    if remove_accidental_hits:
      acc_hits = candidate_sampling_ops.compute_accidental_hits(
          labels, sampled, num_true=num_true)
      acc_indices, acc_ids, acc_weights = acc_hits

      # This is how SparseToDense expects the indices.
      acc_indices_2d = array_ops.reshape(acc_indices, [-1, 1])
      acc_ids_2d_int32 = array_ops.reshape(math_ops.cast(
          acc_ids, types.int32), [-1, 1])
      sparse_indices = array_ops.concat(
          1, [acc_indices_2d, acc_ids_2d_int32], "sparse_indices")
      # Create sampled_logits_shape = [batch_size, num_sampled]
      sampled_logits_shape = array_ops.concat(
          0,
          [array_ops.shape(labels)[:1], array_ops.expand_dims(num_sampled, 0)])
      sampled_logits += sparse_ops.sparse_to_dense(
          sparse_indices, sampled_logits_shape, acc_weights, 0.0)

    if subtract_log_q:
      # Subtract log of Q(l), prior probability that l appears in sampled.
      true_logits -= math_ops.log(true_expected_count)
      sampled_logits -= math_ops.log(sampled_expected_count)

    # Construct output logits and labels. The true labels/logits start at col 0.
    out_logits = array_ops.concat(1, [true_logits, sampled_logits])
    # true_logits is a float tensor, ones_like(true_logits) is a float tensor
    # of ones. We then divide by num_true to ensure the per-example labels sum
    # to 1.0, i.e. form a proper probability distribution.
    out_labels = array_ops.concat(
        1, [array_ops.ones_like(true_logits) / num_true,
            array_ops.zeros_like(sampled_logits)])

    return out_logits, out_labels

Author: adeelzaman | Project: tensorflow | Lines: 101 | Source: nn.py
Example 19: testZeroDefault

def testZeroDefault(self):
  with self.cached_session():
    x = sparse_ops.sparse_to_dense(2, [4], 7).eval()
    self.assertAllEqual(x, [0, 0, 7, 0])

Author: JonathanRaiman | Project: tensorflow | Lines: 4 | Source: sparse_to_dense_op_py_test.py
Example 20: testShapeInferenceUnknownShape

def testShapeInferenceUnknownShape(self):
  with self.session(use_gpu=False):
    indices = array_ops.placeholder(dtypes.int64)
    shape = array_ops.placeholder(dtypes.int64)
    output = sparse_ops.sparse_to_dense(indices, shape, 1, 0)
    self.assertEqual(output.get_shape().ndims, None)

Author: JonathanRaiman | Project: tensorflow | Lines: 6 | Source: sparse_to_dense_op_py_test.py
Note: The tensorflow.python.ops.sparse_ops.sparse_to_dense examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community; copyright remains with the original authors, and redistribution or reuse must follow each project's license. Do not republish without permission.