This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.logical_not. If you have been wondering what exactly logical_not does and how to use it, the hand-picked code examples below should help.
The following presents 20 code examples of logical_not, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
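Before diving into the examples, here is a minimal sketch of what logical_not computes, using the public tf.logical_not alias (input values are illustrative; TF 2.x eager execution is assumed):

import tensorflow as tf

mask = tf.constant([True, False, True])
print(tf.logical_not(mask))  # tf.Tensor([False  True False], shape=(3,), dtype=bool)

The internal math_ops.logical_not used throughout the examples below is the same op; user code normally reaches it through tf.logical_not or tf.math.logical_not.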
Example 1: _apply_transform

def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to
      the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  d = input_tensors[0]
  if self.strip_value is np.nan:
    strip_hot = math_ops.is_nan(d)
  else:
    strip_hot = math_ops.equal(d,
                               array_ops.constant([self.strip_value],
                                                  dtype=d.dtype))
  keep_hot = math_ops.logical_not(strip_hot)
  length = array_ops.reshape(array_ops.shape(d), [])
  indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
  values = array_ops.boolean_mask(d, keep_hot)
  sparse_indices = array_ops.reshape(
      math_ops.cast(indices, dtypes.int64), [-1, 1])
  shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
  # pylint: disable=not-callable
  return self.return_type(ops.SparseTensor(sparse_indices, values, shape))

Author: 821760408-sp | Project: tensorflow | Lines: 31 | Source: sparsify.py
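The transform above strips a sentinel value out of a dense column and re-packs the survivors as a SparseTensor. A standalone sketch of the same pattern with the public API (hypothetical input, NaN as the strip value, TF 2.x eager assumed):

import numpy as np
import tensorflow as tf

d = tf.constant([1.0, np.nan, 3.0, np.nan])
keep_hot = tf.logical_not(tf.math.is_nan(d))          # [True, False, True, False]
indices = tf.boolean_mask(tf.range(tf.shape(d)[0]), keep_hot)
values = tf.boolean_mask(d, keep_hot)
sparse = tf.SparseTensor(
    indices=tf.reshape(tf.cast(indices, tf.int64), [-1, 1]),
    values=values,
    dense_shape=tf.cast(tf.shape(d), tf.int64))
# sparse holds values [1.0, 3.0] at positions [0] and [2].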
Example 2: report_uninitialized_variables

def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `all_variables() + local_variables()`.
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an
    empty 1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = all_variables() + local_variables()
    # Backwards compatibility for old-style variables. TODO(touts): remove.
    if not var_list:
      var_list = []
      for op in ops.get_default_graph().get_operations():
        if op.type in ["Variable", "AutoReloadVariable"]:
          var_list.append(op.outputs[0])
  if not var_list:
    # Return an empty tensor so we only need to check for returned tensor
    # size being 0 as an indication of model ready.
    return array_ops.constant([], dtype=dtypes.string, name=name)
  else:
    # Get a 1-D boolean tensor listing whether each variable is initialized.
    variables_mask = math_ops.logical_not(
        array_ops.pack([state_ops.is_variable_initialized(v)
                        for v in var_list]))
    # Get a 1-D string tensor containing all the variable names.
    variable_names_tensor = array_ops.constant([s.op.name for s in var_list])
    # Return a 1-D tensor containing all the names of uninitialized variables.
    return array_ops.boolean_mask(variable_names_tensor, variables_mask,
                                  name=name)

Author: RuhiSharma | Project: tensorflow | Lines: 34 | Source: variables.py
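As a hedged usage sketch: this function is exposed in TF 1.x as tf.report_uninitialized_variables. Assuming graph mode (reached via tf.compat.v1 here) and two illustrative variables, of which only one is initialized:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

v = tf.get_variable("v", initializer=[1.0])
w = tf.get_variable("w", initializer=[2.0])
with tf.Session() as sess:
  sess.run(v.initializer)  # initialize v only, leave w uninitialized
  print(sess.run(tf.report_uninitialized_variables()))  # -> [b'w']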
Example 3: apply_attention_scores

def apply_attention_scores(self, scores, value, value_mask=None):
  """Applies attention scores to the given value tensor.

  To use this method in your attention layer, follow the steps:

  * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
    `[batch_size, Tv]` to calculate the attention `scores`.
  * Pass `scores` and `value` tensors to this method. The method applies
    `value_mask`, calculates `attention_distribution = softmax(scores)`,
    then returns `matmul(attention_distribution, value)`.
  * Apply `query_mask` and return the result.

  Args:
    scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
    value: Value tensor of shape `[batch_size, Tv, dim]`.
    value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
      If given, will apply the mask such that values at positions where
      `mask==False` do not contribute to the result.

  Returns:
    Tensor of shape `[batch_size, Tq, dim]`.
  """
  if value_mask is not None:
    # Mask of shape [batch_size, 1, Tv] that is True in padding positions.
    padding_mask = array_ops.expand_dims(
        math_ops.logical_not(value_mask), axis=1)
    # Bias so padding positions do not contribute to attention distribution.
    scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
  attention_distribution = nn.softmax(scores)
  return math_ops.matmul(attention_distribution, value)

Author: kylin9872 | Project: tensorflow | Lines: 30 | Source: dense_attention.py
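A minimal standalone sketch of the masking trick above, using the public API and a hypothetical mask where the last of three value positions is padding (TF 2.x eager assumed):

import tensorflow as tf

scores = tf.zeros([1, 2, 3])                     # [batch, Tq, Tv]
value_mask = tf.constant([[True, True, False]])  # last position is padding
padding_mask = tf.expand_dims(tf.logical_not(value_mask), axis=1)
scores -= 1.e9 * tf.cast(padding_mask, tf.float32)
attn = tf.nn.softmax(scores)
# attn ~ [[[0.5, 0.5, 0.], [0.5, 0.5, 0.]]] -- the padded position gets ~0 weight.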
Example 4: report_uninitialized_resources

def report_uninitialized_resources(resource_list=None,
                                   name="report_uninitialized_resources"):
  """Returns the names of all uninitialized resources in resource_list.

  If the returned tensor is empty then all resources have been initialized.

  Args:
    resource_list: resources to check. If None, will use shared_resources() +
      local_resources().
    name: name for the resource-checking op.

  Returns:
    Tensor containing names of the handles of all resources which have not
    yet been initialized.
  """
  if resource_list is None:
    resource_list = shared_resources() + local_resources()
  with ops.name_scope(name):
    # Run all operations on CPU
    with ops.device("/cpu:0"):
      if not resource_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      # Get a 1-D boolean tensor listing whether each resource is initialized.
      variables_mask = math_ops.logical_not(
          array_ops.stack([r.is_initialized for r in resource_list]))
      # Get a 1-D string tensor containing all the resource names.
      variable_names_tensor = array_ops.constant(
          [s.handle.name for s in resource_list])
      # Return a 1-D tensor containing the names of uninitialized resources.
      return array_ops.boolean_mask(variable_names_tensor, variables_mask)

Author: 1000sprites | Project: tensorflow | Lines: 33 | Source: resources.py
Example 5: body

def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
  """Internal while_loop body.

  Args:
    time: scalar int32 tensor.
    outputs_ta: structure of TensorArray.
    state: (structure of) state tensors and TensorArrays.
    inputs: (structure of) input tensors.
    finished: bool tensor (keeping track of what's finished).
    sequence_lengths: int32 tensor (keeping track of time of finish).

  Returns:
    `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
    next_sequence_lengths)`.
  """
  (next_outputs, decoder_state, next_inputs,
   decoder_finished) = decoder.step(time, inputs, state)
  next_finished = math_ops.logical_or(decoder_finished, finished)
  if maximum_iterations is not None:
    next_finished = math_ops.logical_or(
        next_finished, time + 1 >= maximum_iterations)
  next_sequence_lengths = array_ops.where(
      math_ops.logical_and(math_ops.logical_not(finished), next_finished),
      array_ops.fill(array_ops.shape(sequence_lengths), time + 1),
      sequence_lengths)

  nest.assert_same_structure(state, decoder_state)
  nest.assert_same_structure(outputs_ta, next_outputs)
  nest.assert_same_structure(inputs, next_inputs)

  # Zero out output values past finish
  if impute_finished:
    emit = nest.map_structure(
        lambda out, zero: array_ops.where(finished, zero, out),
        next_outputs,
        zero_outputs)
  else:
    emit = next_outputs

  # Copy through states past finish
  def _maybe_copy_state(new, cur):
    # TensorArrays and scalar states get passed through.
    if isinstance(cur, tensor_array_ops.TensorArray):
      pass_through = True
    else:
      new.set_shape(cur.shape)
      pass_through = (new.shape.ndims == 0)
    return new if pass_through else array_ops.where(finished, cur, new)

  if impute_finished:
    next_state = nest.map_structure(
        _maybe_copy_state, decoder_state, state)
  else:
    next_state = decoder_state
  outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
                                  outputs_ta, emit)
  return (time + 1, outputs_ta, next_state, next_inputs, next_finished,
          next_sequence_lengths)

Author: AlbertXiebnu | Project: tensorflow | Lines: 60 | Source: decoder.py
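The `logical_and(logical_not(finished), next_finished)` expression above selects exactly the sequences that finish at this step, so their lengths are recorded once and never overwritten. A small sketch with made-up values (TF 2.x eager assumed):

import tensorflow as tf

finished = tf.constant([False, True, False])
next_finished = tf.constant([True, True, False])
time = tf.constant(4)
sequence_lengths = tf.constant([0, 2, 0])
# True only for sequences finishing exactly at this step.
just_finished = tf.logical_and(tf.logical_not(finished), next_finished)
new_lengths = tf.where(just_finished,
                       tf.fill(tf.shape(sequence_lengths), time + 1),
                       sequence_lengths)
# new_lengths == [5, 2, 0]: the second sequence keeps its earlier length.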
Example 6: _mask_probs

def _mask_probs(probs, eos_token, finished):
  """Masks log probabilities.

  The result is that finished beams allocate all probability mass to eos and
  unfinished beams remain unchanged.

  Args:
    probs: Log probabilities of shape `[batch_size, beam_width, vocab_size]`.
    eos_token: An int32 id corresponding to the EOS token to allocate
      probability to.
    finished: A boolean tensor of shape `[batch_size, beam_width]` that
      specifies which elements in the beam are finished already.

  Returns:
    A tensor of shape `[batch_size, beam_width, vocab_size]`, where unfinished
    beams stay unchanged and finished beams are replaced with a tensor with
    all probability on the EOS token.
  """
  vocab_size = array_ops.shape(probs)[2]
  finished_mask = math_ops.cast(array_ops.expand_dims(finished, 2),
                                probs.dtype)
  not_finished_mask = math_ops.cast(
      array_ops.expand_dims(math_ops.logical_not(finished), 2),
      probs.dtype)
  # These examples are not finished and we leave them
  non_finished_examples = not_finished_mask * probs
  # All finished examples are replaced with a vector that has all
  # probability on EOS
  finished_row = array_ops.one_hot(
      eos_token,
      vocab_size,
      dtype=probs.dtype,
      on_value=0.,
      off_value=probs.dtype.min)
  finished_examples = finished_mask * finished_row
  return finished_examples + non_finished_examples

Author: DjangoPeng | Project: tensorflow | Lines: 35 | Source: beam_search_decoder.py
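A standalone restatement of the same masking with public ops on a hypothetical single-beam batch; the finished beam's row collapses to all log mass on the EOS token (TF 2.x eager assumed):

import tensorflow as tf

probs = tf.math.log(tf.constant([[[0.5, 0.3, 0.2]]]))  # [batch=1, beam=1, vocab=3]
finished = tf.constant([[True]])
eos_token = 1
vocab_size = tf.shape(probs)[2]
finished_row = tf.one_hot(eos_token, vocab_size, dtype=probs.dtype,
                          on_value=0., off_value=probs.dtype.min)
finished_mask = tf.cast(tf.expand_dims(finished, 2), probs.dtype)
not_finished = tf.cast(tf.expand_dims(tf.logical_not(finished), 2), probs.dtype)
masked = finished_mask * finished_row + not_finished * probs
# masked[0, 0] == [dtype.min, 0., dtype.min]: all log mass on token 1.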
Example 7: _apply_scores

def _apply_scores(self, scores, value, scores_mask=None):
  """Applies attention scores to the given value tensor.

  To use this method in your attention layer, follow the steps:

  * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
    `[batch_size, Tv]` to calculate the attention `scores`.
  * Pass `scores` and `value` tensors to this method. The method applies
    `scores_mask`, calculates `attention_distribution = softmax(scores)`,
    then returns `matmul(attention_distribution, value)`.
  * Apply `query_mask` and return the result.

  Args:
    scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
    value: Value tensor of shape `[batch_size, Tv, dim]`.
    scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
      `[batch_size, Tq, Tv]`. If given, scores at positions where
      `scores_mask==False` do not contribute to the result. It must contain
      at least one `True` value in each line along the last dimension.

  Returns:
    Tensor of shape `[batch_size, Tq, dim]`.
  """
  if scores_mask is not None:
    padding_mask = math_ops.logical_not(scores_mask)
    # Bias so padding positions do not contribute to attention distribution.
    scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
  attention_distribution = nn.softmax(scores)
  return math_ops.matmul(attention_distribution, value)

Author: aritratony | Project: tensorflow | Lines: 29 | Source: dense_attention.py
Example 8: _not

def _not(self, x, use_gpu=False):
  np_ans = np.logical_not(x)
  with test_util.device(use_gpu=use_gpu):
    out = math_ops.logical_not(ops.convert_to_tensor(x))
    tf_val = self.evaluate(out)
  self.assertEqual(out.dtype, dtypes_lib.bool)
  self.assertAllEqual(np_ans, tf_val)
  self.assertShapeEqual(np_ans, out)

Author: Wajih-O | Project: tensorflow | Lines: 8 | Source: cwise_ops_test.py
Example 9: next_inputs

def next_inputs(self, time, outputs, state, sample_ids, name=None):
  with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                      [time, outputs, state, sample_ids]):
    (finished, base_next_inputs, state) = (
        super(ScheduledOutputTrainingHelper, self).next_inputs(
            time=time,
            outputs=outputs,
            state=state,
            sample_ids=sample_ids,
            name=name))
    sample_ids = math_ops.cast(sample_ids, dtypes.bool)

    def maybe_sample():
      """Perform scheduled sampling."""

      def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
        """Concatenate outputs with auxiliary inputs, if they exist."""
        if self._auxiliary_input_tas is None:
          return outputs_
        next_time = time + 1
        auxiliary_inputs = nest.map_structure(
            lambda ta: ta.read(next_time), self._auxiliary_input_tas)
        if indices is not None:
          auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
        return nest.map_structure(
            lambda x, y: array_ops.concat((x, y), -1),
            outputs_, auxiliary_inputs)

      if self._next_inputs_fn is None:
        return array_ops.where(
            sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
            base_next_inputs)

      where_sampling = math_ops.cast(
          array_ops.where(sample_ids), dtypes.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
      outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
      inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                                where_not_sampling)
      sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
          self._next_inputs_fn(outputs_sampling), where_sampling)
      base_shape = array_ops.shape(base_next_inputs)
      return (array_ops.scatter_nd(indices=where_sampling,
                                   updates=sampled_next_inputs,
                                   shape=base_shape)
              + array_ops.scatter_nd(indices=where_not_sampling,
                                     updates=inputs_not_sampling,
                                     shape=base_shape))

    all_finished = math_ops.reduce_all(finished)
    no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
    next_inputs = control_flow_ops.cond(
        math_ops.logical_or(all_finished, no_samples),
        lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)

Author: AnddyWang | Project: tensorflow | Lines: 58 | Source: helper.py
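The core trick above: split positions into sampled and non-sampled index sets with tf.where and logical_not, transform only the sampled rows, then recombine the two disjoint sets with two scatter_nd calls. A toy sketch with hypothetical tensors (TF 2.x eager assumed; the sampled rows are passed through untransformed to keep it short):

import tensorflow as tf

sample_ids = tf.constant([True, False, True, False])
outputs = tf.constant([[1.], [2.], [3.], [4.]])
base = tf.constant([[10.], [20.], [30.], [40.]])
where_sampling = tf.cast(tf.where(sample_ids), tf.int32)
where_not = tf.cast(tf.where(tf.logical_not(sample_ids)), tf.int32)
merged = (tf.scatter_nd(where_sampling,
                        tf.gather_nd(outputs, where_sampling),
                        tf.shape(base))
          + tf.scatter_nd(where_not,
                          tf.gather_nd(base, where_not),
                          tf.shape(base)))
# merged == [[1.], [20.], [3.], [40.]]: sampled rows from outputs, the rest from base.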
Example 10: kl_divergence

def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
  """Get the KL-divergence KL(distribution_a || distribution_b).

  If there is no KL method registered specifically for `type(distribution_a)`
  and `type(distribution_b)`, then the class hierarchies of these types are
  searched.
  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.
  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.
  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(distribution_a)`).

  Args:
    distribution_a: The first distribution.
    distribution_b: The second distribution.
    allow_nan_stats: Python `bool`, default `True`. When `True`,
      statistics (e.g., mean, mode, variance) use the value "`NaN`" to
      indicate the result is undefined. When `False`, an exception is raised
      if one or more of the statistic's batch members are undefined.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    A Tensor with the batchwise KL-divergence between `distribution_a`
    and `distribution_b`.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of `distribution_a` and `distribution_b`.
  """
  kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(distribution_a || distribution_b) registered for "
        "distribution_a type %s and distribution_b type %s"
        % (type(distribution_a).__name__, type(distribution_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(distribution_a, distribution_b, name=name)
    if allow_nan_stats:
      return kl_t
    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")
    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan_stats=False). Values:"
             % (distribution_a.name, distribution_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")

Author: AlbertXiebnu | Project: tensorflow | Lines: 58 | Source: kullback_leibler.py
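The NaN guard at the end can be reproduced with public ops. A hedged sketch where a deliberately injected NaN trips the assertion (TF 2.x eager assumed):

import tensorflow as tf

kl_t = tf.constant([0.1, float("nan")])
no_nans = tf.logical_not(tf.reduce_any(tf.math.is_nan(kl_t)))
# In eager mode this raises InvalidArgumentError because kl_t contains a NaN;
# in graph mode it would be wired in via ops.control_dependencies as above.
tf.debugging.Assert(no_nans, ["KL returned NaN values:", kl_t])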
Example 11: _not

def _not(self, x, use_gpu=False):
  np_ans = np.logical_not(x)
  with self.test_session(
      use_gpu=use_gpu,
      force_gpu=use_gpu and test_util.is_gpu_available()):
    out = math_ops.logical_not(ops.convert_to_tensor(x))
    tf_val = self.evaluate(out)
  self.assertEqual(out.dtype, dtypes_lib.bool)
  self.assertAllEqual(np_ans, tf_val)
  self.assertShapeEqual(np_ans, out)

Author: bunbutter | Project: tensorflow | Lines: 9 | Source: cwise_ops_test.py
Example 12: _assert_non_singular

def _assert_non_singular(self):
  if self.dtype.is_complex:
    should_be_nonzero = math_ops.complex_abs(self._diag)
  else:
    should_be_nonzero = self._diag
  nonzero_diag = math_ops.reduce_all(
      math_ops.logical_not(math_ops.equal(should_be_nonzero, 0)))
  return control_flow_ops.Assert(
      nonzero_diag,
      data=["Singular operator: diag contained zero values.", self._diag])

Author: RapidApplicationDevelopment | Project: tensorflow | Lines: 12 | Source: linear_operator_diag.py
Example 13: kl

def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  If there is no KL method registered specifically for `type(dist_a)` and
  `type(dist_b)`, then the class hierarchies of these types are searched.
  If one KL method is registered between any pairs of classes in these two
  parent hierarchies, it is used.
  If more than one such registered method exists, the method whose registered
  classes have the shortest sum MRO paths to the input types is used.
  If more than one such shortest path exists, the first method
  identified in the search is used (favoring a shorter MRO distance to
  `type(dist_a)`).

  Args:
    dist_a: The first distribution.
    dist_b: The second distribution.
    allow_nan: If `False` (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions. If `True`, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  kl_fn = _registered_kl(type(dist_a), type(dist_b))
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % (type(dist_a).__name__, type(dist_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t
    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")
    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.logical_not(
                math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False). Values:"
             % (dist_a.name, dist_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")

Author: AliMiraftab | Project: tensorflow | Lines: 53 | Source: kullback_leibler.py
Example 14: kl

def kl(dist_a, dist_b, allow_nan=False, name=None):
  """Get the KL-divergence KL(dist_a || dist_b).

  Args:
    dist_a: instance of distributions.Distribution.
    dist_b: instance of distributions.Distribution.
    allow_nan: If False (default), a runtime error is raised
      if the KL returns NaN values for any batch entry of the given
      distributions. If True, the KL may return a NaN for the given entry.
    name: (optional) Name scope to use for created operations.

  Returns:
    A Tensor with the batchwise KL-divergence between dist_a and dist_b.

  Raises:
    TypeError: If dist_a or dist_b is not an instance of Distribution.
    NotImplementedError: If no KL method is defined for distribution types
      of dist_a and dist_b.
  """
  if not isinstance(dist_a, distribution.Distribution):
    raise TypeError(
        "dist_a is not an instance of Distribution, received type: %s"
        % type(dist_a))
  if not isinstance(dist_b, distribution.Distribution):
    raise TypeError(
        "dist_b is not an instance of Distribution, received type: %s"
        % type(dist_b))
  kl_fn = _DIVERGENCES.get((type(dist_a), type(dist_b)), None)
  if kl_fn is None:
    raise NotImplementedError(
        "No KL(dist_a || dist_b) registered for dist_a type %s and dist_b "
        "type %s" % (type(dist_a).__name__, type(dist_b).__name__))
  with ops.name_scope("KullbackLeibler"):
    kl_t = kl_fn(dist_a, dist_b, name=name)
    if allow_nan:
      return kl_t
    # Check KL for NaNs
    kl_t = array_ops.identity(kl_t, name="kl")
    with ops.control_dependencies([
        logging_ops.Assert(
            math_ops.logical_not(math_ops.reduce_any(math_ops.is_nan(kl_t))),
            ["KL calculation between %s and %s returned NaN values "
             "(and was called with allow_nan=False). Values:"
             % (dist_a.name, dist_b.name), kl_t])]):
      return array_ops.identity(kl_t, name="checked_kl")

Author: ChaitanyaCixLive | Project: tensorflow | Lines: 50 | Source: kullback_leibler.py
Example 15: _MaximumMinimumGrad

def _MaximumMinimumGrad(op, grad, selector_op):
  """Factor out the code for the gradient of Maximum or Minimum."""
  x = op.inputs[0]
  y = op.inputs[1]
  gdtype = grad.dtype
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  gradshape = array_ops.shape(grad)
  zeros = array_ops.zeros(gradshape, gdtype)
  xmask = selector_op(x, y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  xgrad = array_ops.where(xmask, grad, zeros)
  ygrad = array_ops.where(math_ops.logical_not(xmask), grad, zeros)
  gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
  gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
  return (gx, gy)

Author: Hwhitetooth | Project: tensorflow | Lines: 16 | Source: math_grad.py
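A toy walk-through of the selector logic, ignoring the broadcasting reduction (`rx`, `ry`) for equal-shaped inputs (TF 2.x eager assumed; tf.greater_equal plays the role of `selector_op` for Maximum):

import tensorflow as tf

x = tf.constant([1., 5., 2.])
y = tf.constant([3., 4., 2.])
grad = tf.constant([10., 10., 10.])
xmask = tf.greater_equal(x, y)  # which input "won" the max: [False, True, True]
zeros = tf.zeros_like(grad)
gx = tf.where(xmask, grad, zeros)                    # [ 0., 10., 10.]
gy = tf.where(tf.logical_not(xmask), grad, zeros)    # [10.,  0.,  0.]
# Each incoming gradient element is routed to exactly one of the two inputs.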
Example 16: pairwise_distance

def pairwise_distance(feature, squared=False):
  """Computes the pairwise distance matrix with numerical stability.

  output[i, j] = || feature[i, :] - feature[j, :] ||_2

  Args:
    feature: 2-D Tensor of size [number of data, feature dimension].
    squared: Boolean, whether or not to square the pairwise distances.

  Returns:
    pairwise_distances: 2-D Tensor of size [number of data, number of data].
  """
  pairwise_distances_squared = math_ops.add(
      math_ops.reduce_sum(math_ops.square(feature), axis=[1], keepdims=True),
      math_ops.reduce_sum(
          math_ops.square(array_ops.transpose(feature)),
          axis=[0],
          keepdims=True)) - 2.0 * math_ops.matmul(feature,
                                                  array_ops.transpose(feature))
  # Deal with numerical inaccuracies. Set small negatives to zero.
  pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared,
                                                0.0)
  # Get the mask where the zero distances are at.
  error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
  # Optionally take the sqrt.
  if squared:
    pairwise_distances = pairwise_distances_squared
  else:
    pairwise_distances = math_ops.sqrt(
        pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
  # Undo conditionally adding 1e-16.
  pairwise_distances = math_ops.multiply(
      pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
  num_data = array_ops.shape(feature)[0]
  # Explicitly set diagonals to zero.
  mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
      array_ops.ones([num_data]))
  pairwise_distances = math_ops.multiply(pairwise_distances,
                                         mask_offdiagonals)
  return pairwise_distances

Author: dananjayamahesh | Project: tensorflow | Lines: 46 | Source: metric_loss_ops.py
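A hedged usage sketch, assuming the TF1-era imports used in metric_loss_ops.py (math_ops, array_ops, including the deprecated math_ops.to_float) are in scope; the 2-point input is illustrative:

import tensorflow as tf

feature = tf.constant([[0., 0.], [3., 4.]])
dist = pairwise_distance(feature)
# dist ~ [[0., 5.], [5., 0.]]: ||(0,0) - (3,4)||_2 == 5, diagonal forced to zero.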
Example 17: maybe_sample

def maybe_sample():
  """Perform scheduled sampling."""
  if self._next_input_layer is None:
    return array_ops.where(sample_ids, outputs, base_next_inputs)
  where_sampling = math_ops.cast(
      array_ops.where(sample_ids), dtypes.int32)
  where_not_sampling = math_ops.cast(
      array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
  outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
  inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                            where_not_sampling)
  sampled_next_inputs = self._next_input_layer(outputs_sampling)
  base_shape = array_ops.shape(base_next_inputs)
  return (array_ops.scatter_nd(indices=where_sampling,
                               updates=sampled_next_inputs,
                               shape=base_shape)
          + array_ops.scatter_nd(indices=where_not_sampling,
                                 updates=inputs_not_sampling,
                                 shape=base_shape))

Author: Immexxx | Project: tensorflow | Lines: 20 | Source: helper.py
Example 18: _make_auc_histograms

def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_op_scope(
      [boolean_labels, scores, nbins], None, 'make_auc_histograms'):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false

Author: 285219011 | Project: hello-world | Lines: 20 | Source: histogram_ops.py
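The boolean_mask/logical_not pair above simply splits one batch of scores by label. In isolation (TF 2.x eager assumed, hypothetical values):

import tensorflow as tf

scores = tf.constant([0.9, 0.2, 0.8, 0.1])
labels = tf.constant([True, False, True, False])
true_scores = tf.boolean_mask(scores, labels)                     # [0.9, 0.8]
false_scores = tf.boolean_mask(scores, tf.logical_not(labels))    # [0.2, 0.1]
# Each subset would then be histogrammed separately, as in the function above.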
Example 19: maybe_sample

def maybe_sample():
  """Perform scheduled sampling."""

  def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
    """Concatenate outputs with auxiliary inputs, if they exist."""
    if self._auxiliary_input_tas is None:
      return outputs_
    next_time = time + 1
    auxiliary_inputs = nest.map_structure(
        lambda ta: ta.read(next_time), self._auxiliary_input_tas)
    if indices is not None:
      auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
    return nest.map_structure(
        lambda x, y: array_ops.concat((x, y), -1),
        outputs_, auxiliary_inputs)

  if self._next_input_layer is None:
    return array_ops.where(
        sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
        base_next_inputs)

  where_sampling = math_ops.cast(
      array_ops.where(sample_ids), dtypes.int32)
  where_not_sampling = math_ops.cast(
      array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
  outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
  inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
                                            where_not_sampling)
  sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
      self._next_input_layer(outputs_sampling), where_sampling)
  base_shape = array_ops.shape(base_next_inputs)
  return (array_ops.scatter_nd(indices=where_sampling,
                               updates=sampled_next_inputs,
                               shape=base_shape)
          + array_ops.scatter_nd(indices=where_not_sampling,
                                 updates=inputs_not_sampling,
                                 shape=base_shape))

Author: AlbertXiebnu | Project: tensorflow | Lines: 39 | Source: helper.py
Example 20: __invert__

def __invert__(self):
  # ops.Tensor used math_ops.logical_not as of August 2017.
  # Now that bitwise_ops.invert exists, it might make sense
  # for both ops.Tensor and TensorNode to use that if the
  # type is compatible.
  return math_ops.logical_not(self)

Author: keveman | Project: tensorflow | Lines: 6 | Source: tensor_node.py
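This override is what makes the `~` operator work on boolean tensors. A one-liner sketch with the public API (TF 2.x eager assumed):

import tensorflow as tf

mask = tf.constant([True, False])
print(~mask)                 # dispatches to logical_not: [False  True]
print(tf.logical_not(mask))  # same result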
Note: The tensorflow.python.ops.math_ops.logical_not examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use must follow each project's license. Do not republish without permission.