This article collects typical usage examples of the tensorflow.reduce_any function in Python. If you have been wondering what reduce_any does, how to call it, or where to see it used in practice, the curated code examples below should help.
The sections that follow present 20 code examples of reduce_any, sorted by popularity by default.
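For readers new to the op itself: tf.reduce_any computes a logical OR across the elements of a boolean tensor, either over all elements or along the given axes. A minimal sketch (TensorFlow 1.x style, to match the examples below):

import tensorflow as tf

x = tf.constant([[True, False, False],
                 [False, False, False]])
any_at_all = tf.reduce_any(x)           # scalar: True
any_per_row = tf.reduce_any(x, axis=1)  # [True, False]

with tf.Session() as sess:
    print(sess.run([any_at_all, any_per_row]))

The examples are reproduced as found in their source projects, so idioms vary with the TensorFlow version in use (e.g. the older reduction_indices/keep_dims arguments versus the newer axis/keepdims).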
Example 1: _compute_sparse_average_correct
def _compute_sparse_average_correct(input_, labels, per_example_weights, topk=1):
    """Returns the numerator and denominator of classifier accuracy."""
    labels = tf.to_int64(labels)
    labels.get_shape().assert_is_compatible_with([input_.get_shape()[0], None])
    if topk == 1:
        predictions = tf.reshape(tf.argmax(input_, 1), [-1, 1])
        in_topk = tf.reduce_any(tf.equal(labels, predictions),
                                reduction_indices=[1])
    else:
        # Use broadcasting to check if ANY of the predictions are in the top k.
        # TODO(eiderman): For a multi-label top k, what does accuracy mean?
        predictions = tf.reshape(tf.nn.top_k(input_, topk)[1], [-1, 1, topk])
        labels = tf.expand_dims(labels, [-1])
        in_topk = tf.reduce_any(
            tf.equal(tf.cast(labels, predictions.dtype), predictions),
            reduction_indices=[1, 2])
    correct_predictions = tf.to_float(in_topk)

    # If individual examples are weighted, then we want to normalize by that.
    if per_example_weights is not None:
        per_example_weights = _convert_and_assert_per_example_weights_compatible(
            input_, per_example_weights, dtype=None)
        float_weights = tf.to_float(per_example_weights)
        # TODO(eiderman): This should use an op that doesn't support broadcasting.
        correct_predictions *= float_weights
        num_examples = tf.reduce_sum(float_weights)
    else:
        # shape only holds ints, but we want to always return the same type
        # for num_examples to make everything compatible.
        num_examples = tf.to_float(tf.gather(tf.shape(input_), 0))
    return tf.reduce_sum(correct_predictions), num_examples
Author: google | Project: prettytensor | Lines: 30 | Source: pretty_tensor_loss_methods.py

Example 2: _define_step
def _define_step(self, done, score, summary):
    """Combine operations of a phase.

    Keeps track of the mean score and when to report it.

    Args:
        done: Tensor indicating whether current score can be used.
        score: Tensor holding the current, possibly intermediate, score.
        summary: Tensor holding summary string to write if not an empty string.

    Returns:
        Tuple of summary tensor, mean score, and new global step. The mean
        score is zero for non reporting steps.
    """
    if done.shape.ndims == 0:
        done = done[None]
    if score.shape.ndims == 0:
        score = score[None]
    score_mean = streaming_mean.StreamingMean((), tf.float32)
    with tf.control_dependencies([done, score, summary]):
        done_score = tf.gather(score, tf.where(done)[:, 0])
        submit_score = tf.cond(
            tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
    with tf.control_dependencies([submit_score]):
        mean_score = tf.cond(self._report, score_mean.clear, float)
        steps_made = tf.shape(score)[0]
        next_step = self._step.assign_add(steps_made)
    with tf.control_dependencies([mean_score, next_step]):
        return tf.identity(summary), mean_score, next_step, steps_made
Author: bulletphysics | Project: bullet3 | Lines: 28 | Source: loop.py

Example 3: prune_outside_window
def prune_outside_window(boxlist, window, scope=None):
    """Prunes bounding boxes that fall outside a given window.

    This function prunes bounding boxes that even partially fall outside the
    given window. See also clip_to_window which only prunes bounding boxes that
    fall completely outside the window, and clips any bounding boxes that
    partially overflow.

    Args:
        boxlist: a BoxList holding M_in boxes.
        window: a float tensor of shape [4] representing
            [ymin, xmin, ymax, xmax] of the window
        scope: name scope.

    Returns:
        pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
        valid_indices: a tensor with shape [M_out] indexing the valid bounding
            boxes in the input tensor.
    """
    with tf.name_scope(scope, 'PruneOutsideWindow'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        coordinate_violations = tf.concat([
            tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
            tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
        ], 1)
        valid_indices = tf.reshape(
            tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))),
            [-1])
        return gather(boxlist, valid_indices), valid_indices
Author: NoPointExc | Project: models | Lines: 30 | Source: box_list_ops.py
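A minimal usage sketch for the function above (the coordinates are hypothetical, and it assumes the BoxList class from the accompanying box_list module):

boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.4, 0.4],
                                      [-0.2, 0.3, 0.5, 0.9]]))
window = tf.constant([0.0, 0.0, 1.0, 1.0])
pruned_boxlist, valid_indices = prune_outside_window(boxes, window)
# The second box sticks out above the window (ymin < 0), so only the
# first box survives and valid_indices evaluates to [0].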
Example 4: prune_completely_outside_window
def prune_completely_outside_window(boxlist, window, scope=None):
    """Prunes bounding boxes that fall completely outside of the given window.

    The function clip_to_window prunes bounding boxes that fall completely
    outside the window, but also clips any bounding boxes that partially
    overflow. This function does not clip partially overflowing boxes.

    Args:
        boxlist: a BoxList holding M_in boxes.
        window: a float tensor of shape [4] representing
            [ymin, xmin, ymax, xmax] of the window
        scope: name scope.

    Returns:
        pruned_boxlist: a new BoxList with all bounding boxes partially or
            fully in the window.
        valid_indices: a tensor with shape [M_out] indexing the valid bounding
            boxes in the input tensor.
    """
    with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
        y_min, x_min, y_max, x_max = tf.split(
            value=boxlist.get(), num_or_size_splits=4, axis=1)
        win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
        coordinate_violations = tf.concat([
            tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
            tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
        ], 1)
        valid_indices = tf.reshape(
            tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))),
            [-1])
        return gather(boxlist, valid_indices), valid_indices
Author: NoPointExc | Project: models | Lines: 30 | Source: box_list_ops.py

Example 5: compute_module
def compute_module(accum, module):
    # `selection`, `inputs`, `module_fnc` and `output_shape` are captured
    # from the enclosing scope.
    mask = tf.equal(module, selection)
    reduced_mask = tf.reduce_any(mask, axis=-1)
    indices = tf.where(reduced_mask)
    affected_inp = tf.boolean_mask(inputs, reduced_mask)
    output = module_fnc(affected_inp, module)
    return accum + tf.scatter_nd(indices, output, tf.cast(output_shape, tf.int64))
Author: timediv | Project: libmodular | Lines: 7 | Source: modular.py

Example 6: kl_divergence
def kl_divergence(distribution_a, distribution_b,
                  allow_nan_stats=True, name=None):
    """Get the KL-divergence KL(distribution_a || distribution_b).

    If there is no KL method registered specifically for `type(distribution_a)`
    and `type(distribution_b)`, then the class hierarchies of these types are
    searched.

    If one KL method is registered between any pairs of classes in these two
    parent hierarchies, it is used.

    If more than one such registered method exists, the method whose registered
    classes have the shortest sum MRO paths to the input types is used.

    If more than one such shortest path exists, the first method
    identified in the search is used (favoring a shorter MRO distance to
    `type(distribution_a)`).

    Args:
        distribution_a: The first distribution.
        distribution_b: The second distribution.
        allow_nan_stats: Python `bool`, default `True`. When `True`,
            statistics (e.g., mean, mode, variance) use the value "`NaN`" to
            indicate the result is undefined. When `False`, an exception is
            raised if one or more of the statistic's batch members are
            undefined.
        name: Python `str` name prefixed to Ops created by this class.

    Returns:
        A Tensor with the batchwise KL-divergence between `distribution_a`
        and `distribution_b`.

    Raises:
        NotImplementedError: If no KL method is defined for distribution types
            of `distribution_a` and `distribution_b`.
    """
    kl_fn = _registered_kl(type(distribution_a), type(distribution_b))
    if kl_fn is None:
        # TODO(b/117098119): For backwards compatibility, we check TF's
        # registry as well. This typically happens when this function is
        # called on a pair of TF's distributions.
        with deprecation.silence():
            return tf.distributions.kl_divergence(distribution_a, distribution_b)

    with tf.name_scope("KullbackLeibler"):
        kl_t = kl_fn(distribution_a, distribution_b, name=name)
        if allow_nan_stats:
            return kl_t

        # Check KL for NaNs.
        kl_t = tf.identity(kl_t, name="kl")
        with tf.control_dependencies([
            tf.Assert(
                tf.logical_not(tf.reduce_any(tf.is_nan(kl_t))),
                ["KL calculation between %s and %s returned NaN values "
                 "(and was called with allow_nan_stats=False). Values:"
                 % (distribution_a.name, distribution_b.name), kl_t])
        ]):
            return tf.identity(kl_t, name="checked_kl")
Author: asudomoeva | Project: probability | Lines: 59 | Source: kullback_leibler.py

Example 7: is_last_day_of_season
def is_last_day_of_season(t):
    t_ = dist_util.maybe_get_static_value(t)
    if t_ is not None:  # static case
        step_in_cycle = t_ % num_steps_per_cycle
        return any(step_in_cycle == changepoints)
    else:
        step_in_cycle = tf.floormod(t, num_steps_per_cycle)
        return tf.reduce_any(tf.equal(step_in_cycle, changepoints))
Author: asudomoeva | Project: probability | Lines: 8 | Source: seasonal.py

Example 8: check_grads
def check_grads(grads_and_vars):
    has_nan_ops = []
    amax_ops = []
    for grad, _ in grads_and_vars:
        if grad is not None:
            if isinstance(grad, tf.IndexedSlices):
                x = grad.values
            else:
                x = grad
            has_nan_ops.append(tf.reduce_any(tf.is_nan(x)))
            amax_ops.append(tf.reduce_max(tf.abs(x)))
    has_nan = tf.reduce_any(has_nan_ops)
    amax = tf.reduce_max(amax_ops)
    return has_nan, amax
Author: fotwo | Project: OpenSeq2Seq | Lines: 17 | Source: automatic_loss_scaler.py
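A sketch of how such a check might be wired into a training step (opt and loss are hypothetical names for an optimizer and a loss tensor):

grads_and_vars = opt.compute_gradients(loss)
has_nan, amax = check_grads(grads_and_vars)
# has_nan and amax can then feed a loss-scaling update, or a tf.cond
# that skips applying the gradients for this step.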
Example 9: any
def any(x, axis=None, keepdims=False):
    """Bitwise reduction (logical OR).

    Return array of uint8 (0s and 1s).
    """
    axis = normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
Author: daviddiazvico | Project: keras | Lines: 9 | Source: tensorflow_backend.py
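A quick check of this backend-style any() (the values are hypothetical; evaluate inside a TF 1.x session):

x = tf.constant([[0, 1, 0],
                 [0, 0, 0]])
mask = any(x, axis=1)  # uint8 tensor evaluating to [1, 0]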
Example 10: any
def any(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical OR).

    Returns a uint8 tensor (0s and 1s).
    '''
    axis = _normalize_axis(axis, ndim(x))
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.uint8)
Author: NajNaj | Project: keras | Lines: 9 | Source: tensorflow_backend.py

Example 11: retrieve_seq_length_op3
def retrieve_seq_length_op3(data, pad_val=0):
    """An op to compute the length of a sequence.

    The data shape can be [batch_size, n_step(max)] or
    [batch_size, n_step(max), n_features]. If the data has type tf.string
    and pad_val is assigned as an empty string (''), this op computes the
    length of the string sequence.

    Parameters
    ----------
    data : tensor
        [batch_size, n_step(max)] or [batch_size, n_step(max), n_features]
        with zero padding on the right hand side.
    pad_val
        By default 0. If the data is tf.string, please assign this as an
        empty string ('').

    Examples
    --------
    >>> data = [[[1],[2],[0],[0],[0]],
    >>>         [[1],[2],[3],[0],[0]],
    >>>         [[1],[2],[6],[1],[0]]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [2, 3, 4]
    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
    >>>         [[2,3],[2,4],[3,2],[0,0],[0,0]],
    >>>         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [4, 3, 4]
    >>> data = [[1,2,0,0,0],
    >>>         [1,2,3,0,0],
    >>>         [1,2,6,1,0]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [2, 3, 4]
    >>> data = [['hello','world','','',''],
    >>>         ['hello','world','tensorlayer','',''],
    >>>         ['hello','world','tensorlayer','2.0','']]
    >>> data = tf.convert_to_tensor(data, dtype=tf.string)
    >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='')
    [2, 3, 4]
    """
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(
            input_tensor=tf.cast(
                tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2),
                dtype=tf.int32),
            axis=1)
    elif data_shape_size == 2:
        return tf.reduce_sum(
            input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32),
            axis=1)
    elif data_shape_size == 1:
        raise ValueError(
            "retrieve_seq_length_op3: data has wrong shape! Shape got ",
            data.get_shape().as_list())
    else:
        raise ValueError(
            "retrieve_seq_length_op3: handling data with num of dims %s hasn't "
            "been implemented!" % (data_shape_size))
Author: zsdonghao | Project: tensorlayer | Lines: 56 | Source: recurrent.py

Example 12: remap_keys
def remap_keys(sparse_tensor):
    # Current indices of our SparseTensor that we need to fix
    bad_indices = sparse_tensor.indices
    # Current values of our SparseTensor that we need to fix
    bad_values = sparse_tensor.values
    # Group by the batch_indices and get the count for each
    size = tf.segment_sum(
        data=tf.ones_like(bad_indices[:, 0], dtype=tf.int64),
        segment_ids=bad_indices[:, 0]) - 1
    # The number of batch_indices (this should be batch_size unless it is a
    # partially full batch)
    length = tf.shape(size, out_type=tf.int64)[0]
    # Finds the cumulative sum which we can use for indexing later
    cum = tf.cumsum(size)
    # The offsets between each example in the batch due to our concatenation
    # of the keys in the decode_example method
    length_range = tf.range(start=0, limit=length, delta=1, dtype=tf.int64)
    # Indices of the SparseTensor's indices member of the rows we added by the
    # concatenation of our keys in the decode_example method
    cum_range = cum + length_range
    # The keys that we have extracted back out of our concatenated SparseTensor
    gathered_indices = tf.squeeze(tf.gather(bad_indices, cum_range)[:, 1])
    # The enumerated row indices of the SparseTensor's indices member
    sparse_indices_range = tf.range(
        tf.shape(bad_indices, out_type=tf.int64)[0], dtype=tf.int64)
    # We want to find the row indices of the SparseTensor's indices member
    # that hold our actual data and not the concatenated rows, so we find the
    # intersection of the two sets and then take its complement
    x = sparse_indices_range
    s = cum_range
    # Number of multiples we are going to tile x, which is our
    # sparse_indices_range
    tile_multiples = tf.concat(
        [tf.ones(tf.shape(tf.shape(x)), dtype=tf.int64),
         tf.shape(s, out_type=tf.int64)], axis=0)
    # Expands x, our sparse_indices_range, into a rank 2 tensor and then
    # multiplies the rows by 1 (no copying) and the columns by the number of
    # examples in the batch
    x_tile = tf.tile(tf.expand_dims(x, -1), tile_multiples)
    # Essentially a vectorized logical or, that we then negate
    x_not_in_s = ~tf.reduce_any(tf.equal(x_tile, s), -1)
    # The SparseTensor's indices that are our actual data, obtained by
    # applying the boolean mask we just made to the entire indices member
    selected_indices = tf.boolean_mask(tensor=bad_indices, mask=x_not_in_s, axis=0)
    # Apply the same boolean_mask to the entire values member of our
    # SparseTensor to get the actual values data
    selected_values = tf.boolean_mask(tensor=bad_values, mask=x_not_in_s, axis=0)
    # Need to replace the first column of our selected_indices with keys, so
    # we first need to tile our gathered_indices
    tiling = tf.tile(
        input=tf.expand_dims(gathered_indices[0], -1),
        multiples=tf.expand_dims(size[0], -1))

    # We have to repeatedly apply the tiling to each example in the batch.
    # Since it is jagged we cannot use tf.map_fn due to the stacking of the
    # TensorArray, so we have to create our own custom version.
    def loop_body(i, tensor_grow):
        return i + 1, tf.concat(
            values=[tensor_grow,
                    tf.tile(input=tf.expand_dims(gathered_indices[i], -1),
                            multiples=tf.expand_dims(size[i], -1))],
            axis=0)

    _, result = tf.while_loop(
        lambda i, tensor_grow: i < length, loop_body,
        [tf.constant(1, dtype=tf.int64), tiling])
    # Concatenate tiled keys with the 2nd column of selected_indices
    selected_indices_fixed = tf.concat(
        [tf.expand_dims(result, -1), tf.expand_dims(selected_indices[:, 1], -1)],
        axis=1)
    # Combine everything together back into a SparseTensor
    remapped_sparse_tensor = tf.SparseTensor(
        indices=selected_indices_fixed, values=selected_values,
        dense_shape=sparse_tensor.dense_shape)
    return remapped_sparse_tensor
Author: TarunBattula | Project: training-data-analyst | Lines: 56 | Source: model.py

Example 13: cond
def cond(loop_cnt, prev_out, _, __):
    less = tf.less(loop_cnt, output_len_threshold)
    is_regular_word = tf.reduce_any(
        tf.not_equal(
            prev_out,
            tf.one_hot([0], FEATURE_SIZE)  # <eos>
        )
    )
    return tf.logical_and(less, is_regular_word)
Author: ninotoshi | Project: playground | Lines: 10 | Source: main.py

Example 14: has_no_question_marks
def has_no_question_marks(line):
    """Returns True if the line of text has no question marks."""
    # split the line into an array of characters
    chars = tf.string_split(line[tf.newaxis], "").values
    # for each character check if it is a question mark
    is_question = tf.equal(chars, "?")
    any_question = tf.reduce_any(is_question)
    no_question = ~any_question
    return no_question
Author: dananjayamahesh | Project: tensorflow | Lines: 10 | Source: imports85.py
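A predicate like this is typically used to filter a line-based tf.data pipeline; a minimal sketch (the file name is hypothetical):

dataset = tf.data.TextLineDataset("imports85.data")  # hypothetical path
dataset = dataset.filter(has_no_question_marks)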
Example 15: any
def any(x, axis=None, keepdims=False):
    '''Bitwise reduction (logical OR).

    Return array of int8 (0s and 1s).
    '''
    if axis is not None and axis < 0:
        axis = axis % len(x.get_shape())
    x = tf.cast(x, tf.bool)
    x = tf.reduce_any(x, reduction_indices=axis, keep_dims=keepdims)
    return tf.cast(x, tf.int8)
Author: stevenxxiu | Project: keras | Lines: 10 | Source: tensorflow_backend.py

Example 16: target_mask_op
def target_mask_op(data, pad_val=0):
    """Return a mask tensor; the input may also be a ``tf.string`` tensor."""
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(
            tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2),
            dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError(
            "target_mask_op: handling data_shape_size %s hasn't been "
            "implemented!" % (data_shape_size))
Author: zsdonghao | Project: tensorlayer | Lines: 11 | Source: recurrent.py

Example 17: rnn_model
def rnn_model(full_image):
    with tf.variable_scope('main_recurrence') as scope:
        low_res = tf.image.resize_images(
            full_image, FLAGS.context_image_size, FLAGS.context_image_size)
        context = context_network(low_res)
        classifications_list = []

        # provide 0 initialization to lstm1
        lstm1 = rnn_cell.BasicLSTMCell(FLAGS.lstm_size)
        lstm1_state = tf.zeros([FLAGS.batch_size, lstm1.state_size])
        lstm1_outputs = []
        lstm1_states = []

        with tf.variable_scope('lstm2') as scope:
            # provide context initialization to lstm2
            lstm2 = rnn_cell.BasicLSTMCell(FLAGS.lstm_size)
            lstm2_initial_input = tf.zeros([FLAGS.batch_size, FLAGS.lstm_size])
            lstm2_output, lstm2_state = lstm2(lstm2_initial_input, context)
            emission = emission_network(lstm2_output)
            location, keep_going = _parse_emission_output(emission)
            scope.reuse_variables()

        valid_classification_list = []
        for step in xrange(FLAGS.max_recurrent_steps):
            if step > 0:
                tf.get_variable_scope().reuse_variables()
            keep_going_threshold = tf.constant(
                FLAGS.keep_going_threshold, dtype=tf.float32)

            glimpse_out, glimpse_vars = glimpse_network(full_image, location)
            lstm1_output, lstm1_state = lstm1(glimpse_out, lstm1_state)
            classifications_list.append(classification_network(lstm1_state))

            valids = tf.squeeze(tf.greater(keep_going, keep_going_threshold))
            valid_classification_list.append(tf.to_int32(valids))
            # Caution: tf.reduce_any returns a Tensor, not a Python bool, so
            # this early exit is decided at graph-construction time rather
            # than at run time.
            if not tf.reduce_any(valids):
                break

            with tf.variable_scope('lstm2') as scope:
                scope.reuse_variables()
                lstm2_output, lstm2_state = lstm2(lstm1_output, lstm2_state)
                location, keep_going = _parse_emission_output(
                    emission_network(lstm2_output))

        valid_classifications = tf.pad(
            tf.pack(valid_classification_list),
            tf.convert_to_tensor([[0, FLAGS.max_recurrent_steps - step - 1], [0, 0]]))
        classifications = tf.pad(
            tf.pack(classifications_list),
            tf.convert_to_tensor(
                [[0, FLAGS.max_recurrent_steps - step - 1], [0, 0], [0, 0]]))
        classifications = attention(classifications, valid_classifications)
        classifications.get_shape()
    return tf.squeeze(classifications), glimpse_vars
Author: bschreck | Project: scenic-recursion | Lines: 52 | Source: recurrent_model.py

Example 18: clusterize
def clusterize(batch):
    start = time.time()
    n = len(batch)
    dim = len(batch[0])
    print(batch)

    points = tf.placeholder(tf.int32, [n, dim])
    cluster_assignments = tf.Variable(tf.zeros([n], dtype=tf.int64))

    # Use K random points as the starting centroids
    centroids = tf.Variable(tf.slice(tf.random_shuffle(points), [0, 0], [K, dim]))

    # Replicate to n copies of each centroid and K copies of each
    # point, then subtract and compute the sum of squared distances.
    rep_centroids = tf.reshape(tf.tile(centroids, [n, 1]), [n, K, dim])
    rep_points = tf.reshape(tf.tile(points, [1, K]), [n, K, dim])
    sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids),
                                reduction_indices=2)

    # Use argmin to select the lowest-distance point
    best_centroids = tf.argmin(sum_squares, 1)
    did_assignments_change = tf.reduce_any(tf.not_equal(best_centroids,
                                                        cluster_assignments))

    def bucket_mean(data, bucket_ids, num_buckets):
        total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
        count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
        return total / count

    means = bucket_mean(points, best_centroids, K)

    # Do not write to the assigned clusters variable until after
    # computing whether the assignments have changed - hence with_dependencies
    with tf.control_dependencies([did_assignments_change]):
        do_updates = tf.group(
            centroids.assign(means),
            cluster_assignments.assign(best_centroids))

    changed = True
    iters = 0
    sess = tf.Session()
    sess.run(tf.initialize_all_variables(), feed_dict={points: batch})

    while changed and iters < MAX_ITERS:
        iters += 1
        [changed, _] = sess.run([did_assignments_change, do_updates],
                                feed_dict={points: batch})

    [centers, assignments] = sess.run([centroids, cluster_assignments],
                                      feed_dict={points: batch})
    end = time.time()
    print("Found in %.2f seconds" % (end - start), iters, "iterations")
    return [centers, assignments]
Author: PFAWeb2Control | Project: machine-learning | Lines: 52 | Source: k_means.py
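A minimal usage sketch (it assumes module-level constants such as K = 2 and MAX_ITERS = 100, and integer-valued points to match the int32 placeholder):

batch = [[0, 0], [1, 1], [9, 9], [10, 10]]
centers, assignments = clusterize(batch)
# centers holds the K centroids; assignments maps each point to a centroid.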
Example 19: drop_some
def drop_some(columns, drop_prob=.15):
    """Zeros out columns with probability `drop_prob`.

    Used for rounds of local drop path.
    """
    num_columns = tensor_shape(columns)[0]
    mask = tf.random_uniform([num_columns]) > drop_prob
    scale = num_columns / tf.reduce_sum(tf.cast(mask, tf.float32))
    return tf.cond(tf.reduce_any(mask),
                   lambda: apply_mask(mask, columns) * scale,
                   lambda: random_column(columns))
Author: edgelord | Project: FractalNet | Lines: 13 | Source: fractal_block.py

Example 20: _compare
def _compare(self, x, reduction_axes, keep_dims, use_gpu=False):
    np_ans = x
    if reduction_axes is None:
        np_ans = np.any(np_ans, keepdims=keep_dims)
    else:
        for ra in reduction_axes[::-1]:
            np_ans = np.any(np_ans, axis=ra, keepdims=keep_dims)
    with self.test_session(use_gpu=use_gpu):
        if reduction_axes is not None:
            reduction_axes = np.array(reduction_axes).astype(np.int32)
        tf_ans = tf.reduce_any(x, reduction_axes, keep_dims)
        out = tf_ans.eval()
    self.assertAllEqual(np_ans, out)
    self.assertShapeEqual(np_ans, tf_ans)
Author: 13331151 | Project: tensorflow | Lines: 14 | Source: reduction_ops_test.py
Note: the tensorflow.reduce_any examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects and copyright remains with their original authors; consult each project's license before redistributing or reusing, and do not republish without permission.