This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.size. If you have been wondering how to use the size function, how it works, or what it looks like in real code, the curated examples below may help.
The following presents 20 code examples of the size function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
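Before working through the examples, here is a minimal sketch of what array_ops.size computes: the total number of elements in a tensor, returned as a scalar tensor (int32 by default, or int64 via the out_type argument). The tensor values below are illustrative only and are not taken from any of the collected examples.

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops

# A 2x3 constant has 6 elements in total.
t = constant_op.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
n = array_ops.size(t)                           # scalar int32 tensor, value 6
n64 = array_ops.size(t, out_type=dtypes.int64)  # int64 for very large tensors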
Example 1: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = tf_utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)

  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = tf_utils.constant_value(training)
  if training_value is None:
    momentum = tf_utils.smart_cond(training,
                                   lambda: self.momentum,
                                   lambda: 1.0)
  else:
    momentum = ops.convert_to_tensor(self.momentum)
  if training_value or training_value is None:
    if distribution_strategy_context.in_cross_replica_context():
      strategy = distribution_strategy_context.get_strategy()
      mean_update = strategy.extended.update(
          self.moving_mean, self._assign_moving_average,
          (mean, self.momentum))
      variance_update = strategy.extended.update(
          self.moving_variance, self._assign_moving_average,
          (variance, self.momentum))
    else:
      mean_update = self._assign_moving_average(self.moving_mean, mean,
                                                momentum)
      variance_update = self._assign_moving_average(self.moving_variance,
                                                    variance, momentum)
    self.add_update(mean_update, inputs=True)
    self.add_update(variance_update, inputs=True)

  return output
Author: gautam1858 | Project: tensorflow | Lines: 60 | Source: normalization.py
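In the Bessel's-correction block above, array_ops.size(inputs) / array_ops.size(variance) is the per-channel sample count: variance has one entry per channel, so for NHWC input the quotient equals N*H*W. A quick sketch of the correction-removal arithmetic, using made-up shapes and values that do not come from the original file:

# Suppose inputs has shape [8, 4, 4, 16] (NHWC) and variance has shape [16].
sample_size = (8 * 4 * 4 * 16) / 16  # = 128 samples per channel
# fused_batch_norm returns the unbiased variance (divided by sample_size - 1);
# multiplying by this factor converts it to the biased estimate
# (divided by sample_size), matching non-fused batch norm.
factor = (sample_size - 1.0) / sample_size
unbiased_variance = 2.0                      # illustrative value
biased_variance = unbiased_variance * factor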
Example 2: quantiles_ready
def quantiles_ready():
  """The subgraph for when the quantiles are ready."""
  quantized_feature = quantile_ops.quantiles([sparse_column_values], [],
                                             [quantile_buckets], [])
  quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
  quantized_feature = array_ops.reshape(quantized_feature, [-1])

  example_indices, _ = array_ops.split(
      sparse_column_indices, num_or_size_splits=2, axis=1)
  example_indices = array_ops.squeeze(example_indices, [1])
  filtered_gradients = array_ops.gather(gradients, example_indices)
  filtered_hessians = array_ops.gather(hessians, example_indices)
  filtered_partition_ids = array_ops.gather(example_partition_ids,
                                            example_indices)
  unique_partitions, mapped_partitions = array_ops.unique(
      example_partition_ids)

  # Compute aggregate stats for each partition.
  per_partition_gradients = math_ops.unsorted_segment_sum(
      gradients, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_hessians = math_ops.unsorted_segment_sum(
      hessians, mapped_partitions, array_ops.size(unique_partitions))

  # Prepend a bias feature per partition that accumulates the stats for all
  # examples in that partition.
  bias_feature_ids = array_ops.fill(
      array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
  bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)

  partition_ids = array_ops.concat(
      [unique_partitions, filtered_partition_ids], 0)
  filtered_gradients = array_ops.concat(
      [per_partition_gradients, filtered_gradients], 0)
  filtered_hessians = array_ops.concat(
      [per_partition_hessians, filtered_hessians], 0)
  bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)

  return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
Author: 1000sprites | Project: tensorflow | Lines: 35 | Source: ordinal_split_handler.py
Example 3: _TopKGrad
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])

  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))

  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])

  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros([], dtype=dtypes.int32)]
Author: Jackhuang945 | Project: tensorflow | Lines: 34 | Source: nn_grad.py
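The gradient above relies on turning per-row TopK indices into indices of the flattened input: array_ops.size(ind_shape) - 1 selects the last axis, and the offsets produced by math_ops.range(0, outerdim * in_lastdim, in_lastdim) shift each row's indices by its row start. A small numpy sketch of that index arithmetic, with made-up values:

import numpy as np

# Two rows of width in_lastdim = 5; top-2 indices per row.
in_lastdim = 5
ind_2d = np.array([[4, 1],    # indices into row 0
                   [3, 0]])   # indices into row 1
outerdim = ind_2d.shape[0]
# Offset row i by i * in_lastdim, then flatten -- the same trick as _TopKGrad.
offsets = np.arange(0, outerdim * in_lastdim, in_lastdim)[:, None]
flat_ind = (ind_2d + offsets).reshape(-1)
print(flat_ind)  # [4 1 8 5] -- positions in the flattened input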
Example 4: _SparseDenseCwiseMulOrDivGrad
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
Author: 1000sprites | Project: tensorflow | Lines: 32 | Source: sparse_grad.py
Example 5: _fused_batch_norm
def _fused_batch_norm(self, inputs, training):
  """Returns the output of fused batch norm."""
  # TODO(reedwm): Add support for fp16 inputs.
  beta = self.beta if self.center else self._beta_const
  gamma = self.gamma if self.scale else self._gamma_const

  def _fused_batch_norm_training():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        epsilon=self.epsilon,
        data_format=self._data_format)

  def _fused_batch_norm_inference():
    return nn.fused_batch_norm(
        inputs,
        gamma,
        beta,
        mean=self.moving_mean,
        variance=self.moving_variance,
        epsilon=self.epsilon,
        is_training=False,
        data_format=self._data_format)

  output, mean, variance = utils.smart_cond(
      training, _fused_batch_norm_training, _fused_batch_norm_inference)
  mean = array_ops.reshape(mean, shape=self.moving_mean.get_shape())
  variance = array_ops.reshape(variance,
                               shape=self.moving_variance.get_shape())

  if not self._bessels_correction_test_only:
    # Remove Bessel's correction to be consistent with non-fused batch norm.
    # Note that the variance computed by fused batch norm is
    # with Bessel's correction.
    sample_size = math_ops.cast(
        array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
    factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
    variance *= factor

  training_value = utils.constant_value(training)
  if training_value is None:
    one_minus_decay = utils.smart_cond(training,
                                       lambda: self._one_minus_decay,
                                       lambda: 0.)
  else:
    one_minus_decay = ops.convert_to_tensor(self._one_minus_decay)
  if training_value or training_value is None:
    mean_update = self._assign_moving_average(self.moving_mean, mean,
                                              one_minus_decay)
    variance_update = self._assign_moving_average(self.moving_variance,
                                                  variance, one_minus_decay)
    if context.in_graph_mode():
      # Note that in Eager mode, the updates are already executed when running
      # assign_moving_averages. So we do not need to put them into
      # collections.
      self.add_update(mean_update, inputs=inputs)
      self.add_update(variance_update, inputs=inputs)

  return output
Author: rajeev921 | Project: tensorflow | Lines: 59 | Source: normalization.py
Example 6: _FoldFusedBatchNormGrad
def _FoldFusedBatchNormGrad(op, unused_grad_y, grad_mean, grad_var, unused_1,
                            unused_2):
  x = op.inputs[0]
  n = math_ops.cast(
      array_ops.size(x) / array_ops.size(grad_mean), dtypes.float32)
  dmean_dx = grad_mean / n
  dvar_dx = 2 * grad_var * (x - op.outputs[1]) / (n - 1)
  return (dmean_dx + dvar_dx), None, None, None, None
Author: BhaskarNallani | Project: tensorflow | Lines: 8 | Source: fold_batch_norms.py
Example 7: _GatherV2Grad
def _GatherV2Grad(op, grad):
  """Gradient for GatherV2 op."""
  # params can be large, so colocate the shape calculation with it.
  #
  # params can be very large for sparse model, array_ops.shape raises
  # exception on the Windows platform when any dimension is larger than
  # int32. params_shape is not used in optimizer apply_sparse gradients,
  # so it's fine to convert it back to int32 regardless of truncation.
  params = op.inputs[0]
  with ops.colocate_with(params):
    params_shape = array_ops.shape(params, out_type=ops.dtypes.int64)
    params_shape = math_ops.to_int32(params_shape)

  indices = op.inputs[1]
  indices_size = array_ops.expand_dims(array_ops.size(indices), 0)
  axis = op.inputs[2]
  axis_static = tensor_util.constant_value(axis)

  # For axis 0 gathers, build an appropriately shaped IndexedSlices.
  if axis_static == 0:
    values_shape = array_ops.concat([indices_size, params_shape[1:]], 0)
    values = array_ops.reshape(grad, values_shape)
    indices = array_ops.reshape(indices, indices_size)
    return [ops.IndexedSlices(values, indices, params_shape), None, None]

  outer_shape = params_shape[:axis]
  outer_dims = array_ops.size(outer_shape)
  inner_shape = params_shape[axis:][1:]
  inner_dims = array_ops.size(inner_shape)

  outer_axes_indices = math_ops.range(outer_dims)
  inner_axes_indices = math_ops.range(outer_dims + 1,
                                      outer_dims + 1 + inner_dims)

  values_shape = array_ops.concat([outer_shape, indices_size, inner_shape], 0)
  values = array_ops.reshape(grad, values_shape)
  indices = array_ops.reshape(indices, indices_size)

  # We need to sum up every slice `values[..., i, ....]` corresponding to
  # `params[..., indices[i], ...]`. Since `unsorted_segment_sum` does not
  # support an axis parameter, we transpose the gather dimension to the front,
  # then use `unsorted_segment_sum` to build a
  # [gather_axis, outer_axes, inner_axes] tensor with all the gradients
  # affecting each index in `gather_axis` summed up.
  transpose_dims = array_ops.concat(
      [[outer_dims], outer_axes_indices, inner_axes_indices], 0)
  values_transpose = array_ops.transpose(values, transpose_dims)
  num_segments = params_shape[axis]

  params_grad = math_ops.unsorted_segment_sum(
      values_transpose, indices, num_segments)

  # Inverts the above transpose by moving dimension 0 back to its original
  # position.
  invert_transpose_dims = array_ops.concat(
      [outer_axes_indices + 1, [0], inner_axes_indices], 0)
  params_grad = array_ops.transpose(params_grad, invert_transpose_dims)
  return [params_grad, None, None]
Author: Dr4KK | Project: tensorflow | Lines: 58 | Source: array_grad.py
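Note the idiom in this gradient: outer_shape and inner_shape are themselves 1-D shape tensors, so array_ops.size applied to them counts dimensions rather than elements. A toy numpy sketch of the transpose bookkeeping, assuming a rank-4 params gathered along axis=2 (the shapes are invented for illustration):

import numpy as np

# Assume params has shape [2, 3, 5, 7] and we gather along axis = 2.
params_shape = np.array([2, 3, 5, 7])
axis = 2
outer_shape = params_shape[:axis]         # [2, 3]
outer_dims = outer_shape.size             # 2 dims before the gather axis
inner_shape = params_shape[axis:][1:]     # [7]
inner_dims = inner_shape.size             # 1 dim after the gather axis
outer_axes_indices = np.arange(outer_dims)                   # [0, 1]
inner_axes_indices = np.arange(outer_dims + 1,
                               outer_dims + 1 + inner_dims)  # [3]
# values has shape [outer..., num_indices, inner...]; move the gather
# axis to the front so unsorted_segment_sum can reduce over it.
transpose_dims = np.concatenate(
    [[outer_dims], outer_axes_indices, inner_axes_indices])  # [2, 0, 1, 3]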
Example 8: testSize
def testSize(self):
  with self.test_scope():
    self.assertEqual(
        1, array_ops.size(constant_op.constant(1.0)).numpy())
    self.assertEqual(
        3, array_ops.size(constant_op.constant([1.0, 2.0, 3.0])).numpy())
    self.assertEqual(
        4, array_ops.size(
            constant_op.constant([[1.0, 2.0], [3.0, 4.0]])).numpy())
Author: JonathanRaiman | Project: tensorflow | Lines: 9 | Source: eager_test.py
Example 9: testDenseShape
def testDenseShape(self):
  t_value = [[0, 42], [24, 0]]
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t_value)))
  self.assertEqual(4, self.evaluate(array_ops.size(t_value)))
  self.assertEqual(2, self.evaluate(array_ops.rank(t_value)))

  t = constant_op.constant(t_value)
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(t)))
  self.assertEqual(4, self.evaluate(array_ops.size(t)))
  self.assertEqual(2, self.evaluate(array_ops.rank(t)))
Author: Jackiefan | Project: tensorflow | Lines: 10 | Source: array_ops_test.py
Example 10: _compareSize
def _compareSize(self, x, use_gpu=False):
  np_ans = np.asarray(np.size(x))
  with self.test_session(use_gpu=use_gpu):
    tf_ans = array_ops.size(x)
    result = tf_ans.eval()
    tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
    result_64 = tf_ans_64.eval()
  self.assertAllEqual(np_ans, result)
  self.assertAllEqual(np_ans, result_64)
  self.assertShapeEqual(np_ans, tf_ans)
Author: DjangoPeng | Project: tensorflow | Lines: 10 | Source: shape_ops_test.py
Example 11: testDenseShape
def testDenseShape(self):
  with self.test_session():
    t_value = [[0, 42], [24, 0]]
    self.assertAllEqual((2, 2), array_ops.shape(t_value).eval())
    self.assertEqual(4, array_ops.size(t_value).eval())
    self.assertEqual(2, array_ops.rank(t_value).eval())

    t = constant_op.constant(t_value)
    self.assertAllEqual((2, 2), array_ops.shape(t).eval())
    self.assertEqual(4, array_ops.size(t).eval())
    self.assertEqual(2, array_ops.rank(t).eval())
Author: AlbertXiebnu | Project: tensorflow | Lines: 11 | Source: array_ops_test.py
Example 12: testSparseShape
def testSparseShape(self):
  sp_value = sparse_tensor.SparseTensorValue(
      indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp_value)))
  self.assertEqual(4, self.evaluate(array_ops.size(sp_value)))
  self.assertEqual(2, self.evaluate(array_ops.rank(sp_value)))

  sp = sparse_tensor.SparseTensor.from_value(sp_value)
  self.assertAllEqual((2, 2), self.evaluate(array_ops.shape(sp)))
  self.assertEqual(4, self.evaluate(array_ops.size(sp)))
  self.assertEqual(2, self.evaluate(array_ops.rank(sp)))
Author: Jackiefan | Project: tensorflow | Lines: 11 | Source: array_ops_test.py
Example 13: active_inputs
def active_inputs():
  """The normal flow when the handler is active."""
  # Remove the second column of example indices matrix since it is not
  # useful.
  example_indices, _ = array_ops.split(
      self._sparse_int_column.indices, num_or_size_splits=2, axis=1)
  example_indices = array_ops.squeeze(example_indices, [1])

  filtered_gradients = array_ops.gather(gradients, example_indices)
  filtered_hessians = array_ops.gather(hessians, example_indices)
  filtered_partition_ids = array_ops.gather(example_partition_ids,
                                            example_indices)
  unique_partitions, mapped_partitions = array_ops.unique(
      example_partition_ids)

  # Compute aggregate stats for each partition.
  # The bias is computed on gradients and hessians (and not
  # filtered_gradients) which have exactly one value per example, so we
  # don't double count a gradient in multivalent columns.
  # Since unsorted_segment_sum can be numerically unstable, use 64bit
  # operation.
  gradients64 = math_ops.cast(gradients, dtypes.float64)
  hessians64 = math_ops.cast(hessians, dtypes.float64)
  per_partition_gradients = math_ops.unsorted_segment_sum(
      gradients64, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_hessians = math_ops.unsorted_segment_sum(
      hessians64, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_gradients = math_ops.cast(per_partition_gradients,
                                          dtypes.float32)
  per_partition_hessians = math_ops.cast(per_partition_hessians,
                                         dtypes.float32)

  # Prepend a bias feature per partition that accumulates the stats for all
  # examples in that partition.
  # Bias is added to the stats even if there are no examples with values in
  # the current sparse column. The reason is that the other example batches
  # might have values in these partitions so we have to keep the bias
  # updated.
  bias_feature_ids = array_ops.fill(
      array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
  bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)

  partition_ids = array_ops.concat(
      [unique_partitions, filtered_partition_ids], 0)
  filtered_gradients = array_ops.concat(
      [per_partition_gradients, filtered_gradients], 0)
  filtered_hessians = array_ops.concat(
      [per_partition_hessians, filtered_hessians], 0)

  feature_ids = array_ops.concat(
      [bias_feature_ids, self._sparse_int_column.values], 0)
  # Dimension is always zero for sparse int features.
  dimension_ids = array_ops.zeros_like(feature_ids, dtype=dtypes.int64)
  feature_ids_and_dimensions = array_ops.stack(
      [feature_ids, dimension_ids], axis=1)
  return (partition_ids, feature_ids_and_dimensions, filtered_gradients,
          filtered_hessians)
Author: AnishShah | Project: tensorflow | Lines: 54 | Source: categorical_split_handler.py
Example 14: testSparseShape
def testSparseShape(self):
  with self.test_session():
    sp_value = sparse_tensor.SparseTensorValue(
        indices=((0, 1), (1, 0)), values=(42, 24), dense_shape=(2, 2))
    self.assertAllEqual((2, 2), array_ops.shape(sp_value).eval())
    self.assertEqual(4, array_ops.size(sp_value).eval())
    self.assertEqual(2, array_ops.rank(sp_value).eval())

    sp = sparse_tensor.SparseTensor.from_value(sp_value)
    self.assertAllEqual((2, 2), array_ops.shape(sp).eval())
    self.assertEqual(4, array_ops.size(sp).eval())
    self.assertEqual(2, array_ops.rank(sp).eval())
Author: AlbertXiebnu | Project: tensorflow | Lines: 12 | Source: array_ops_test.py
Example 15: pack
def pack(self, grouped_grads_and_vars):
  """Pack tensors."""
  self.grouped_grads_and_vars = grouped_grads_and_vars
  self.all_tower_shapes = []
  self.all_tower_sizes = []

  device_grad_packs = []
  for tower_grads_and_vars in grouped_grads_and_vars:
    with ops.colocate_with(tower_grads_and_vars[0][0]):
      # Flatten all the grads.
      flat_grads = [
          array_ops.reshape(g, [-1]) for g, _ in tower_grads_and_vars
      ]
      # Remember the original shape of all the grads.
      tower_shapes = [array_ops.shape(g) for g, _ in tower_grads_and_vars]
      # Remember the original sizes of all the grads.
      tower_sizes = [array_ops.size(g) for g, _ in tower_grads_and_vars]
      # Concat all the flat grads into a big flat tensor.
      concat_grads = array_ops.concat(flat_grads, 0)

      # Split the big tensor into num_splits packs. In cases where the
      # total size is not divisible by num_splits, the last pack gets
      # more elements.
      # TODO(zhengxq): it is also possible to optimize away all the concat
      # as well.
      num_splits = self.num_packs

      # The array_ops.size function will sometimes remove static shapes. So
      # if all gradient shapes are defined, we use another method to get the
      # total size.
      # TODO(yuefengz): move this logic to array_ops.size.
      if all([g.shape.is_fully_defined() for g, _ in tower_grads_and_vars]):
        total_grad_size = sum(
            [g.shape.num_elements() for g, _ in tower_grads_and_vars])
      else:
        total_grad_size = array_ops.size(concat_grads)

      split_size = total_grad_size // num_splits
      split_size_last = total_grad_size - split_size * (num_splits - 1)
      split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
      grad_packs = array_ops.split(concat_grads, split_sizes)

      # Ready to aggregate the repacked gradients, with fake variables.
      # TODO(zhengxq): It is hacky to have to use fake variables.
      # We should remove the need for variables in
      # aggregate_gradients_using*.
      device_grad_packs.append(zip(grad_packs, [None] * num_splits))
      self.all_tower_shapes.append(tower_shapes)
      self.all_tower_sizes.append(tower_sizes)

  return device_grad_packs
Author: AnishShah | Project: tensorflow | Lines: 51 | Source: cross_tower_ops.py
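The split-size computation above guarantees the packs sum exactly to the total even when it is not divisible by num_splits; the last pack absorbs the remainder. A sketch of the same arithmetic with assumed numbers:

# Assume 10 gradient elements packed into num_splits = 3 pieces.
total_grad_size = 10
num_splits = 3
split_size = total_grad_size // num_splits                          # 3
split_size_last = total_grad_size - split_size * (num_splits - 1)   # 4
split_sizes = [split_size] * (num_splits - 1) + [split_size_last]
print(split_sizes)  # [3, 3, 4] -- the last pack absorbs the remainder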
Example 16: quantiles_ready
def quantiles_ready():
  """The subgraph for when the quantiles are ready."""
  quantized_feature = quantile_ops.quantiles([], [sparse_column_values], [],
                                             [quantile_buckets],
                                             [sparse_column_indices])
  quantized_feature = math_ops.cast(quantized_feature[1], dtypes.int64)
  quantized_feature = array_ops.squeeze(quantized_feature, axis=0)

  example_indices, _ = array_ops.split(
      sparse_column_indices, num_or_size_splits=2, axis=1)
  example_indices = array_ops.squeeze(example_indices, [1])
  filtered_gradients = array_ops.gather(gradients, example_indices)
  filtered_hessians = array_ops.gather(hessians, example_indices)
  filtered_partition_ids = array_ops.gather(example_partition_ids,
                                            example_indices)
  unique_partitions, mapped_partitions = array_ops.unique(
      example_partition_ids)

  # Compute aggregate stats for each partition.
  # Since unsorted_segment_sum can be numerically unstable, use 64bit
  # operation.
  gradients64 = math_ops.cast(gradients, dtypes.float64)
  hessians64 = math_ops.cast(hessians, dtypes.float64)
  per_partition_gradients = math_ops.unsorted_segment_sum(
      gradients64, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_hessians = math_ops.unsorted_segment_sum(
      hessians64, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_gradients = math_ops.cast(per_partition_gradients,
                                          dtypes.float32)
  per_partition_hessians = math_ops.cast(per_partition_hessians,
                                         dtypes.float32)

  # Prepend a bias feature per partition that accumulates the stats for all
  # examples in that partition.
  bias_feature_ids = array_ops.fill(
      array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
  bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
  zeros = array_ops.zeros_like(bias_feature_ids)
  bias_feature_ids = array_ops.stack([bias_feature_ids, zeros], axis=1)

  partition_ids = array_ops.concat(
      [unique_partitions, filtered_partition_ids], 0)
  filtered_gradients = array_ops.concat(
      [per_partition_gradients, filtered_gradients], 0)
  filtered_hessians = array_ops.concat(
      [per_partition_hessians, filtered_hessians], 0)
  bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)

  return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
Author: Albert-Z-Guo | Project: tensorflow | Lines: 50 | Source: ordinal_split_handler.py
Example 17: _update_mask
def _update_mask(self, weights, threshold):
  """Updates the mask for a given weight tensor.

  This function first computes the cdf of the weight tensor, and estimates
  the threshold value such that 'desired_sparsity' fraction of weights
  have magnitude less than the threshold.

  Args:
    weights: The weight tensor that needs to be masked.
    threshold: The current threshold value. The function will compute a new
      threshold and return the exponential moving average using the current
      value of threshold

  Returns:
    new_threshold: The new value of the threshold based on weights, and
      sparsity at the current global_step
    new_mask: A numpy array of the same size and shape as weights containing
      0 or 1 to indicate which of the values in weights falls below
      the threshold

  Raises:
    ValueError: if sparsity is not defined
  """
  if self._sparsity is None:
    raise ValueError('Sparsity variable undefined')

  sparsity = self._get_sparsity(weights.op.name)
  with ops.name_scope(weights.op.name + '_pruning_ops'):
    abs_weights = math_ops.abs(weights)
    k = math_ops.cast(
        math_ops.round(
            math_ops.cast(array_ops.size(abs_weights), dtypes.float32) *
            (1 - sparsity)), dtypes.int32)
    # Sort the entire array.
    values, _ = nn_ops.top_k(
        array_ops.reshape(abs_weights, [-1]), k=array_ops.size(abs_weights))
    # Grab the (k-1)-th value.
    current_threshold = array_ops.gather(values, k - 1)
    smoothed_threshold = math_ops.add_n([
        math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
        math_ops.multiply(threshold, self._spec.threshold_decay)
    ])
    new_mask = math_ops.cast(
        math_ops.greater_equal(abs_weights, smoothed_threshold),
        dtypes.float32)
  return smoothed_threshold, new_mask
Author: Albert-Z-Guo | Project: tensorflow | Lines: 48 | Source: pruning.py
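The cutoff index k above comes straight from the element count: keep round(size * (1 - sparsity)) largest-magnitude weights and zero the rest. A numpy sketch of the same arithmetic with assumed values (none of these names appear in the original file):

import numpy as np

# Assume 1000 weights and a target sparsity of 0.75.
num_weights = 1000
sparsity = 0.75
k = int(round(num_weights * (1 - sparsity)))   # keep the top 250 magnitudes
abs_weights = np.abs(np.random.randn(num_weights))
values = np.sort(abs_weights)[::-1]            # descending, like top_k
threshold = values[k - 1]                      # the k-th largest magnitude
mask = (abs_weights >= threshold).astype(np.float32)
print(int(mask.sum()))                         # 250 (barring ties)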
Example 18: call
def call(self, values, weights=None):
  """Accumulate statistics for computing the mean.

  For example, if values is [1, 3, 5, 7] then the mean is 4.
  If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

  Args:
    values: Tensor with the per-example value.
    weights: Optional weighting of each example. Defaults to 1.

  Returns:
    The arguments, for easy chaining.
  """
  if weights is None:
    self.denom.assign_add(
        math_ops.cast(array_ops.identity(array_ops.size(values)), self.dtype))
    values = math_ops.reduce_sum(values)
    self.numer.assign_add(math_ops.cast(values, self.dtype))
  else:
    weights = math_ops.cast(weights, self.dtype)
    self.denom.assign_add(math_ops.reduce_sum(weights))
    values = math_ops.cast(values, self.dtype) * weights
    self.numer.assign_add(math_ops.reduce_sum(values))
  if weights is None:
    return values
  return values, weights
Author: Jackiefan | Project: tensorflow | Lines: 26 | Source: metrics_impl.py
Example 19: dense_make_stats_update
def dense_make_stats_update(is_active, are_buckets_ready, float_column,
                            quantile_buckets, example_partition_ids, gradients,
                            hessians, weights, empty_gradients, empty_hessians):
  """Updates the state for dense split handler."""
  empty_float = constant_op.constant_v1([], dtype=dtypes.float32)

  quantile_values, quantile_weights = control_flow_ops.cond(
      is_active[1],  # For the next layer, this handler is inactive.
      lambda: (float_column, weights),
      lambda: (empty_float, empty_float))

  def ready_inputs_fn():
    """Branch to execute when quantiles are ready."""
    quantized_feature = quantile_ops.quantiles([float_column], [],
                                               [quantile_buckets], [], [])
    quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
    quantized_feature = array_ops.squeeze(quantized_feature, axis=0)
    return (example_partition_ids, quantized_feature, gradients, hessians)

  def not_ready_inputs_fn():
    return (constant_op.constant_v1([], dtype=dtypes.int32),
            constant_op.constant_v1([[]], dtype=dtypes.int64, shape=[1, 2]),
            empty_gradients, empty_hessians)

  example_partition_ids, feature_ids, gradients, hessians = (
      control_flow_ops.cond(
          math_ops.logical_and(
              math_ops.logical_and(are_buckets_ready,
                                   array_ops.size(quantile_buckets) > 0),
              is_active[0]), ready_inputs_fn, not_ready_inputs_fn))
  return (quantile_values, quantile_weights, example_partition_ids, feature_ids,
          gradients, hessians)
Author: Albert-Z-Guo | Project: tensorflow | Lines: 32 | Source: ordinal_split_handler.py
Example 20: testEmptyInput
def testEmptyInput(self):
  with self.test_session():
    x = array_ops.placeholder(dtypes.float32, shape=[0, 3])
    self.assertEqual(0, array_ops.size(x).eval())
    # reshape would raise if logits is empty
    with self.assertRaises(errors_impl.InvalidArgumentError):
      nn_ops.softmax(x, axis=0).eval()
Author: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: softmax_op_test.py
Note: The tensorflow.python.ops.array_ops.size examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not repost without permission.