This article collects typical usage examples of the mul function from the Python module tensorflow.python.ops.math_ops. If you have been wondering what exactly mul does, how to call it, and what real-world usage looks like, the curated examples below should help.
Twenty code examples of mul are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
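Before the examples, a quick orientation: math_ops.mul computes an element-wise product with standard broadcasting. Note that the mul name comes from pre-1.0 TensorFlow; since TensorFlow 1.0 the public op is tf.multiply (math_ops.multiply internally), so a current installation may need that name instead. A minimal sketch, assuming TF 1.x-style graph mode:

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([10.0, 100.0])  # broadcast across the rows of a

# Element-wise product; older code spells this math_ops.mul / tf.mul.
c = tf.multiply(a, b)

with tf.Session() as sess:
  print(sess.run(c))  # [[ 10. 200.] [ 30. 400.]]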
Example 1: _LayerWithActivationProcessing
def _LayerWithActivationProcessing(self,
                                   input_tensor=None,
                                   scope='test',
                                   post_activation_bypass=False):
  batch_size, height, width, depth = 5, 128, 128, 3
  if input_tensor is None:
    input_tensor = array_ops.zeros((batch_size, height, width, depth))
  weight_init = init_ops.truncated_normal_initializer
  with ops.name_scope(scope):
    output = layers.conv2d(
        input_tensor,
        depth, [5, 5],
        padding='SAME',
        weights_initializer=weight_init(0.09),
        activation_fn=None,
        normalizer_fn=None,
        biases_initializer=None)

    output = layers.batch_norm(
        output, center=True, scale=True, decay=1.0 - 0.003, fused=True)

    output = nn_ops.relu6(output)
    scaled_output1 = math_ops.mul(2.0, output)
    scaled_output2 = math_ops.mul(3.0, output)
    output = scaled_output1 + scaled_output2
  return output
Author: Ajaycs99, Project: tensorflow, Lines: 27, Source: quantize_graph_test.py
Example 2: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  input_name = "input"
  input_dims = [2, 32, 32, 3]
  output_name = "output"
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtypes.float32, shape=input_dims, name=input_name)
    with g.device("/GPU:0"):
      n = inp
      c = constant_op.constant(1.0, name="c")
      # Adds control dependency from the constant op to a trt incompatible op,
      # and adds control dependency from the trt incompatible op to all other
      # ops, to make sure the constant op cannot be contracted with any trt
      # segment that depends on it.
      with g.control_dependencies([c]):
        d = self.trt_incompatible_op(n, name="incompatible")
      with g.control_dependencies([d]):
        n = math_ops.add(n, c, name="add")
        n = math_ops.mul(n, n, name="mul")
        n = math_ops.add(n, n, name="add1")
      n = self.trt_incompatible_op(n, name="incompatible1")
      with g.control_dependencies([d]):
        n = math_ops.add(n, c, name="add2")
        n = math_ops.mul(n, n, name="mul1")
        n = math_ops.add(n, n, name="add3")
      array_ops.squeeze(n, name=output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      output_names=[output_name],
      expected_output_dims=[tuple(input_dims)])
Author: aeverall, Project: tensorflow, Lines: 34, Source: base_test.py
Example 3: unregularized_loss
def unregularized_loss(self, examples):
  """Add operations to compute the loss (without the regularization loss).

  Args:
    examples: Examples to compute unregularized loss on.

  Returns:
    An Operation that computes mean (unregularized) loss for given set of
    examples.

  Raises:
    ValueError: if examples are not well defined.
  """
  self._assertSpecified(
      ['example_labels', 'example_weights', 'sparse_features',
       'dense_features'], examples)
  self._assertList(['sparse_features', 'dense_features'], examples)
  with name_scope('sdca/unregularized_loss'):
    predictions = self._linear_predictions(examples)
    labels = convert_to_tensor(examples['example_labels'])
    weights = convert_to_tensor(examples['example_weights'])

    if self._options['loss_type'] == 'logistic_loss':
      return math_ops.reduce_sum(math_ops.mul(
          sigmoid_cross_entropy_with_logits(
              predictions, labels), weights)) / math_ops.reduce_sum(weights)

    # squared loss
    err = math_ops.sub(labels, predictions)
    weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
    return (math_ops.reduce_sum(weighted_squared_err) /
            math_ops.reduce_sum(weights))
Author: 4Quant, Project: tensorflow, Lines: 32, Source: sdca_ops.py
Example 4: setUp
def setUp(self):
  """Test setup.

  Structure of the forward graph:
            f
           | |
      -----   -----
      |           |
      d           e
     | |         | |
  ---   ---------  ---
  |         |        |
  a         b        c

  Construct a backward graph using the GradientDescentOptimizer.
  """
  self.a = variables.Variable(1.0, name="a")
  self.b = variables.Variable(2.0, name="b")
  self.c = variables.Variable(4.0, name="c")
  self.d = math_ops.mul(self.a, self.b, name="d")
  self.e = math_ops.mul(self.b, self.c, name="e")
  self.f = math_ops.mul(self.d, self.e, name="f")

  # Gradient descent optimizer that minimizes f.
  gradient_descent.GradientDescentOptimizer(0.01).minimize(
      self.f, name="optim")

  self.sess = session.Session()
  self.sess.run(variables.global_variables_initializer())
Author: kdavis-mozilla, Project: tensorflow, Lines: 30, Source: stepper_test.py
Example 5: accuracy
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions matches labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or
      if dtype is not bool, integer, or string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.mul(is_correct, weights)
      num_values = math_ops.mul(weights, array_ops.ones_like(is_correct))
      return math_ops.div(math_ops.reduce_sum(is_correct),
                          math_ops.reduce_sum(num_values))
    return math_ops.reduce_mean(is_correct)
Author: 821760408-sp, Project: tensorflow, Lines: 35, Source: classification.py
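A hedged NumPy sketch of the weighted arithmetic above (hypothetical values, not library code): the correctness flags are multiplied by the weights, and the denominator is the weights broadcast against a ones-like tensor, so the result is a weighted average rather than a plain mean.

import numpy as np

predictions = np.array([1, 2, 3, 4])
labels      = np.array([1, 2, 9, 4])
weights     = np.array([1.0, 1.0, 3.0, 1.0])

is_correct = (predictions == labels).astype(np.float32)  # [1, 1, 0, 1]
num_values = weights * np.ones_like(is_correct)          # [1, 1, 3, 1]

accuracy = (is_correct * weights).sum() / num_values.sum()
print(accuracy)  # 3.0 / 6.0 = 0.5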
Example 6: GetParams
def GetParams(self):
  """Create a graph containing multiple segments."""
  input_name = "input"
  input_dims = [2, 32, 32, 3]
  g = ops.Graph()
  with g.as_default():
    inp = array_ops.placeholder(
        dtype=dtypes.float32, shape=input_dims, name=input_name)
    with g.device("/GPU:0"):
      n = inp
      c = constant_op.constant(1.0, name="c")
      n = math_ops.add(n, c, name="add")
      n = math_ops.mul(n, n, name="mul")
      n = math_ops.add(n, n, name="add1")
      n = self.trt_incompatible_op(n, name="incompatible1")
      n = math_ops.add(n, c, name="add2")
      n = math_ops.mul(n, n, name="mul1")
      n = math_ops.add(n, n, name="add3")
      array_ops.squeeze(n, name=self.output_name)
  return trt_test.TfTrtIntegrationTestParams(
      gdef=g.as_graph_def(),
      input_names=[input_name],
      input_dims=[input_dims],
      expected_engines={
          "my_trt_op_0": ["add2", "add3", "mul1"],
          # Why segment ["add", "add1", "mul"] was assigned segment id 1
          # instead of 0: the parent node of this segment is actually const
          # node 'c', but it's removed later since a const output of the
          # segment is not allowed.
          "my_trt_op_1": ["add", "add1", "mul"]
      },
      expected_output_dims=tuple(input_dims),
      allclose_atol=1.e-06,
      allclose_rtol=1.e-06)
Author: ZhangXinNan, Project: tensorflow, Lines: 34, Source: base_test.py
Example 7: normalize_moments
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.op_scope([counts, mean_ss, variance_ss, shift], name, "normalize"):
    divisor = math_ops.inv(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.mul(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.mul(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.sub(
        math_ops.mul(variance_ss, divisor),
        math_ops.square(shifted_mean),
        name="variance")
    return (mean, variance)
Author: BersaKAIN, Project: tensorflow, Lines: 29, Source: nn.py
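To make the sufficient-statistics algebra above concrete, here is a hedged NumPy sketch (plain arithmetic, not the TensorFlow code) for the data [1, 2, 3, 4] with no shift: mean = sum / count and variance = sum_of_squares / count - mean^2.

import numpy as np

data = np.array([1.0, 2.0, 3.0, 4.0])
counts = data.size               # 4
mean_ss = data.sum()             # 10.0 (shifted sum; shift is None here)
variance_ss = (data ** 2).sum()  # 30.0 (shifted squared sum)

mean = mean_ss / counts                      # 2.5
variance = variance_ss / counts - mean ** 2  # 30/4 - 6.25 = 1.25

assert np.isclose(variance, data.var())  # matches the population variance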
Example 8: convert_image_dtype
def convert_image_dtype(image, dtype, name=None):
  """Convert `image` to `dtype`, scaling its values if needed.

  Images that are represented using floating point values are expected to have
  values in the range [0,1). Image data stored in integer data types are
  expected to have values in the range `[0,MAX]`, where `MAX` is the largest
  positive representable number for the data type.

  This op converts between data types, scaling the values appropriately before
  casting.

  Note that for floating point inputs, this op expects values to lie in [0,1).
  Conversion of an image containing values outside that range may lead to
  overflow errors when converted to integer `Dtype`s.

  Args:
    image: An image.
    dtype: A `DType` to convert `image` to.
    name: A name for this operation (optional).

  Returns:
    `image`, converted to `dtype`.
  """
  if dtype == image.dtype:
    return image

  with ops.op_scope([image], name, 'convert_image') as name:
    # Both integer: use integer multiplication in the larger range
    if image.dtype.is_integer and dtype.is_integer:
      scale_in = image.dtype.max
      scale_out = dtype.max
      if scale_in > scale_out:
        # Scaling down, scale first, then cast. The scaling factor will
        # cause in.max to be mapped to above out.max but below out.max+1,
        # so that the output is safely in the supported range.
        scale = (scale_in + 1) // (scale_out + 1)
        scaled = math_ops.div(image, scale)
        return math_ops.cast(scaled, dtype)
      else:
        # Scaling up, cast first, then scale. The scale will not map in.max to
        # out.max, but converting back and forth should result in no change.
        cast = math_ops.cast(image, dtype)
        scale = (scale_out + 1) // (scale_in + 1)
        return math_ops.mul(cast, scale)
    elif image.dtype.is_floating and dtype.is_floating:
      # Both float: Just cast, no possible overflows in the allowed ranges.
      return math_ops.cast(image, dtype)
    else:
      if image.dtype.is_integer:
        # Converting to float: first cast, then scale
        cast = math_ops.cast(image, dtype)
        scale = 1. / image.dtype.max
        return math_ops.mul(cast, scale)
      else:
        # Converting from float: first scale, then cast
        scale = dtype.max + 0.5  # avoid rounding problems in the cast
        scaled = math_ops.mul(image, scale)
        return math_ops.cast(scaled, dtype)
Author: natalya-patrikeeva, Project: tensorflow, Lines: 59, Source: image_ops.py
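The scaling rules above are easiest to see through the public wrapper tf.image.convert_image_dtype. A minimal sketch, assuming a TF 1.x-style session: a uint8 value of 255 maps to 1.0 in float32 (scale 1/255), and converting back scales by roughly 255.5 before the truncating cast.

import tensorflow as tf

img_u8 = tf.constant([[0, 128, 255]], dtype=tf.uint8)
img_f32 = tf.image.convert_image_dtype(img_u8, tf.float32)  # scales by 1/255
img_back = tf.image.convert_image_dtype(img_f32, tf.uint8)  # scales by ~255.5, then casts

with tf.Session() as sess:
  print(sess.run(img_f32))   # ~[[0. 0.502 1.]]
  print(sess.run(img_back))  # [[  0 128 255]]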
Example 9: moments
def moments(x, axes, name=None, keep_dims=False):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  When using these moments for batch normalization (see
  `tf.nn.batch_normalization`):
    * for so-called "global normalization", used with convolutional filters
      with shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.
    * for simple batch normalization pass `axes=[0]` (batch only).

  Args:
    x: A `Tensor`.
    axes: array of ints. Axes along which to compute mean and
      variance.
    keep_dims: produce moments with the same dimensionality as the input.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.op_scope([x, axes], name, "moments"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      # The shape is known in the relevant axes, so we can statically
      # compute the divisor.
      divisor = 1.0
      for d in set(axes):
        divisor *= x.get_shape()[d].value
      divisor = constant_op.constant(1.0 / divisor, x.dtype, name="divisor")
    else:
      divisor = constant_op.constant(1.0, dtype=x.dtype)
      x_dynamic_shape = array_ops.shape(x)
      for d in set(axes):
        divisor *= math_ops.cast(x_dynamic_shape[d], x.dtype)
      divisor = math_ops.inv(divisor, name="divisor")
    constant_axes = constant_op.constant(axes, name="axes")
    # Note: We do not use Mean here because it is very slow on GPU.
    mean = math_ops.mul(
        math_ops.reduce_sum(x,
                            constant_axes,
                            keep_dims=True),
        divisor,
        name="mean")
    var = math_ops.mul(
        math_ops.reduce_sum(
            math_ops.squared_difference(x, mean),
            constant_axes,
            keep_dims=keep_dims),
        divisor,
        name="variance")
    if keep_dims:
      return mean, var
    else:
      return array_ops.squeeze(mean, squeeze_dims=axes), var
Author: 13331151, Project: tensorflow, Lines: 58, Source: nn.py
Example 10: testFindNodesWithBadTensorValues
def testFindNodesWithBadTensorValues(self):
  with session.Session() as sess:
    u_name = "testFindNodesWithBadTensorValues/u"
    v_name = "testFindNodesWithBadTensorValues/v"
    w_name = "testFindNodesWithBadTensorValues/w"
    x_name = "testFindNodesWithBadTensorValues/x"
    y_name = "testFindNodesWithBadTensorValues/y"
    z_name = "testFindNodesWithBadTensorValues/z"

    u_init = constant_op.constant([2.0, 4.0])
    u = variables.Variable(u_init, name=u_name)
    v_init = constant_op.constant([2.0, 1.0])
    v = variables.Variable(v_init, name=v_name)

    # Expected output: [0.0, 3.0]
    w = math_ops.sub(u, v, name=w_name)

    # Expected output: [inf, 1.3333]
    x = math_ops.div(u, w, name=x_name)

    # Expected output: [nan, 4.0]
    y = math_ops.mul(w, x, name=y_name)
    z = math_ops.mul(y, y, name=z_name)

    u.initializer.run()
    v.initializer.run()

    run_options = config_pb2.RunOptions()
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_ops=["DebugIdentity"],
        debug_urls="file://%s" % self._dump_root)

    run_metadata = config_pb2.RunMetadata()
    sess.run(z, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(self._dump_root)

    def has_bad_value(_, tensor):
      return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))

    # Find all "offending tensors".
    bad_data = dump.find(has_bad_value)

    # Verify that the nodes with bad values are caught through running find
    # on the debug dump.
    self.assertEqual(3, len(bad_data))
    self.assertEqual(x_name, bad_data[0].node_name)
    self.assertEqual(y_name, bad_data[1].node_name)
    self.assertEqual(z_name, bad_data[2].node_name)

    # Test first_n kwarg of find(): Find the first offending tensor.
    first_bad_datum = dump.find(has_bad_value, first_n=1)
    self.assertEqual(1, len(first_bad_datum))
    self.assertEqual(x_name, first_bad_datum[0].node_name)
Author: JamesFysh, Project: tensorflow, Lines: 58, Source: session_debug_test.py
Example 11: natural_exp_decay
def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
                      staircase=False, name=None):
  """Applies natural exponential decay to the initial learning rate.

  When training a model, it is often recommended to lower the learning rate as
  the training progresses. This function applies an exponential decay function
  to a provided initial learning rate. It requires a `global_step` value to
  compute the decayed learning rate. You can just pass a TensorFlow variable
  that you increment at each training step.

  The function returns the decayed learning rate. It is computed as:

  ```python
  decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
                                              decay_steps)
  ```

  Example: decay exponentially with a decay rate of 0.5:

  ```python
  ...
  global_step = tf.Variable(0, trainable=False)
  learning_rate = 0.1
  decay_steps = 5
  k = 0.5
  learning_rate = tf.train.natural_exp_decay(learning_rate, global_step,
                                             decay_steps, k)

  # Passing global_step to minimize() will increment it at each step.
  learning_step = (
      tf.train.GradientDescentOptimizer(learning_rate)
      .minimize(...my loss..., global_step=global_step)
  )
  ```

  Args:
    learning_rate: A scalar `float32` or `float64` `Tensor` or a
      Python number. The initial learning rate.
    global_step: A Python number. Global step to use for the decay
      computation. Must not be negative.
    decay_steps: A Python number. How often to apply decay.
    decay_rate: A Python number. The decay rate.
    staircase: Whether to apply decay in a discrete staircase, as opposed
      to continuous, fashion.
    name: String. Optional name of the operation. Defaults to
      'NaturalExpDecay'.

  Returns:
    A scalar `Tensor` of the same type as `learning_rate`. The decayed
    learning rate.
  """
  with ops.name_scope(name, "NaturalExpDecay",
                      [learning_rate, global_step, decay_rate]) as name:
    learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    dtype = learning_rate.dtype
    global_step = math_ops.cast(global_step, dtype)
    decay_steps = math_ops.cast(decay_steps, dtype)
    decay_rate = math_ops.cast(decay_rate, dtype)
    p = global_step / decay_steps
    if staircase:
      p = math_ops.floor(p)
    exponent = math_ops.exp(math_ops.mul(math_ops.neg(decay_rate), p))
    return math_ops.mul(learning_rate, exponent, name=name)
Author: AriaAsuka, Project: tensorflow, Lines: 57, Source: learning_rate_decay.py
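As a quick sanity check of the formula above, here is the same computation traced with ordinary Python floats (a sketch of the semantics, not the library code): with learning_rate=0.1, decay_rate=0.5, decay_steps=10, the rate after 20 steps is 0.1 * exp(-0.5 * 20 / 10) ≈ 0.0368.

import math

def natural_exp_decay_value(learning_rate, global_step, decay_steps,
                            decay_rate, staircase=False):
  # Mirrors the TF graph computation above with plain floats.
  p = global_step / decay_steps
  if staircase:
    p = math.floor(p)
  return learning_rate * math.exp(-decay_rate * p)

print(natural_exp_decay_value(0.1, 20, 10, 0.5))  # ~0.036788
print(natural_exp_decay_value(0.1, 25, 10, 0.5, staircase=True))  # floor(2.5) = 2 -> ~0.036788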
Example 12: testScan_Simple
def testScan_Simple(self):
  with self.test_session():
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = constant_op.constant(2.0, name="v")

    r = functional_ops.scan(lambda a, x: math_ops.mul(a, x), elems)
    self.assertAllEqual([1., 2., 6., 24., 120., 720.], r.eval())

    r = functional_ops.scan(
        lambda a, x: math_ops.mul(a, x), elems, initializer=v)
    self.assertAllEqual([2., 4., 12., 48., 240., 1440.], r.eval())
Author: kdavis-mozilla, Project: tensorflow, Lines: 11, Source: functional_ops_test.py
Example 13: testHandleAndValue
def testHandleAndValue(self):
  with self.test_session() as sess:
    # Return a handle and a value.
    a = constant_op.constant(10)
    b = constant_op.constant(5)
    c = math_ops.mul(a, b)
    h = session_ops.get_session_handle(c)
    v = math_ops.mul(a, c)
    h, v = sess.run([h, v])

    self.assertEqual(50, h.eval())
    self.assertEqual(500, v)
Author: tensorflow, Project: tensorflow, Lines: 12, Source: session_ops_test.py
Example 14: moments
def moments(x, axes, name=None):
  """Calculate the mean and variance of `x`.

  The mean and variance are calculated by aggregating the contents of `x`
  across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean
  and variance of a vector.

  For so-called "global normalization" needed for convolutional filters pass
  `axes=[0, 1, 2]` (batch, height, width). For batch normalization pass
  `axes=[0]` (batch).

  Args:
    x: A `Tensor`.
    axes: array of ints. Axes along which to compute mean and
      variance.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with ops.op_scope([x, axes], name, "moments"):
    x = ops.convert_to_tensor(x, name="x")
    x_shape = x.get_shape()
    if all(x_shape[d].value is not None for d in axes):
      # The shape is known in the relevant axes, so we can statically
      # compute the divisor.
      divisor = 1.0
      for d in set(axes):
        divisor *= x.get_shape()[d].value
      divisor = constant_op.constant(1.0 / divisor, x.dtype, name="divisor")
    else:
      divisor = constant_op.constant(1.0, dtype=x.dtype)
      x_dynamic_shape = array_ops.shape(x)
      for d in set(axes):
        divisor *= math_ops.cast(x_dynamic_shape[d], x.dtype)
      divisor = math_ops.inv(divisor, name="divisor")
    axes = constant_op.constant(axes, name="axes")
    # Note: We do not use Mean here because it is very slow on GPU.
    # Note 2: The expression below is potentially more stable.
    # It is however a bit slower and stability doesn't appear to be an issue.
    # mean = math_ops.reduce_sum(math_ops.mul(x, divisor), axes, name="mean")
    # var = math_ops.reduce_sum(math_ops.mul(math_ops.square(x - mean),
    #                                        divisor), axes,
    #                           name="variance")
    mean = math_ops.mul(math_ops.reduce_sum(x, axes), divisor, name="mean")
    # Give x-mean a specific name, so the caller might take advantage of it.
    # The caller should have a fallback plan, however: this tensor may not be
    # available if this function implementation changes.
    x_centered = math_ops.sub(x, mean, name="x_centered")
    var = math_ops.mul(math_ops.reduce_sum(math_ops.square(x_centered), axes),
                       divisor, name="variance")
    return mean, var
Author: julian-park, Project: tensorflow, Lines: 52, Source: nn.py
Example 15: testHandleBasic
def testHandleBasic(self):
  with self.test_session() as sess:
    # Return a handle.
    a = constant_op.constant(10)
    b = constant_op.constant(5)
    c = math_ops.mul(a, b)
    h = session_ops.get_session_handle(c)
    h = sess.run(h)

    # Feed a tensor handle.
    f, x = session_ops.get_session_tensor(h.handle, dtypes.int32)
    y = math_ops.mul(x, 10)
    self.assertEqual(500, sess.run(y, feed_dict={f: h.handle}))
Author: tensorflow, Project: tensorflow, Lines: 13, Source: session_ops_test.py
Example 16: testFoldr_Simple
def testFoldr_Simple(self):
  with self.test_session():
    elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")

    r = functional_ops.foldr(lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
                             elems)
    self.assertAllEqual(450, r.eval())

    r = functional_ops.foldr(
        lambda a, x: math_ops.mul(math_ops.add(a, x), 2),
        elems,
        initializer=10)
    self.assertAllEqual(1282, r.eval())
Author: kdavis-mozilla, Project: tensorflow, Lines: 13, Source: functional_ops_test.py
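To see where 450 and 1282 come from, here is the same right fold traced in plain Python (a sketch of the semantics, assuming foldr without an initializer seeds the accumulator with the last element):

from functools import reduce

elems = [1, 2, 3, 4, 5, 6]
fn = lambda a, x: (a + x) * 2

# No initializer: the accumulator starts at the last element, folds right-to-left.
print(reduce(fn, reversed(elems[:-1]), elems[-1]))  # 450

# With initializer=10: every element participates.
print(reduce(fn, reversed(elems), 10))  # 1282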
Example 17: unregularized_loss
def unregularized_loss(self, examples):
  """Add operations to compute the loss (without the regularization loss).

  Args:
    examples: Examples to compute unregularized loss on.

  Returns:
    An Operation that computes mean (unregularized) loss for given set of
    examples.

  Raises:
    ValueError: if examples are not well defined.
  """
  self._assertSpecified(['example_labels', 'example_weights',
                         'sparse_features', 'dense_features'], examples)
  self._assertList(['sparse_features', 'dense_features'], examples)
  with name_scope('sdca/unregularized_loss'):
    predictions = math_ops.cast(
        self._linear_predictions(examples), dtypes.float64)
    labels = math_ops.cast(
        internal_convert_to_tensor(examples['example_labels']),
        dtypes.float64)
    weights = math_ops.cast(
        internal_convert_to_tensor(examples['example_weights']),
        dtypes.float64)

    if self._options['loss_type'] == 'logistic_loss':
      return math_ops.reduce_sum(math_ops.mul(
          sigmoid_cross_entropy_with_logits(predictions, labels),
          weights)) / math_ops.reduce_sum(weights)

    if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']:
      # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to
      # first convert 0/1 labels into -1/1 labels.
      all_ones = array_ops.ones_like(predictions)
      adjusted_labels = math_ops.sub(2 * labels, all_ones)
      # Tensor that contains (unweighted) error (hinge loss) per
      # example.
      error = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(adjusted_labels,
                                                              predictions)))
      weighted_error = math_ops.mul(error, weights)
      return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum(
          weights)

    # squared loss
    err = math_ops.sub(labels, predictions)
    weighted_squared_err = math_ops.mul(math_ops.square(err), weights)
    # SDCA squared loss function is sum(err^2) / (2*sum(weights))
    return (math_ops.reduce_sum(weighted_squared_err) /
            (2.0 * math_ops.reduce_sum(weights)))
Author: ComeOnGetMe, Project: tensorflow, Lines: 51, Source: sdca_ops.py
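A hedged NumPy sketch of the hinge-loss branch above (hypothetical values, not library code): 0/1 labels are remapped to -1/+1 via 2*labels - 1, and the per-example loss is relu(1 - y * prediction) before weighting.

import numpy as np

predictions = np.array([0.8, -0.3, 2.0])
labels      = np.array([1.0, 0.0, 1.0])   # 0/1 labels
weights     = np.array([1.0, 1.0, 2.0])

adjusted = 2 * labels - 1                             # [ 1, -1,  1]
error = np.maximum(0.0, 1 - adjusted * predictions)   # [0.2, 0.7, 0.0]
loss = (error * weights).sum() / weights.sum()
print(loss)  # (0.2 + 0.7 + 0.0) / 4 = 0.225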
Example 18: _num_present
def _num_present(losses, weight, per_batch=False):
  """Computes the number of elements in the loss function induced by `weight`.

  A given weight tensor induces different numbers of usable elements in the
  `losses` tensor. The `weight` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and `weight` is a tensor of size [4, 5], then `weight` is, in
  effect, tiled to match the size of `losses`. Following this effective tile,
  the total number of present elements is the number of non-zero weights.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is True, the value is returned as a tensor of size
    [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # To ensure that dims of [2, 1] get mapped to [2,]
  weight = array_ops.squeeze(weight)

  # If the weight is a scalar, it's easy to compute:
  if weight.get_shape().ndims == 0:
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    num_per_batch = math_ops.select(math_ops.equal(weight, 0),
                                    0.0, num_per_batch)
    num_per_batch = math_ops.mul(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

  # First, count the number of nonzero weights:
  if weight.get_shape().ndims >= 1:
    reduction_indices = list(range(1, weight.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weight, 0)),
        reduction_indices=reduction_indices)

  # Next, determine the number of elements that weight would broadcast to:
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weight.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

  num_per_batch = math_ops.mul(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
Author: Nishant23, Project: tensorflow, Lines: 50, Source: loss_ops.py
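To illustrate the counting the docstring describes, here is a hedged NumPy sketch (plain arithmetic, not the library code): with losses of shape [4, 5, 6, 3] and weight of shape [4, 5], each weight entry broadcasts over 6 * 3 = 18 loss elements, so the per-batch count is (nonzero weights in that row) * 18.

import numpy as np

losses = np.ones((4, 5, 6, 3))
weight = np.ones((4, 5))
weight[0, :3] = 0.0  # zero out 3 of the 5 weights in the first batch entry

num_to_broadcast = np.prod(losses.shape[weight.ndim:])    # 6 * 3 = 18
num_nonzero_per_batch = np.count_nonzero(weight, axis=1)  # [2, 5, 5, 5]
num_per_batch = num_nonzero_per_batch * num_to_broadcast  # [36, 90, 90, 90]

print(num_per_batch, num_per_batch.sum())  # [36 90 90 90] 306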
Example 19: testFold_Grad
def testFold_Grad(self):
  with self.test_session():
    elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
    v = constant_op.constant(2.0, name="v")

    r = functional_ops.foldl(
        lambda a, x: math_ops.mul(a, x), elems, initializer=v)
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllEqual(720.0, r.eval())

    r = functional_ops.foldr(
        lambda a, x: math_ops.mul(a, x), elems, initializer=v)
    r = gradients_impl.gradients(r, v)[0]
    self.assertAllEqual(720.0, r.eval())
Author: kdavis-mozilla, Project: tensorflow, Lines: 14, Source: functional_ops_test.py
Example 20: _weighted_loss
def _weighted_loss(loss, weight):
  """Returns cumulative weighted loss."""
  unweighted_loss = array_ops.reshape(loss, shape=(-1,))
  weighted_loss = math_ops.mul(unweighted_loss,
                               array_ops.reshape(
                                   weight, shape=(-1,)))
  return weighted_loss
Author: caikehe, Project: tensorflow, Lines: 7, Source: head.py
Note: The tensorflow.python.ops.math_ops.mul examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from projects contributed by open-source authors; copyright of the source code remains with the original authors, and distribution and use are subject to the corresponding project's license. Please do not repost without permission.