This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.sign. If you have been wondering what exactly the sign function does, how to call it, or what real-world uses of it look like, the curated examples below should help.
Shown below are 20 code examples of the sign function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
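Before the project excerpts, here is a minimal standalone sketch (not taken from any of the projects below, and assuming a TF 1.x-style graph/session environment like the excerpts use) of what math_ops.sign computes: -1 for negative entries, 0 for zeros, and +1 for positive entries.

# Minimal sketch of math_ops.sign (assumes a TF 1.x-style environment).
import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([-3.2, 0.0, 5.7])
s = math_ops.sign(x)  # elementwise: -1 for x < 0, 0 for x == 0, +1 for x > 0

with tf.Session() as sess:
  print(sess.run(s))  # [-1.  0.  1.]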
Example 1: _apply_sparse

def _apply_sparse(self, grad, var):
  lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
  alpha_t = math_ops.cast(self._alpha_t, var.dtype.base_dtype)
  beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
  m = self.get_slot(var, 'm')
  m_t = state_ops.assign(
      m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
  sign_g = ops.IndexedSlices(
      math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
  sign_gm = ops.IndexedSlices(
      array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
      sign_g.indices,
      dense_shape=sign_g.dense_shape)
  sign_decayed = math_ops.cast(
      self._sign_decay_t, var.dtype.base_dtype)
  multiplier_values = alpha_t + sign_decayed * sign_gm.values
  multiplier = ops.IndexedSlices(
      multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
  final_update = ops.IndexedSlices(
      lr_t * multiplier.values * grad.values,
      multiplier.indices,
      dense_shape=multiplier.dense_shape)
  var_update = state_ops.scatter_sub(
      var,
      final_update.indices,
      final_update.values,
      use_locking=self._use_locking)
  return control_flow_ops.group(*[var_update, m_t])

Author: Ajaycs99 | Project: tensorflow | Lines: 34 | Source: addsign.py
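For intuition, the sparse branch above applies the AddSign rule, var -= lr * (alpha + sign_decay * sign(g) * sign(m)) * g, only at the rows referenced by the sparse gradient. A rough dense NumPy sketch of the same rule follows; the names and default values are illustrative and not taken from addsign.py.

# Rough NumPy sketch of the dense AddSign update implemented sparsely above.
# Hyperparameter names (lr, alpha, beta, sign_decay) are illustrative.
import numpy as np

def addsign_dense_step(var, grad, m, lr=0.1, alpha=1.0, beta=0.9, sign_decay=1.0):
  m = beta * m + (1.0 - beta) * grad                        # moving average of gradients
  multiplier = alpha + sign_decay * np.sign(grad) * np.sign(m)
  var = var - lr * multiplier * grad                        # scatter_sub in the sparse version
  return var, m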
Example 2: sample_n

def sample_n(self, n, seed=None, name="sample_n"):
  """Sample `n` observations from the Laplace Distributions.

  Args:
    n: `Scalar`, type int32, the number of observations to sample.
    seed: Python integer, the random seed.
    name: The name to give this op.

  Returns:
    samples: `[n, ...]`, a `Tensor` of `n` samples for each
      of the distributions determined by broadcasting the parameters.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._loc, self._scale, n]):
      n = ops.convert_to_tensor(n)
      n_val = tensor_util.constant_value(n)
      shape = array_ops.concat(0, ([n], self.batch_shape()))
      # Sample uniformly-at-random from the open-interval (-1, 1).
      uniform_samples = random_ops.random_uniform(
          shape=shape,
          minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                              self.dtype.as_numpy_dtype(0.)),
          maxval=self.dtype.as_numpy_dtype(1.),
          dtype=self.dtype,
          seed=seed)
      # Provide some hints to shape inference
      inferred_shape = tensor_shape.vector(n_val).concatenate(
          self.get_batch_shape())
      uniform_samples.set_shape(inferred_shape)
      return (self._loc - self._scale * math_ops.sign(uniform_samples) *
              math_ops.log(1. - math_ops.abs(uniform_samples)))

Author: alephman | Project: Tensorflow | Lines: 33 | Source: laplace.py
Example 3: random_sign_uniform

def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".

  Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
  samples from this `Op` are distributed like

  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape: `TensorShape` or Python list. Shape of the returned tensor.
    minval: `0-D` `Tensor` giving the minimum values.
    maxval: `0-D` `Tensor` giving the maximum values.
    dtype: `TensorFlow` `dtype` or Python dtype.
    seed: Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = dtypes.as_dtype(dtype)
  with ops.name_scope("random_sign_uniform"):
    unsigned_samples = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    if seed is not None:
      seed += 12
    signs = math_ops.sign(
        random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
    return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)

Author: moses-sun | Project: tensorflow | Lines: 35 | Source: linear_operator_test_util.py
Example 4: __call__

def __call__(self, shape, dtype=None, partition_info=None):
  if dtype is None:
    dtype = self.dtype
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                 num_cols)
  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)

Author: moses-sun | Project: tensorflow | Lines: 26 | Source: init_ops.py
Example 5: __call__

def __call__(self, shape, dtype=dtypes.float32):
  """Returns a tensor object initialized as specified by the initializer.

  Args:
    shape: Shape of the tensor.
    dtype: Optional dtype of the tensor. Only floating point types are
      supported.

  Raises:
    ValueError: If the dtype is not floating point or the input shape is not
      valid.
  """
  dtype = _assert_float_dtype(dtype)
  # Check the shape
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Flatten the input shape with the last dimension remaining
  # its original shape so it works for conv2d
  num_rows = 1
  for dim in shape[:-1]:
    num_rows *= dim
  num_cols = shape[-1]
  flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
  # Generate a random matrix
  a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
  # Compute the qr factorization
  q, r = gen_linalg_ops.qr(a, full_matrices=False)
  # Make Q uniform
  d = array_ops.diag_part(r)
  q *= math_ops.sign(d)
  if num_rows < num_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)

Author: Wajih-O | Project: tensorflow | Lines: 35 | Source: init_ops_v2.py
Example 6: _Solve

def _Solve(a, b, c):
  """Return solution of a quadratic minimization.

  The optimization equation is:
       f(a, b, c) = argmin_w{1/2 * a * w^2 + b * w + c * |w|}
  we get optimal solution w*:
       w* = -(b - sign(b)*c)/a if |b| > c else w* = 0

  REQUIRES: Dimensionality of a and b must be same

  Args:
    a: A Tensor
    b: A Tensor
    c: A Tensor with one element.

  Returns:
    A Tensor w, which is solution for the equation
  """
  with ops.name_scope("solve_" + b.op.name):
    c = ops.convert_to_tensor(c)
    k = array_ops.fill(array_ops.shape(b), c)
    zero_t = array_ops.zeros(array_ops.shape(b), dtype=b.dtype)
    w = (c * math_ops.sign(b) - b) / a
    w = math_ops.select(math_ops.less(math_ops.abs(b), k), zero_t, w)
    return w

Author: sherrym | Project: tensorflow | Lines: 25 | Source: ftrl.py
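The closed form in the docstring is the usual soft-thresholding solution. A quick NumPy sanity check of that formula, with made-up values (not part of ftrl.py):

# Quick NumPy check of the closed form above (illustrative values only).
import numpy as np

a, b, c = 2.0, -3.0, 1.0
w = -(b - np.sign(b) * c) / a if abs(b) > c else 0.0   # w* = -(b - sign(b)*c)/a
# Objective f(w) = 0.5*a*w^2 + b*w + c*|w|; here w = 1.0 and it minimizes f:
f = lambda t: 0.5 * a * t**2 + b * t + c * abs(t)
assert f(w) <= min(f(w + 1e-3), f(w - 1e-3))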
Example 7: Test

def Test(self):
  np.random.seed(1)
  n = shape_[-1]
  batch_shape = shape_[:-2]
  np_dtype = dtype_.as_numpy_dtype
  a = np.random.uniform(
      low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  if dtype_.is_complex:
    a += 1j * np.random.uniform(
        low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
  a += np.conj(a.T)
  a = np.tile(a, batch_shape + (1, 1))
  # Optimal stepsize for central difference is O(epsilon^{1/3}).
  epsilon = np.finfo(np_dtype).eps
  delta = 0.1 * epsilon**(1.0 / 3.0)
  # tolerance obtained by looking at actual differences using
  # np.linalg.norm(theoretical-numerical, np.inf) on -mavx build
  if dtype_ in (dtypes_lib.float32, dtypes_lib.complex64):
    tol = 1e-2
  else:
    tol = 1e-7
  with self.session(use_gpu=True):
    tf_a = constant_op.constant(a)
    if compute_v_:
      tf_e, tf_v = linalg_ops.self_adjoint_eig(tf_a)
      # (complex) Eigenvectors are only unique up to an arbitrary phase
      # We normalize the vectors such that the first component has phase 0.
      top_rows = tf_v[..., 0:1, :]
      if tf_a.dtype.is_complex:
        angle = -math_ops.angle(top_rows)
        phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
      else:
        phase = math_ops.sign(top_rows)
      tf_v *= phase
      outputs = [tf_e, tf_v]
    else:
      tf_e = linalg_ops.self_adjoint_eigvals(tf_a)
      outputs = [tf_e]
    for b in outputs:
      x_init = np.random.uniform(
          low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      if dtype_.is_complex:
        x_init += 1j * np.random.uniform(
            low=-1.0, high=1.0, size=n * n).reshape([n, n]).astype(np_dtype)
      x_init += np.conj(x_init.T)
      x_init = np.tile(x_init, batch_shape + (1, 1))
      theoretical, numerical = gradient_checker.compute_gradient(
          tf_a,
          tf_a.get_shape().as_list(),
          b,
          b.get_shape().as_list(),
          x_init_value=x_init,
          delta=delta)
      self.assertAllClose(theoretical, numerical, atol=tol, rtol=tol)

Author: bunbutter | Project: tensorflow | Lines: 54 | Source: self_adjoint_eig_op_test.py
Example 8: Compute

def Compute(x):
  e, v = linalg_ops.self_adjoint_eig(x)
  # (complex) Eigenvectors are only unique up to an arbitrary phase
  # We normalize the vectors such that the first component has phase 0.
  top_rows = v[..., 0:1, :]
  if dtype_.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  v *= phase
  return e, v

Author: Wajih-O | Project: tensorflow | Lines: 12 | Source: self_adjoint_eig_op_test.py
Example 9: _sample_n

def _sample_n(self, n, seed=None):
  shape = array_ops.concat(0, ([n], self.batch_shape()))
  # Sample uniformly-at-random from the open-interval (-1, 1).
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log(1. - math_ops.abs(uniform_samples)))

Author: KalraA | Project: tensorflow | Lines: 12 | Source: laplace.py
Example 10: _orthogonal_matrix

def _orthogonal_matrix(self, n):
  """Construct an n x n orthogonal matrix.

  Args:
    n: dimension.

  Returns:
    a n x n orthogonal matrix.
  """
  a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
  if self.seed:
    self.seed += 1
  q, r = linalg_ops.qr(a)
  d = array_ops.diag_part(r)
  # make q uniform
  q *= math_ops.sign(d)
  return q

Author: moses-sun | Project: tensorflow | Lines: 16 | Source: init_ops.py
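The `q *= math_ops.sign(d)` step appears in Examples 4, 5 and 10: scaling each column of Q by the sign of the corresponding diagonal entry of R removes the sign ambiguity of the QR factorization, which is what makes the sampled orthogonal matrix uniformly ("Haar") distributed. A small NumPy sketch of the same correction, for illustration only:

# NumPy sketch of the sign correction used above to make Q uniform.
import numpy as np

a = np.random.normal(size=(4, 4))
q, r = np.linalg.qr(a)
d = np.sign(np.diag(r))                   # fix the per-column sign ambiguity
q = q * d                                 # broadcasting scales column j by d[j]
assert np.allclose(q @ q.T, np.eye(4))    # Q is still orthogonal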
Example 11: _BesselI1eGrad

def _BesselI1eGrad(op, grad):
  """Compute gradient of bessel_i1e(x) with respect to its argument."""
  x = op.inputs[0]
  y = op.outputs[0]
  with ops.control_dependencies([grad]):
    # For x = 0, the correct gradient is 0.5.
    # However, the main branch gives NaN because of the division by x, so
    # we impute the gradient manually.
    # An alternative solution is to express the gradient via bessel_i0e and
    # bessel_i2e, but the latter is not yet implemented in Eigen.
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    zeros = array_ops.zeros_like(x)
    x_is_not_tiny = math_ops.abs(x) > eps
    safe_x = array_ops.where(x_is_not_tiny, x, eps + zeros)
    dy_dx = math_ops.bessel_i0e(safe_x) - y * (
        math_ops.sign(safe_x) + math_ops.reciprocal(safe_x))
    return grad * array_ops.where(x_is_not_tiny, dy_dx, 0.5 + zeros)

Author: AnishShah | Project: tensorflow | Lines: 17 | Source: math_grad.py
Example 12: _sample_n

def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  # Uniform variates must be sampled from the open-interval `(-1, 1)` rather
  # than `[-1, 1)`. In the case of `(0, 1)` we'd use
  # `np.finfo(self.dtype.as_numpy_dtype).tiny` because it is the smallest,
  # positive, "normal" number. However, the concept of subnormality exists
  # only at zero; here we need the smallest usable number larger than -1,
  # i.e., `-1 + eps/2`.
  uniform_samples = random_ops.random_uniform(
      shape=shape,
      minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                          self.dtype.as_numpy_dtype(0.)),
      maxval=1.,
      dtype=self.dtype,
      seed=seed)
  return (self.loc - self.scale * math_ops.sign(uniform_samples) *
          math_ops.log1p(-math_ops.abs(uniform_samples)))

Author: Jackiefan | Project: tensorflow | Lines: 17 | Source: laplace.py
Example 13: _NormalizingSvd

def _NormalizingSvd(tf_a):
  tf_s, tf_u, tf_v = linalg_ops.svd(tf_a, compute_uv=True, full_matrices=True)
  # Singular vectors are only unique up to an arbitrary phase. We normalize
  # the vectors such that the first component of u (if m >= n) or v (if n > m)
  # have phase 0.
  m = tf_a.shape[-2]
  n = tf_a.shape[-1]
  if m >= n:
    top_rows = tf_u[..., 0:1, :]
  else:
    top_rows = tf_v[..., 0:1, :]
  if tf_u.dtype.is_complex:
    angle = -math_ops.angle(top_rows)
    phase = math_ops.complex(math_ops.cos(angle), math_ops.sin(angle))
  else:
    phase = math_ops.sign(top_rows)
  tf_u *= phase[..., :m]
  tf_v *= phase[..., :n]
  return tf_s, tf_u, tf_v

Author: AbhinavJain13 | Project: tensorflow | Lines: 19 | Source: svd_op_test.py
Example 14: cdf

def cdf(self, x, name="cdf"):
  """CDF of observations in `x` under the Laplace distribution(s).

  Args:
    x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
    name: The name to give this op.

  Returns:
    cdf: tensor of dtype `dtype`, the CDFs of `x`.
  """
  with ops.name_scope(self.name):
    with ops.name_scope(name, values=[self._loc, self._scale, x]):
      x = ops.convert_to_tensor(x)
      if x.dtype != self.dtype:
        raise TypeError("Input x dtype does not match dtype: %s vs. %s"
                        % (x.dtype, self.dtype))
      y = x - self._loc
      return 0.5 + 0.5 * math_ops.sign(y) * (
          1. - math_ops.exp(-math_ops.abs(y) / self._scale))

Author: alephman | Project: Tensorflow | Lines: 19 | Source: laplace.py
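The expression `0.5 + 0.5 * sign(y) * (1 - exp(-|y|/scale))` is the standard closed form of the Laplace CDF. A small NumPy check against the piecewise textbook definition (illustrative values only, not part of laplace.py):

# NumPy check of the Laplace CDF closed form used above (illustrative values).
import numpy as np

loc, scale = 0.5, 2.0
x = np.linspace(-5.0, 5.0, 11)
y = x - loc
cdf_sign = 0.5 + 0.5 * np.sign(y) * (1.0 - np.exp(-np.abs(y) / scale))
# Piecewise textbook form of the Laplace CDF:
cdf_piecewise = np.where(y < 0,
                         0.5 * np.exp(y / scale),
                         1.0 - 0.5 * np.exp(-y / scale))
assert np.allclose(cdf_sign, cdf_piecewise)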
Example 15: build

def build(self, inputs_shape):
  if inputs_shape[1].value is None:
    raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                     % inputs_shape)

  input_depth = inputs_shape[1].value
  if self._input_initializer is None:
    self._input_initializer = init_ops.random_normal_initializer(mean=0.0,
                                                                 stddev=0.001)
  self._input_kernel = self.add_variable(
      "input_kernel",
      shape=[input_depth, self._num_units],
      initializer=self._input_initializer)

  if self._recurrent_initializer is None:
    self._recurrent_initializer = init_ops.constant_initializer(1.)
  self._recurrent_kernel = self.add_variable(
      "recurrent_kernel",
      shape=[self._num_units],
      initializer=self._recurrent_initializer)

  # Clip the absolute values of the recurrent weights to the specified minimum
  if self._recurrent_min_abs:
    abs_kernel = math_ops.abs(self._recurrent_kernel)
    min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
    self._recurrent_kernel = math_ops.multiply(
        math_ops.sign(self._recurrent_kernel),
        min_abs_kernel)

  # Clip the absolute values of the recurrent weights to the specified maximum
  if self._recurrent_max_abs:
    self._recurrent_kernel = clip_ops.clip_by_value(self._recurrent_kernel,
                                                    -self._recurrent_max_abs,
                                                    self._recurrent_max_abs)

  self._bias = self.add_variable(
      "bias",
      shape=[self._num_units],
      initializer=init_ops.zeros_initializer(dtype=self.dtype))

  self.built = True

Author: xkp793003821 | Project: indrnn | Lines: 42 | Source: ind_rnn_cell.py
Example 16: _cdf

def _cdf(self, x):
  z = self._z(x)
  return (0.5 + 0.5 * math_ops.sign(z) *
          (1. - math_ops.exp(-math_ops.abs(z))))

Author: Jackiefan | Project: tensorflow | Lines: 4 | Source: laplace.py
Example 17: indicator

def indicator(x):
  x1_times_x2 = math_ops.reduce_prod(x, reduction_indices=[-1])
  return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)

Author: arnonhongklay | Project: tensorflow | Lines: 3 | Source: monte_carlo_test.py
Example 18: reduce_weighted_logsumexp

def reduce_weighted_logsumexp(
    logx,
    w=None,
    axis=None,
    keep_dims=False,
    return_sign=False,
    name=None):
  """Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.

  If all weights `w` are known to be positive, it is more efficient to directly
  use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more
  efficient than `du.reduce_weighted_logsumexp(logx, w)`.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(w * exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0, 0],
                   [0, 0, 0]])
  w = tf.constant([[-1., 1, 1],
                   [1, 1, 1]])
  du.reduce_weighted_logsumexp(x, w)
  # ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
  du.reduce_weighted_logsumexp(x, w, axis=0)
  # ==> [log(-1+1), log(1+1), log(1+1)]
  du.reduce_weighted_logsumexp(x, w, axis=1)
  # ==> [log(-1+1+1), log(1+1+1)]
  du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
  # ==> [[log(-1+1+1)], [log(1+1+1)]]
  du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
  # ==> log(-1+5)
  ```

  Args:
    logx: The tensor to reduce. Should have numeric type.
    w: The weight tensor. Should have numeric type identical to `logx`.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    return_sign: If `True`, returns the sign of the result.
    name: A name for the operation (optional).

  Returns:
    lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
    sign: (Optional) The sign of `sum(weight * exp(x))`.
  """
  with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]):
    logx = ops.convert_to_tensor(logx, name="logx")
    if w is None:
      lswe = math_ops.reduce_logsumexp(logx, axis=axis, keep_dims=keep_dims)
      if return_sign:
        sgn = array_ops.ones_like(lswe)
        return lswe, sgn
      return lswe
    w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w")
    log_absw_x = logx + math_ops.log(math_ops.abs(w))
    max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keep_dims=True)
    # If the largest element is `-inf` or `inf` then we don't bother subtracting
    # off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
    # this is ok follows from the fact that we're actually free to subtract any
    # value we like, so long as we add it back after taking the `log(sum(...))`.
    max_log_absw_x = array_ops.where(
        math_ops.is_inf(max_log_absw_x),
        array_ops.zeros_like(max_log_absw_x),
        max_log_absw_x)
    wx_over_max_absw_x = (
        math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))
    sum_wx_over_max_absw_x = math_ops.reduce_sum(
        wx_over_max_absw_x,
        axis=axis,
        keep_dims=keep_dims)
    if not keep_dims:
      max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)
    sgn = math_ops.sign(sum_wx_over_max_absw_x)
    lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)
    if return_sign:
      return lswe, sgn
    return lswe

Author: Kongsea | Project: tensorflow | Lines: 96 | Source: util.py
Example 19: _ComplexAbsGrad

def _ComplexAbsGrad(op, grad):
  """Returns the gradient of ComplexAbs."""
  # TODO(b/27786104): The cast to complex could be removed once arithmetic
  # supports mixtures of complex64 and real values.
  return (math_ops.complex(grad, array_ops.zeros_like(grad)) * math_ops.sign(
      op.inputs[0]))

Author: neuroradiology | Project: tensorflow | Lines: 6 | Source: math_grad.py
Example 20: _BesselI0eGrad

def _BesselI0eGrad(op, grad):
  """Compute gradient of bessel_i0e(x) with respect to its argument."""
  x = op.inputs[0]
  y = op.outputs[0]
  with ops.control_dependencies([grad]):
    return grad * (math_ops.bessel_i1e(x) - math_ops.sign(x) * y)

Author: AnishShah | Project: tensorflow | Lines: 6 | Source: math_grad.py
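The formula in Example 20 follows from `bessel_i0e(x) = exp(-|x|) * I0(x)` together with `dI0/dx = I1(x)`, which gives `d/dx bessel_i0e(x) = bessel_i1e(x) - sign(x) * bessel_i0e(x)`. A numerical sanity check of that identity using SciPy's `i0e`/`i1e` (assumes SciPy is available; not part of math_grad.py):

# Numerical check of the bessel_i0e gradient formula above (illustration only).
import numpy as np
from scipy.special import i0e, i1e

x = np.array([-2.0, -0.5, 0.7, 3.0])
analytic = i1e(x) - np.sign(x) * i0e(x)            # d/dx i0e(x), as in the code
eps = 1e-6
numeric = (i0e(x + eps) - i0e(x - eps)) / (2 * eps)  # central finite difference
assert np.allclose(analytic, numeric, atol=1e-5)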
Note: The tensorflow.python.ops.math_ops.sign examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and redistribution or use should follow each project's license. Do not repost without permission.