Python math_ops.abs Function Code Examples


This article collects typical usage examples of the tensorflow.python.ops.math_ops.abs function in Python. If you have been wondering what exactly abs does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



The sections below present 20 code examples of the abs function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
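
Before the examples, here is a minimal sketch (not taken from any of the projects below) showing a direct call to math_ops.abs; the public alias tf.abs wraps the same op, and the input values are illustrative:

import tensorflow as tf
from tensorflow.python.ops import math_ops

x = tf.constant([-1.5, 0.0, 2.0])
y = math_ops.abs(x)  # element-wise absolute value: [1.5, 0.0, 2.0]
print(y)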

Example 1: _ForUsingWhile

def _ForUsingWhile(start,
                   limit,
                   delta,
                   inputs,
                   forbody,
                   name=None,
                   hostmem=None):
  """Helper to implement a For loop using a While."""
  # To support negative delta (e.g., range(100, 0, -3)), we iterate
  # over the range(n) and use iter * delta + start as the real
  # iteration index. (e.g., for i in range(34): iter = i * (-3) +
  # 100).
  d = math_ops.abs(delta)
  # XLA on TPUs doesn't support integer division
  n = math_ops.cast(
      math_ops.cast((math_ops.abs(limit - start) + d - 1), dtypes.float32) /
      math_ops.cast(d, dtypes.float32), dtypes.int32)

  # Carried loop variables ("extra_args") are implicitly added to the input list
  # of the WhileBody function. WhileCond does not call forbody, and so does not
  # depend on any of forbody's extra_args. Since WhileCond and WhileBody
  # must have identical inputs, we have to augment the cond signature to take
  # the same types as the carried loop variables.
  body_sig = [dtypes.int32] * 4 + list(forbody.declared_input_types)[1:]

  cond_name = "%s_Cond" % forbody.name

  @function.Defun(*body_sig, func_name=cond_name)
  def WhileCond(i, n, *args):
    del args
    return i < n

  body_name = "%s_Body" % forbody.name

  @function.Defun(*body_sig, func_name=body_name)
  def WhileBody(i, n, start, delta, *args):
    """A While wrapper for forbody that handles loop-carried captured inputs."""
    for_result = forbody(start + i * delta, *args)
    # Nullary functions return an Operation. Normal functions can't do this
    # because their return values are converted to Tensors.
    if isinstance(for_result, ops.Operation):
      for_result = ()
    # Unary functions return a single Tensor value.
    elif isinstance(for_result, ops.Tensor):
      for_result = (for_result,)
    return (i + 1, n, start, delta) + tuple(for_result)

  if hostmem is not None:
    hostmem = [0, 1, 2, 3] + [(4 + _) for _ in hostmem]
  else:
    hostmem = [0, 1, 2, 3]

  results = While(
      input_=[0, n, start, delta] + inputs,
      cond=WhileCond,
      body=WhileBody,
      name=name,
      hostmem=hostmem)
  # Slice off the loop-carried captured inputs.
  return list(results[4:len(results)])
Author: adit-chandra, Project: tensorflow, Lines: 60, Source: functional_ops.py
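
The cast-to-float division above is a ceiling division in disguise, used because XLA on TPUs lacks integer division. A pure-Python sketch (loop_count is a hypothetical helper, not part of the source) of the same iteration-count formula n = ceil(|limit - start| / |delta|):

def loop_count(start, limit, delta):
    d = abs(delta)
    # (|limit - start| + d - 1) / d, truncated toward zero
    return int(float(abs(limit - start) + d - 1) / float(d))

assert loop_count(100, 0, -3) == len(range(100, 0, -3))  # 34 iterations
assert loop_count(0, 10, 2) == len(range(0, 10, 2))      # 5 iterations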


Example 2: GetParams

 def GetParams(self):
   """Test for rank 2 input in TF-TRT."""
   input_names = ["input", "input2"]
   # Two paths: first with rank 2 input, second with rank 4 input.
   input_dims = [[12, 5], [12, 5, 2, 2]]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     outputs = []
     for i in range(2):
       x = array_ops.placeholder(
           dtype=dtypes.float32, shape=input_dims[i], name=input_names[i])
       c = constant_op.constant(1.0, name="c%d_1" % i)
       q = math_ops.add(x, c, name="add%d_1" % i)
       q = math_ops.abs(q, name="abs%d_1" % i)
       c = constant_op.constant(2.2, name="c%d_2" % i)
       q = math_ops.add(q, c, name="add%d_2" % i)
       q = math_ops.abs(q, name="abs%d_2" % i)
       c = constant_op.constant(3.0, name="c%d_3" % i)
       q = math_ops.add(q, c, name="add%d_3" % i)
       if i == 0:
         for j in range(2):
           q = array_ops.expand_dims(q, -1, name="expand%d_%d" % (i, j))
       q = gen_math_ops.reciprocal(q, name="reciprocal%d" % i)
       outputs.append(q)
     # Combine both paths
     q = math_ops.add(outputs[0], outputs[1], name="add")
     array_ops.squeeze(q, name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=input_names,
       input_dims=input_dims,
       output_names=[output_name],
       expected_output_dims=[tuple(input_dims[1])])
Author: Ajaycs99, Project: tensorflow, Lines: 34, Source: rank_two_test.py


Example 3: _assert_close

def _assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = math_ops.abs(expected - actual, 'diff') / math_ops.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    return check_ops.assert_less(
        rdiff,
        rtol,
        data=('Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ', rdiff,
              'rtol = ', rtol,),
        name=scope)
Author: ThunderQi, Project: tensorflow, Lines: 13, Source: rnn_test.py
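
For intuition, the rdiff asserted in Example 3 is the element-wise relative error; a plain-Python sketch with values chosen to land right at the default rtol:

expected, actual = 2.0, 2.0002
rdiff = abs(expected - actual) / abs(expected)
print(rdiff)  # ≈ 1e-04, exactly the default rtol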


Example 4: _apply_sparse_shared

 def _apply_sparse_shared(self, grad, var, indices,
                          scatter_add, scatter_update):
   beta1_power = self._get_beta_accumulators()
   beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
   lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
   beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
   beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
   epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
   # m_t = beta1 * m + (1 - beta1) * g_t
   m = self.get_slot(var, "m")
   m_slice = array_ops.gather(m, indices)
   m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)
   with ops.control_dependencies([m_t_slice]):
     m_t = scatter_update(m, indices, m_t_slice)
   # u_t = max(beta2 * u, abs(g_t))
   v = self.get_slot(var, "v")
   v_slice = array_ops.gather(v, indices)
   v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))
   with ops.control_dependencies([v_t_slice]):
     v_t = scatter_update(v, indices, v_t_slice)
   # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
   var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /
                                            (v_t_slice + epsilon_t))
   with ops.control_dependencies([var_slice]):
     var_update = scatter_add(var, indices, var_slice)
   return control_flow_ops.group(*[var_update, m_t, v_t])
Author: Ajaycs99, Project: tensorflow, Lines: 26, Source: adamax.py
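
The update in Example 4 is the AdaMax variant of Adam: the second-moment average is replaced by an exponentially weighted infinity norm. A scalar sketch of the u_t = max(beta2 * u, |g_t|) step, with illustrative values:

beta2 = 0.999
u, g = 0.5, -0.7
u_t = max(beta2 * u, abs(g))
print(u_t)  # 0.7: the incoming gradient magnitude dominates the decayed norm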


Example 5: absolute_loss

def absolute_loss(predicted, target, name=None):
  """Computes and returns the per-example absolute loss.

  Computes the per-example absolute value of the difference between
  the target and predicted tensors. The tensors must have the same
  shape.

  Args:
    predicted: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]`
      of predicted values.
    target: A `Tensor` of shape `[batch_size, dim_1, ..., dim_n]` of
      target values. The shape of the target tensor should match the
      `predicted` tensor.
    name: A name for the operation (optional).

  Returns:
    A `[batch_size, dim_1, ..., dim_n]` tensor of per-example absolute losses.

  Raises:
    ValueError: If `predicted` and `target` shapes do not match.

  """
  with ops.op_scope([predicted, target], name, "absolute_loss") as scope:
    predicted = ops.convert_to_tensor(predicted, name="predicted")
    target = ops.convert_to_tensor(target, name="target")
    predicted.get_shape().assert_is_compatible_with(target.get_shape())
    return math_ops.abs(target - predicted, name=scope)
Author: 13683116633, Project: tensorflow, Lines: 27, Source: loss_ops.py
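
Example 5 uses the long-removed ops.op_scope API, so as a hedged usage sketch, here is the same per-example loss, just |target - predicted|, written against the modern public tf API:

import tensorflow as tf

predicted = tf.constant([[1.0, 2.0], [3.0, 4.0]])
target = tf.constant([[1.5, 2.0], [2.0, 6.0]])
print(tf.abs(target - predicted))  # [[0.5, 0.0], [1.0, 2.0]]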


Example 6: __call__

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_rows, num_cols)

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    square_len = math_ops.minimum(num_rows, num_cols)
    d = array_ops.diag_part(r[:square_len, :square_len])
    ph = d / math_ops.abs(d)
    q *= ph
    # Pad zeros to Q (if rows smaller than cols)
    if num_rows < num_cols:
      padding = array_ops.zeros([num_rows, num_cols - num_rows], dtype=dtype)
      q = array_ops.concat([q, padding], 1)
    return self.gain * array_ops.reshape(q, shape)
Author: finardi, Project: tensorflow, Lines: 29, Source: init_ops.py
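
The ph = d / abs(d) step multiplies each column of Q by the sign of the corresponding diagonal entry of R, which makes the QR factorization unique and keeps the result uniformly distributed over orthogonal matrices. A NumPy sketch of the same correction:

import numpy as np

a = np.random.randn(4, 4)
q, r = np.linalg.qr(a)
d = np.diag(r)
q *= d / np.abs(d)  # flip column signs so diag(R) is positive
assert np.allclose(q.T @ q, np.eye(4))  # still orthogonal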


Example 7: _resource_apply_sparse

  def _resource_apply_sparse(self, grad, var, indices):
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)

    beta_1_t = self._get_hyper('beta_1', var_dtype)
    beta_2_t = self._get_hyper('beta_2', var_dtype)
    local_step = math_ops.cast(self.iterations + 1, var_dtype)
    beta_1_power = math_ops.pow(beta_1_t, local_step)
    epsilon_t = self._get_hyper('epsilon', var_dtype)

    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, 'm')
    m_slice = array_ops.gather(m, indices)
    m_t_slice = m_slice * beta_1_t + grad * (1 - beta_1_t)
    with ops.control_dependencies([m_t_slice]):
      m_t = self._resource_scatter_update(m, indices, m_t_slice)

    # u_t = max(beta2 * u, abs(g_t))
    v = self.get_slot(var, 'v')
    v_slice = array_ops.gather(v, indices)
    v_t_slice = math_ops.maximum(v_slice * beta_2_t, math_ops.abs(grad))
    with ops.control_dependencies([v_t_slice]):
      v_t = self._resource_scatter_update(v, indices, v_t_slice)
    # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
    var_slice = -lr_t / (1 - beta_1_power) * (
        m_t_slice / (v_t_slice + epsilon_t))
    with ops.control_dependencies([var_slice]):
      var_update = self._resource_scatter_add(var, indices, var_slice)
    return control_flow_ops.group(*[var_update, m_t, v_t])
Author: aeverall, Project: tensorflow, Lines: 29, Source: adamax.py


Example 8: assert_close

def assert_close(
    x, y, data=None, summarize=None, message=None, name="assert_close"):
  """Assert that that x and y are within machine epsilon of each other.

  Args:
    x: Numeric `Tensor`
    y: Numeric `Tensor`
    data: The tensors to print out if the condition is `False`. Defaults to
      error message and first few entries of `x` and `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).

  Returns:
    Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
  """
  message = message or ""
  x = ops.convert_to_tensor(x, name="x")
  y = ops.convert_to_tensor(y, name="y")

  if x.dtype.is_integer:
    return check_ops.assert_equal(
        x, y, data=data, summarize=summarize, message=message, name=name)

  with ops.name_scope(name, "assert_close", [x, y, data]):
    tol = np.finfo(x.dtype.as_numpy_dtype).resolution
    if data is None:
      data = [
          message,
          "Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
          y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
    return control_flow_ops.Assert(
        condition, data, summarize=summarize)
Author: Nishant23, Project: tensorflow, Lines: 35, Source: distribution_util.py
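
The tolerance in assert_close is the dtype's decimal resolution rather than a fixed constant, so it scales with precision. A quick NumPy illustration:

import numpy as np

print(np.finfo(np.float32).resolution)  # 1e-06
print(np.finfo(np.float64).resolution)  # 1e-15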


Example 9: exact_laplacian_kernel

def exact_laplacian_kernel(x, y, stddev):
  """Computes exact Laplacian kernel value(s) for tensors x and y using stddev.

  The Laplacian kernel for vectors u, v is defined as follows:
       K(u, v) = exp(-||u-v|| / stddev)
  where the norm is the l1-norm. x, y can be either vectors or matrices. If they
  are vectors, they must have the same dimension. If they are matrices, they
  must have the same number of columns. In the latter case, the method returns
  (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and
  v is a row from y.

  Args:
    x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
    stddev: The width of the Laplacian kernel.

  Returns:
    A single value (scalar) with shape (1, 1)  if x, y are vectors or a matrix
    of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for
    all (u,v) pairs where u, v are rows from x and y respectively.

  Raises:
    InvalidShapeError: if the shapes of x, y are not compatible.
  """
  x_aligned, y_aligned = _align_matrices(x, y)
  diff_l1_norm = math_ops.reduce_sum(
      math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2)
  return math_ops.exp(-diff_l1_norm / stddev)
Author: rmlarsen, Project: tensorflow, Lines: 28, Source: kernelized_utils.py
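
A small NumPy cross-check of the scalar formula K(u, v) = exp(-||u - v||_1 / stddev); the TF version in Example 9 merely broadcasts this over all row pairs:

import numpy as np

u = np.array([1.0, -1.0, 0.5])
v = np.array([-1.0, 1.0, 1.0])
stddev = 2.0
print(np.exp(-np.sum(np.abs(u - v)) / stddev))  # exp(-4.5 / 2) ≈ 0.1054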


Example 10: __call__

 def __call__(self, x):
   regularization = 0.
   if self.l1:
     regularization += math_ops.reduce_sum(self.l1 * math_ops.abs(x))
   if self.l2:
     regularization += math_ops.reduce_sum(self.l2 * math_ops.square(x))
   return regularization
Author: Wajih-O, Project: tensorflow, Lines: 7, Source: regularizers.py
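
A hedged usage sketch of the same L1/L2 penalty computed with the public tf API; the coefficients and weight values are illustrative:

import tensorflow as tf

l1, l2 = 0.01, 0.001
x = tf.constant([[1.0, -2.0], [0.5, 3.0]])
penalty = tf.reduce_sum(l1 * tf.abs(x)) + tf.reduce_sum(l2 * tf.square(x))
print(penalty)  # 0.01 * 6.5 + 0.001 * 14.25 = 0.07925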


Example 11: testChi2WithAbsDf

 def testChi2WithAbsDf(self):
   with self.cached_session():
     df_v = np.array([-1.3, -3.2, 5], dtype=np.float64)
     chi2 = chi2_lib.Chi2WithAbsDf(df=df_v)
     self.assertAllClose(
         math_ops.floor(math_ops.abs(df_v)).eval(),
         chi2.df.eval())
Author: Ajaycs99, Project: tensorflow, Lines: 7, Source: chi2_test.py


Example 12: __call__

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                   num_cols)

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    d = array_ops.diag_part(r)
    ph = d / math_ops.abs(d)
    q *= ph
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)
Author: Lin-jipeng, Project: tensorflow, Lines: 27, Source: init_ops.py


Example 13: test_bad_kernel_approximation

  def test_bad_kernel_approximation(self, initializer, scale, exact_kernel_fn):
    """Approximation is bad when output dimension is small."""
    # Two distinct inputs.
    x = constant_op.constant([[1.0, -1.0, 0.5]])
    y = constant_op.constant([[-1.0, 1.0, 1.0]])

    small_output_dim = 10
    random_seed.set_random_seed(1234)
    # Initialize layer.
    rff_layer = kernel_layers.RandomFourierFeatures(
        output_dim=small_output_dim,
        kernel_initializer=initializer,
        scale=scale,
        name='random_fourier_features')

    # Apply layer to both inputs.
    output_x = math.sqrt(2.0 / small_output_dim) * rff_layer.apply(x)
    output_y = math.sqrt(2.0 / small_output_dim) * rff_layer.apply(y)

    # The inner products of the outputs (on inputs x and y) approximates the
    # real value of the RBF kernel but poorly since the output dimension of the
    # layer is small.
    exact_kernel_value = exact_kernel_fn(x, y)
    approx_kernel_value = kernelized_utils.inner_product(output_x, output_y)
    abs_error = math_ops.abs(exact_kernel_value - approx_kernel_value)
    if not context.executing_eagerly():
      with self.cached_session() as sess:
        keras_backend._initialize_variables(sess)
        abs_error_eval = sess.run([abs_error])
        self.assertGreater(abs_error_eval[0][0], 0.05)
        self.assertLess(abs_error_eval[0][0], 0.5)
    else:
      self.assertGreater(abs_error, 0.05)
      self.assertLess(abs_error, 0.5)
Author: rmlarsen, Project: tensorflow, Lines: 34, Source: kernelized_test.py


Example 14: huber_loss

def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
               loss_collection=ops.GraphKeys.LOSSES,
               reduction=Reduction.WEIGHTED_SUM_BY_NONZERO_WEIGHTS):
  """Adds a Huber Loss term to the training procedure.

  For each value x in `error=labels-predictions`, the following is calculated:

  ```
    0.5 * x^2                  if |x| <= d
    0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`.

  See: https://en.wikipedia.org/wiki/Huber_loss

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of size
  [batch_size], then the total loss for each sample of the batch is rescaled
  by the corresponding element in the `weights` vector. If the shape of
  `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    predictions: The predicted outputs.
    weights: Optional `Tensor` whose rank is either 0, or the same rank as
      `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
      be either `1`, or the same as the corresponding `losses` dimension).
    delta: `float`, the point where the huber loss function
      changes from a quadratic to linear.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels` or
      if the shape of `weights` is invalid.
  """
  with ops.name_scope(scope, "huber_loss",
                      (predictions, labels, weights)) as scope:
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    error = math_ops.subtract(predictions, labels)
    abs_error = math_ops.abs(error)
    quadratic = math_ops.minimum(abs_error, delta)
    # The following expression is the same in value as
    # tf.maximum(abs_error - delta, 0), but importantly the gradient for the
    # expression when abs_error == delta is 0 (for tf.maximum it would be 1).
    # This is necessary to avoid doubling the gradient, since there is already a
    # nonzero contribution to the gradient from the quadratic term.
    linear = (abs_error - quadratic)
    losses = 0.5 * quadratic**2 + delta * linear
    return compute_weighted_loss(
        losses, weights, scope, loss_collection, reduction=reduction)
Author: LUTAN, Project: tensorflow, Lines: 60, Source: losses_impl.py
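
A pure-Python check of the piecewise formula in the docstring, independent of the TF implementation, with delta = 1.0:

def huber(x, d=1.0):
    return 0.5 * x**2 if abs(x) <= d else 0.5 * d**2 + d * (abs(x) - d)

assert huber(0.5) == 0.125  # quadratic branch: 0.5 * 0.5^2
assert huber(3.0) == 2.5    # linear branch: 0.5 + 1.0 * (3.0 - 1.0)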


Example 15: inner_loss

    def inner_loss(y_true, y_pred):
        delta = math_ops.abs(math_ops.subtract(y_pred, y_true))
        losses = math_ops.square(delta)
        if clip > 0.0:
            losses = tf.where(delta < clip, 0.5 * losses, delta - 0.5)

        return losses
Author: AlexMikhalev, Project: polyaxon, Lines: 7, Source: losses.py


Example 16: _sliced_wasserstein

def _sliced_wasserstein(a, b, random_sampling_count, random_projection_dim):
  """Compute the approximate sliced Wasserstein distance.

  Args:
      a: (matrix) Distribution "a" of samples (row, col).
      b: (matrix) Distribution "b" of samples (row, col).
      random_sampling_count: (int) Number of random projections to average.
      random_projection_dim: (int) Dimension of the random projection space.
  Returns:
      Float containing the approximate distance between "a" and "b".
  """
  s = array_ops.shape(a)
  means = []
  for _ in range(random_sampling_count):
    # Random projection matrix.
    proj = random_ops.random_normal(
        [array_ops.shape(a)[1], random_projection_dim])
    proj *= math_ops.rsqrt(
        math_ops.reduce_sum(math_ops.square(proj), 0, keepdims=True))
    # Project both distributions and sort them.
    proj_a = math_ops.matmul(a, proj)
    proj_b = math_ops.matmul(b, proj)
    proj_a = _sort_rows(proj_a, s[0])
    proj_b = _sort_rows(proj_b, s[0])
    # Pairwise Wasserstein distance.
    wdist = math_ops.reduce_mean(math_ops.abs(proj_a - proj_b))
    means.append(wdist)
  return math_ops.reduce_mean(means)
Author: Albert-Z-Guo, Project: tensorflow, Lines: 28, Source: sliced_wasserstein_impl.py
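
In one dimension, the Wasserstein-1 distance between two equal-size empirical distributions reduces to the mean absolute difference of the sorted samples, which is exactly what the loop above computes per random projection. A NumPy sketch with toy samples:

import numpy as np

proj_a = np.sort(np.array([0.0, 1.0, 2.0]))
proj_b = np.sort(np.array([0.5, 1.5, 3.0]))
print(np.mean(np.abs(proj_a - proj_b)))  # (0.5 + 0.5 + 1.0) / 3 ≈ 0.667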


Example 17: huber_loss

def huber_loss(y_true, y_pred, delta=1.0):
  """Computes Huber loss value.

  For each value x in `error=y_true-y_pred`, the following is calculated:

  ```
  0.5 * x^2                  if |x| <= d
  0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```
  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  error = math_ops.subtract(y_pred, y_true)
  abs_error = math_ops.abs(error)
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  return math_ops.add(
      math_ops.multiply(
          ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
          math_ops.multiply(quadratic, quadratic)),
      math_ops.multiply(delta, linear))
Author: adit-chandra, Project: tensorflow, Lines: 31, Source: losses.py


Example 18: _compute_stft_gradient

 def _compute_stft_gradient(signal, frame_length=32, frame_step=16,
                            fft_length=32):
   """Computes the gradient of the STFT with respect to `signal`."""
   stft = spectral_ops.stft(signal, frame_length, frame_step, fft_length)
   magnitude_stft = math_ops.abs(stft)
   loss = math_ops.reduce_sum(magnitude_stft)
   return gradients_impl.gradients([loss], [signal])[0]
Author: 1000sprites, Project: tensorflow, Lines: 7, Source: spectral_ops_test.py


Example 19: sample_n

  def sample_n(self, n, seed=None, name="sample_n"):
    """Sample `n` observations from the Laplace Distributions.

    Args:
      n: `Scalar`, type int32, the number of observations to sample.
      seed: Python integer, the random seed.
      name: The name to give this op.

    Returns:
      samples: `[n, ...]`, a `Tensor` of `n` samples for each
        of the distributions determined by broadcasting the parameters.
    """
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=[self._loc, self._scale, n]):
        n = ops.convert_to_tensor(n)
        n_val = tensor_util.constant_value(n)
        shape = array_ops.concat(0, ([n], self.batch_shape()))
        # Sample uniformly-at-random from the open-interval (-1, 1).
        uniform_samples = random_ops.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=self.dtype.as_numpy_dtype(1.),
            dtype=self.dtype,
            seed=seed)

        # Provide some hints to shape inference
        inferred_shape = tensor_shape.vector(n_val).concatenate(
            self.get_batch_shape())
        uniform_samples.set_shape(inferred_shape)

        return (self._loc - self._scale * math_ops.sign(uniform_samples) *
                math_ops.log(1. - math_ops.abs(uniform_samples)))
Author: alephman, Project: Tensorflow, Lines: 33, Source: laplace.py
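
The return expression is inverse-CDF sampling: for U uniform on (-1, 1), loc - scale * sign(U) * log(1 - |U|) follows a Laplace(loc, scale) distribution. A NumPy sketch that checks the mean absolute deviation, which for a Laplace equals its scale:

import numpy as np

rng = np.random.default_rng(0)
u = rng.uniform(-1.0, 1.0, size=100_000)
samples = 0.0 - 1.0 * np.sign(u) * np.log(1.0 - np.abs(u))  # loc=0, scale=1
print(np.mean(np.abs(samples)))  # ≈ 1.0, the scale parameter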


Example 20: _prepare

 def _prepare(self, grads_and_vars):
   """"""
   
   if self._lr is None:
     sTy = 0
     sTs = 0
     yTy = 0
     for g_t, x_tm1 in grads_and_vars:
       if g_t is None:
         continue
       with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
         if isinstance(g_t, ops.Tensor):
           g_tm1 = self.get_slot(x_tm1, 'g')
           s_tm1 = self.get_slot(x_tm1, 's')
           y_t = (g_t-g_tm1)
           sTy += math_ops.reduce_sum(s_tm1*y_t)
           sTs += math_ops.reduce_sum(s_tm1**2)
           yTy += math_ops.reduce_sum(y_t**2)
         else:
           idxs, idxs_ = array_ops.unique(g_t.indices)
           g_t_ = math_ops.unsorted_segment_sum(g_t.values, idxs_, array_ops.size(idxs))
           g_tm1 = self.get_slot(x_tm1, 'g')
           g_tm1_ = array_ops.gather(g_tm1, idxs)
           s_tm1 = self.get_slot(x_tm1, 's')
           s_tm1_ = array_ops.gather(s_tm1, idxs)
           y_t_ = (g_t_-g_tm1_)
           sTy += math_ops.reduce_sum(s_tm1_*y_t_)
           sTs += math_ops.reduce_sum(s_tm1_**2)
           yTy += math_ops.reduce_sum(y_t_**2)
     sTy = math_ops.abs(sTy)
     self._lr = sTs / (sTy + self._eps)
Author: tdozat, Project: Optimization, Lines: 31, Source: optimizers.py
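
The rate computed above is the Barzilai-Borwein "long" step sTs / sTy, with s = x_t - x_{t-1} and y = g_t - g_{t-1}; the abs on sTy keeps the step positive. On a scalar quadratic f(x) = (a/2) * x^2 it recovers 1/a exactly (ignoring the eps term), as this toy check shows:

a = 4.0
x0, x1 = 1.0, 0.9          # two consecutive iterates
g0, g1 = a * x0, a * x1    # their gradients
s, y = x1 - x0, g1 - g0
print(s * s / abs(s * y))  # ≈ 0.25 == 1 / a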



Note: The tensorflow.python.ops.math_ops.abs examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce without permission.

