
Python math_ops.conj Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.conj. If you are unsure what conj does, how to call it, or what real-world usage looks like, the curated code examples below should help.



Below are 20 code examples of the conj function, sorted by popularity by default.
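Before the examples, a minimal standalone sketch of what conj itself does may be useful (this sketch is not taken from the collected projects; it assumes TensorFlow 2.x with eager execution and uses the public alias tf.math.conj, which dispatches to the same op as tensorflow.python.ops.math_ops.conj):

import tensorflow as tf

# Complex input: conj flips the sign of the imaginary part.
z = tf.constant([1.0 + 2.0j, 3.0 - 4.0j], dtype=tf.complex64)
print(tf.math.conj(z))  # [1.-2.j 3.+4.j]

# Real input: conj is the identity. This is why the gradient functions
# below can call it unconditionally on real or complex tensors.
x = tf.constant([1.0, 2.0])
print(tf.math.conj(x))  # [1. 2.]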

Example 1: _ReciprocalGradGrad

def _ReciprocalGradGrad(op, grad):
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    return cg * -2.0 * b * ca, gen_math_ops.reciprocal_grad(ca, grad)
Author: neuroradiology | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 2: _SigmoidGradGrad

def _SigmoidGradGrad(op, grad):
  with ops.control_dependencies([grad.op]):
    a = math_ops.conj(op.inputs[0])
    b = math_ops.conj(op.inputs[1])
    gb = grad * b
    # pylint: disable=protected-access
    return gb - 2.0 * gb * a, gen_math_ops._sigmoid_grad(a, grad)
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 3: _ReciprocalGradGrad

def _ReciprocalGradGrad(op, grad):
  b = op.inputs[1]
  # op.output[0]: y = -b * conj(a)^2
  with ops.control_dependencies([grad.op]):
    ca = math_ops.conj(op.inputs[0])
    cg = math_ops.conj(grad)
    # pylint: disable=protected-access
    return cg * -2.0 * b * ca, gen_math_ops._reciprocal_grad(ca, grad)
Author: Hwhitetooth | Project: tensorflow | Lines: 8 | Source: math_grad.py


Example 4: _RsqrtGradGrad

def _RsqrtGradGrad(op, grad):
  """Returns backprop gradient for f(a,b) = -0.5 * b * conj(a)^3."""
  a = op.inputs[0]  # a = x^{-1/2}
  b = op.inputs[1]  # backprop gradient for a
  with ops.control_dependencies([grad]):
    ca = math_ops.conj(a)
    cg = math_ops.conj(grad)
    grad_a = -1.5 * cg * b * math_ops.square(ca)
    grad_b = gen_math_ops.rsqrt_grad(ca, grad)
    return grad_a, grad_b
Author: neuroradiology | Project: tensorflow | Lines: 10 | Source: math_grad.py
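For reference (this derivation is not in the original source, but it follows from the docstring above): with a = x^(-1/2), da/dx = -0.5 * x^(-3/2) = -0.5 * a^3, so RsqrtGrad scales the upstream gradient b by this factor: f(a, b) = -0.5 * b * conj(a)^3. Differentiating f with respect to a then gives the -1.5 * b * conj(a)^2 factor seen in grad_a, with the conjugates reflecting TensorFlow's convention for complex gradients.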


Example 5: _DivGrad

def _DivGrad(op, grad):
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)  # pylint: disable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(grad / y, rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(grad * (-x / math_ops.square(y)), ry), sy))
Author: marevol | Project: tensorflow | Lines: 11 | Source: math_grad.py


Example 6: _MulGrad

def _MulGrad(op, grad):
  """The gradient of scalar multiplication."""
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
          array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
Author: Hwhitetooth | Project: tensorflow | Lines: 12 | Source: math_grad.py


Example 7: _FloorModGrad

def _FloorModGrad(op, grad):
  """Returns grad * (1, -floor(x/y))."""
  x = math_ops.conj(op.inputs[0])
  y = math_ops.conj(op.inputs[1])

  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  floor_xy = math_ops.floor_div(x, y)
  gx = array_ops.reshape(math_ops.reduce_sum(grad, rx), sx)
  gy = array_ops.reshape(
      math_ops.reduce_sum(grad * math_ops.negative(floor_xy), ry), sy)
  return gx, gy
Author: PuchatekwSzortach | Project: tensorflow | Lines: 13 | Source: math_grad.py


Example 8: _DivGrad

def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(grad * math_ops.div(math_ops.div(-x, y), y),
                                  ry), sy))
Author: PuchatekwSzortach | Project: tensorflow | Lines: 13 | Source: math_grad.py


Example 9: _MulGrad

def _MulGrad(op, grad):
  x = op.inputs[0]
  y = op.inputs[1]
  assert x.dtype.base_dtype == y.dtype.base_dtype, (x.dtype, " vs. ", y.dtype)
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  if x.dtype.base_dtype == dtypes.complex64:
    return (array_ops.reshape(math_ops.reduce_sum(grad * math_ops.conj(y), rx), sx),
            array_ops.reshape(math_ops.reduce_sum(math_ops.conj(x) * grad, ry), sy))
  else:
    return (array_ops.reshape(math_ops.reduce_sum(grad * y, rx), sx),
            array_ops.reshape(math_ops.reduce_sum(x * grad, ry), sy))
Author: TeMedy | Project: tensorflow | Lines: 13 | Source: math_grad.py
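The complex64 branch above can be sanity-checked against autodiff through the public API. A minimal sketch (not from the original project; assumes TensorFlow 2.x, whose tape applies the registered Mul gradient): for y = x * w with an implicit upstream gradient of ones, the gradient with respect to x comes out as conj(w), i.e. grad * conj of the other factor, exactly as the branch computes.

import tensorflow as tf

x = tf.constant([1.0 + 2.0j], dtype=tf.complex64)
w = tf.constant([3.0 - 1.0j], dtype=tf.complex64)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * w  # elementwise complex multiply
# Gradient w.r.t. x is conj(w) under TensorFlow's complex-gradient
# convention, matching the grad * math_ops.conj(y) term above.
print(tape.gradient(y, x))  # [3.+1.j]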


Example 10: _DivGrad

def _DivGrad(op, grad):
  """The gradient for the Div operator."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  # pylint: disable=protected-access
  rx, ry = gen_array_ops._broadcast_gradient_args(sx, sy)
  # pylint: enable=protected-access
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(math_ops.reduce_sum(math_ops.div(grad, y), rx), sx),
          array_ops.reshape(math_ops.reduce_sum(
              grad * math_ops.div(-x, math_ops.square(y)), ry), sy))
Author: Hwhitetooth | Project: tensorflow | Lines: 14 | Source: math_grad.py


Example 11: _DivNoNanGrad

def _DivNoNanGrad(op, grad):
  """DivNoNan op gradient."""
  x = op.inputs[0]
  y = op.inputs[1]
  sx = array_ops.shape(x)
  sy = array_ops.shape(y)
  rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
  x = math_ops.conj(x)
  y = math_ops.conj(y)
  return (array_ops.reshape(
      math_ops.reduce_sum(math_ops.div_no_nan(grad, y), rx), sx),
          array_ops.reshape(
              math_ops.reduce_sum(
                  grad * math_ops.div_no_nan(math_ops.div_no_nan(-x, y), y),
                  ry), sy))
Author: AnishShah | Project: tensorflow | Lines: 15 | Source: math_grad.py


Example 12: _ZetaGrad

def _ZetaGrad(op, grad):
  """Returns gradient of zeta(x, q) with respect to x and q."""
  # TODO(tillahoffmann): Add derivative with respect to x
  x = op.inputs[0]
  q = op.inputs[1]
  # Broadcast gradients
  sx = array_ops.shape(x)
  sq = array_ops.shape(q)
  unused_rx, rq = gen_array_ops._broadcast_gradient_args(sx, sq)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    x = math_ops.conj(x)
    q = math_ops.conj(q)
    partial_q = -x * math_ops.zeta(x + 1, q)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_q * grad, rq), sq))
Author: Hwhitetooth | Project: tensorflow | Lines: 16 | Source: math_grad.py


Example 13: _TanhGrad

def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._tanh_grad(y, grad)
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: math_grad.py
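As a quick numeric check (not part of the original example; assumes TensorFlow 2.x), the identity _TanhGrad relies on can be compared with autodiff via the public API:

import tensorflow as tf

x = tf.constant([0.5, -1.0, 2.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.tanh(x)
autodiff = tape.gradient(y, x)

# d tanh(x)/dx = 1 - tanh(x)^2, the same factor _TanhGrad applies to the
# upstream gradient.
manual = 1.0 - tf.square(tf.tanh(x))
print(float(tf.reduce_max(tf.abs(autodiff - manual))))  # ~0.0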


Example 14: _SquareGrad

def _SquareGrad(op, grad):
  x = op.inputs[0]
  # Added control dependencies to prevent 2*x from being computed too early.
  with ops.control_dependencies([grad]):
    x = math_ops.conj(x)
    y = constant_op.constant(2.0, dtype=x.dtype)
    return math_ops.multiply(grad, math_ops.multiply(x, y))
Author: PuchatekwSzortach | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 15: _CosGrad

def _CosGrad(op, grad):
  """Returns grad * -sin(x)."""
  x = op.inputs[0]
  with ops.control_dependencies([grad.op]):
    if x.dtype.is_complex:
      x = math_ops.conj(x)
    return -grad * math_ops.sin(x)
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 16: _SigmoidGrad

def _SigmoidGrad(op, grad):
  """Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
  y = op.outputs[0]  # y = sigmoid(x)
  with ops.control_dependencies([grad.op]):
    if y.dtype.is_complex:
      y = math_ops.conj(y)
    return grad * (y * (1 - y))
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 17: _TanhGrad

def _TanhGrad(op, grad):
  """Returns grad * (1 - tanh(x) * tanh(x))."""
  y = op.outputs[0]  # y = tanh(x)
  with ops.control_dependencies([grad.op]):
    if y.dtype.is_complex:
      y = math_ops.conj(y)
    return grad * (1 - math_ops.square(y))
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 18: _ExpGrad

def _ExpGrad(op, grad):
  """Returns grad * exp(x)."""
  y = op.outputs[0]  # y = e^x
  with ops.control_dependencies([grad.op]):
    if y.dtype.is_complex:
      y = math_ops.conj(y)
    return grad * y
Author: 0ruben | Project: tensorflow | Lines: 7 | Source: math_grad.py


Example 19: _PolygammaGrad

def _PolygammaGrad(op, grad):
  """Returns gradient of psi(n, x) with respect to n and x."""
  # TODO(tillahoffmann): Add derivative with respect to n
  n = op.inputs[0]
  x = op.inputs[1]
  # Broadcast gradients
  sn = array_ops.shape(n)
  sx = array_ops.shape(x)
  unused_rn, rx = gen_array_ops._broadcast_gradient_args(sn, sx)
  # Evaluate gradient
  with ops.control_dependencies([grad.op]):
    n = math_ops.conj(n)
    x = math_ops.conj(x)
    partial_x = math_ops.polygamma(n + 1, x)
    return (None,
            array_ops.reshape(math_ops.reduce_sum(partial_x * grad, rx), sx))
Author: Hwhitetooth | Project: tensorflow | Lines: 16 | Source: math_grad.py


Example 20: _SigmoidGrad

def _SigmoidGrad(op, grad):
  """Returns grad * sigmoid(x) * (1 - sigmoid(x))."""
  y = op.outputs[0]  # y = sigmoid(x)
  with ops.control_dependencies([grad.op]):
    y = math_ops.conj(y)
    # pylint: disable=protected-access
    return gen_math_ops._sigmoid_grad(y, grad)
Author: Hwhitetooth | Project: tensorflow | Lines: 7 | Source: math_grad.py



Note: The tensorflow.python.ops.math_ops.conj examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.

