
Python nn_ops.softplus Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.nn_ops.softplus. If you have been wondering what softplus does, how to call it, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the softplus function, listed roughly by popularity.
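As a quick orientation before the examples: softplus(x) = log(1 + exp(x)), an elementwise, smooth, strictly positive function. Below is a minimal sketch, assuming the same TF1-era internal modules (constant_op, nn_ops, and a graph-mode Session) that the excerpts use; it is illustrative only, not part of any quoted project.

import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import nn_ops

# softplus(x) = log(1 + exp(x)); always > 0 and approaches relu(x) for large |x|.
x = constant_op.constant([-2.0, 0.0, 3.0])
y = nn_ops.softplus(x)
with session.Session() as sess:
    tf_y = sess.run(y)
# Agrees with the NumPy reference log1p(exp(x)).
np.testing.assert_allclose(tf_y, np.log1p(np.exp([-2.0, 0.0, 3.0])), rtol=1e-6)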

Example 1: testGammaWithSoftplusAlphaBeta

 def testGammaWithSoftplusAlphaBeta(self):
   with self.test_session():
     alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
     beta_v = constant_op.constant([1.0, -3.6], name="beta")
     gamma = gamma_lib.GammaWithSoftplusAlphaBeta(alpha=alpha_v, beta=beta_v)
     self.assertAllEqual(nn_ops.softplus(alpha_v).eval(), gamma.alpha.eval())
     self.assertAllEqual(nn_ops.softplus(beta_v).eval(), gamma.beta.eval())
Author: AliMiraftab | Project: tensorflow | Lines: 7 | Source: gamma_test.py


Example 2: testBetaWithSoftplusConcentration

 def testBetaWithSoftplusConcentration(self):
   a, b = -4.2, -9.1
   dist = beta_lib.BetaWithSoftplusConcentration(a, b)
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(a)), self.evaluate(dist.concentration1))
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(b)), self.evaluate(dist.concentration0))
Author: AnishShah | Project: tensorflow | Lines: 7 | Source: beta_test.py


Example 3: testInverseGammaWithSoftplusAlphaBeta

 def testInverseGammaWithSoftplusAlphaBeta(self):
   with self.test_session():
     alpha = constant_op.constant([-0.1, -2.9], name="alpha")
     beta = constant_op.constant([1.0, -4.8], name="beta")
     inv_gamma = inverse_gamma.InverseGammaWithSoftplusAlphaBeta(
         alpha=alpha, beta=beta, validate_args=True)
     self.assertAllClose(nn_ops.softplus(alpha).eval(), inv_gamma.alpha.eval())
     self.assertAllClose(nn_ops.softplus(beta).eval(), inv_gamma.beta.eval())
Author: AliMiraftab | Project: tensorflow | Lines: 8 | Source: inverse_gamma_test.py


Example 4: testGammaWithSoftplusConcentrationRate

 def testGammaWithSoftplusConcentrationRate(self):
   with self.test_session():
     alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
     beta_v = constant_op.constant([1.0, -3.6], name="beta")
     gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
         concentration=alpha_v, rate=beta_v)
     self.assertAllEqual(nn_ops.softplus(alpha_v).eval(),
                         gamma.concentration.eval())
     self.assertAllEqual(nn_ops.softplus(beta_v).eval(),
                         gamma.rate.eval())
Author: jzuern | Project: tensorflow | Lines: 10 | Source: gamma_test.py


Example 5: testGammaWithSoftplusConcentrationRate

 def testGammaWithSoftplusConcentrationRate(self):
   alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
   beta_v = constant_op.constant([1.0, -3.6], name="beta")
   gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
       concentration=alpha_v, rate=beta_v)
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(alpha_v)),
       self.evaluate(gamma.concentration))
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(beta_v)), self.evaluate(gamma.rate))
Author: AnishShah | Project: tensorflow | Lines: 10 | Source: gamma_test.py


Example 6: testInverseGammaWithSoftplusConcentrationRate

 def testInverseGammaWithSoftplusConcentrationRate(self):
   with self.cached_session():
     alpha = constant_op.constant([-0.1, -2.9], name="alpha")
     beta = constant_op.constant([1.0, -4.8], name="beta")
     inv_gamma = inverse_gamma.InverseGammaWithSoftplusConcentrationRate(
         concentration=alpha, rate=beta, validate_args=True)
     self.assertAllClose(nn_ops.softplus(alpha).eval(),
                         inv_gamma.concentration.eval())
     self.assertAllClose(nn_ops.softplus(beta).eval(),
                         inv_gamma.rate.eval())
Author: Ajaycs99 | Project: tensorflow | Lines: 10 | Source: inverse_gamma_test.py


Example 7: testNormalWithSoftplusScale

 def testNormalWithSoftplusScale(self):
   mu = array_ops.zeros((10, 3))
   rho = array_ops.ones((10, 3)) * -2.
   normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
   self.assertAllEqual(self.evaluate(mu), self.evaluate(normal.loc))
   self.assertAllEqual(
       self.evaluate(nn_ops.softplus(rho)), self.evaluate(normal.scale))
Author: AnishShah | Project: tensorflow | Lines: 7 | Source: normal_test.py


Example 8: testNormalWithSoftplusScale

 def testNormalWithSoftplusScale(self):
   with self.test_session():
     mu = array_ops.zeros((10, 3))
     rho = array_ops.ones((10, 3)) * -2.
     normal = normal_lib.NormalWithSoftplusScale(loc=mu, scale=rho)
     self.assertAllEqual(mu.eval(), normal.loc.eval())
     self.assertAllEqual(nn_ops.softplus(rho).eval(), normal.scale.eval())
Author: Immexxx | Project: tensorflow | Lines: 7 | Source: normal_test.py


Example 9: _testSoftplus

  def _testSoftplus(self, np_features, use_gpu=False):
    np_features = np.asarray(np_features)
    np_softplus = self._npSoftplus(np_features)
    with self.test_session(use_gpu=use_gpu) as sess:
      softplus = nn_ops.softplus(np_features)
      softplus_inverse = distribution_util.softplus_inverse(softplus)
      [tf_softplus, tf_softplus_inverse] = sess.run([
          softplus, softplus_inverse])
    self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
    rtol = {"float16": 0.07, "float32": 0.003, "float64": 0.002}.get(
        str(np_features.dtype), 1e-6)
    # This will test that we correctly computed the inverse by verifying we
    # recovered the original input.
    self.assertAllCloseAccordingToType(
        np_features, tf_softplus_inverse,
        atol=0., rtol=rtol)
    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        tf_softplus > 0)

    self.assertShapeEqual(np_softplus, softplus)
    self.assertShapeEqual(np_softplus, softplus_inverse)

    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),
                        np.isfinite(tf_softplus))
    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),
                        np.isfinite(tf_softplus_inverse))
Author: AlbertXiebnu | Project: tensorflow | Lines: 26 | Source: distribution_util_test.py
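Example 9 round-trips through distribution_util.softplus_inverse, which recovers x from y = softplus(x); algebraically the inverse is log(exp(y) - 1) = y + log(1 - exp(-y)). The following NumPy-only sketch checks that identity; the helper names np_softplus and np_softplus_inverse are ours, not TensorFlow's.

import numpy as np

def np_softplus(x):
    # softplus(x) = log(1 + exp(x)), via log1p for accuracy near 0.
    return np.log1p(np.exp(x))

def np_softplus_inverse(y):
    # log(exp(y) - 1), rewritten as y + log(1 - exp(-y)) to avoid
    # overflowing exp(y) for large y. Valid for y > 0, i.e. any softplus output.
    return y + np.log(-np.expm1(-y))

x = np.array([-3.0, 0.5, 10.0])
np.testing.assert_allclose(np_softplus_inverse(np_softplus(x)), x, rtol=1e-6)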


Example 10: testNormalWithSoftplusSigma

 def testNormalWithSoftplusSigma(self):
   with self.test_session():
     mu = array_ops.zeros((10, 3))
     rho = array_ops.ones((10, 3)) * -2.
     normal = normal_lib.NormalWithSoftplusSigma(mu=mu, sigma=rho)
     self.assertAllEqual(mu.eval(), normal.mu.eval())
     self.assertAllEqual(nn_ops.softplus(rho).eval(), normal.sigma.eval())
Author: ivankreso | Project: tensorflow | Lines: 7 | Source: normal_test.py


Example 11: testLaplaceWithSoftplusScale

 def testLaplaceWithSoftplusScale(self):
   with self.test_session():
     loc_v = constant_op.constant([0.0, 1.0], name="loc")
     scale_v = constant_op.constant([-1.0, 2.0], name="scale")
     laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
     self.assertAllClose(nn_ops.softplus(scale_v).eval(), laplace.scale.eval())
     self.assertAllClose(loc_v.eval(), laplace.loc.eval())
Author: AliMiraftab | Project: tensorflow | Lines: 7 | Source: laplace_test.py


Example 12: testLaplaceWithSoftplusScale

 def testLaplaceWithSoftplusScale(self):
   loc_v = constant_op.constant([0.0, 1.0], name="loc")
   scale_v = constant_op.constant([-1.0, 2.0], name="scale")
   laplace = laplace_lib.LaplaceWithSoftplusScale(loc=loc_v, scale=scale_v)
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(scale_v)), self.evaluate(laplace.scale))
   self.assertAllClose(self.evaluate(loc_v), self.evaluate(laplace.loc))
Author: JonathanRaiman | Project: tensorflow | Lines: 7 | Source: laplace_test.py


Example 13: _forward_log_det_jacobian

 def _forward_log_det_jacobian(self, x):
   if self._static_event_ndims == 0:
     return x - 2. * nn_ops.softplus(x)
   else:
     # This code is similar to nn_ops.log_softmax but different because we have
     # an implicit zero column to handle. I.e., instead of:
     #   reduce_sum(logits - reduce_sum(exp(logits), dim))
     # we must do:
     #   log_normalization = 1 + reduce_sum(exp(logits))
     #   -log_normalization + reduce_sum(logits - log_normalization)
     log_normalization = nn_ops.softplus(
         math_ops.reduce_logsumexp(x, axis=-1, keep_dims=True))
     fldj = (-log_normalization +
             math_ops.reduce_sum(x - log_normalization,
                                 axis=-1,
                                 keep_dims=True))
     return array_ops.squeeze(fldj, squeeze_dims=-1)
Author: AbhinavJain13 | Project: tensorflow | Lines: 17 | Source: softmax_centered.py
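In the scalar branch above, the bijector's forward map is the logistic sigmoid, and x - 2 * softplus(x) is exactly the log of its derivative: log sigmoid(x) = x - softplus(x) and log(1 - sigmoid(x)) = -softplus(x), so log(sigmoid(x) * (1 - sigmoid(x))) = x - 2 * softplus(x). Here is a small NumPy check of that identity (a standalone sketch, not TensorFlow code):

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.linspace(-4.0, 4.0, 9)
# log |d sigmoid(x) / dx| = log(sigmoid(x) * (1 - sigmoid(x)))
lhs = np.log(sigmoid(x) * (1.0 - sigmoid(x)))
rhs = x - 2.0 * softplus(x)
np.testing.assert_allclose(lhs, rhs, rtol=1e-6)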


Example 14: _testSoftplus

 def _testSoftplus(self, np_features, use_gpu=False):
   np_softplus = self._npSoftplus(np_features)
   with self.test_session(use_gpu=use_gpu):
     softplus = nn_ops.softplus(np_features)
     tf_softplus = softplus.eval()
   self.assertAllCloseAccordingToType(np_softplus, tf_softplus)
   self.assertTrue(np.all(tf_softplus > 0))
   self.assertShapeEqual(np_softplus, softplus)
Author: ThunderQi | Project: tensorflow | Lines: 8 | Source: softplus_op_test.py


Example 15: jensen_shannon

def jensen_shannon(logu, self_normalized=False, name=None):
  """The Jensen-Shannon Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:

  ```none
  f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
  ```

  When `self_normalized = False` the `(u + 1) log(2)` term is omitted.

  Observe that as an f-Divergence, this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[p, m] + KL[q, m]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
  f-Divergence.

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  For more information, see:
    Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
    Inf. Th., 37, 145-151, 1991.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    jensen_shannon_of_u: Floating-type `Tensor` of the Csiszar-function
      evaluated at `u = exp(logu)`.
  """

  with ops.name_scope(name, "jensen_shannon", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    npdt = logu.dtype.as_numpy_dtype
    y = nn_ops.softplus(logu)
    if self_normalized:
      y -= np.log(2).astype(npdt)
    return math_ops.exp(logu) * logu - (1. + math_ops.exp(logu)) * y
Author: Joetz | Project: tensorflow | Lines: 56 | Source: csiszar_divergence_impl.py
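The return expression works because softplus(log u) = log(1 + u), so with self_normalized=True the code computes u log(u) - (1 + u) (log(1 + u) - log 2), which expands to the docstring's f(u). A standalone NumPy spot-check at u = 2 (our own sketch, not project code):

import numpy as np

u = 2.0
logu = np.log(u)
y = np.log1p(np.exp(logu)) - np.log(2.0)   # softplus(log u) - log 2 = log((1 + u) / 2)
f_code = np.exp(logu) * logu - (1.0 + np.exp(logu)) * y

# Docstring form: f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
f_doc = u * np.log(u) - (1.0 + u) * np.log1p(u) + (u + 1.0) * np.log(2.0)
np.testing.assert_allclose(f_code, f_doc)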


Example 16: testMultivariateNormalDiagWithSoftplusStDev

  def testMultivariateNormalDiagWithSoftplusStDev(self):
    mu = [-1.0, 1.0]
    diag = [-1.0, -2.0]
    with self.test_session():
      dist = distributions.MultivariateNormalDiagWithSoftplusStDev(mu, diag)
      samps = dist.sample(1000, seed=0).eval()
      cov_mat = array_ops.matrix_diag(nn_ops.softplus(diag)).eval()**2

      self.assertAllClose(mu, samps.mean(axis=0), atol=0.1)
      self.assertAllClose(cov_mat, np.cov(samps.T), atol=0.1)
Author: AliMiraftab | Project: tensorflow | Lines: 10 | Source: mvn_test.py


Example 17: testStudentTWithAbsDfSoftplusSigma

 def testStudentTWithAbsDfSoftplusSigma(self):
   with self.test_session():
     df = constant_op.constant([-3.2, -4.6])
     mu = constant_op.constant([-4.2, 3.4])
     sigma = constant_op.constant([-6.4, -8.8])
     student = ds.StudentTWithAbsDfSoftplusSigma(df=df, mu=mu, sigma=sigma)
     self.assertAllClose(
         math_ops.floor(math_ops.abs(df)).eval(), student.df.eval())
     self.assertAllClose(mu.eval(), student.mu.eval())
     self.assertAllClose(nn_ops.softplus(sigma).eval(), student.sigma.eval())
Author: willdzeng | Project: tensorflow | Lines: 10 | Source: student_t_test.py


Example 18: testStudentTWithAbsDfSoftplusScale

 def testStudentTWithAbsDfSoftplusScale(self):
   df = constant_op.constant([-3.2, -4.6])
   mu = constant_op.constant([-4.2, 3.4])
   sigma = constant_op.constant([-6.4, -8.8])
   student = student_t.StudentTWithAbsDfSoftplusScale(
       df=df, loc=mu, scale=sigma)
   self.assertAllClose(
       math_ops.floor(self.evaluate(math_ops.abs(df))),
       self.evaluate(student.df))
   self.assertAllClose(self.evaluate(mu), self.evaluate(student.loc))
   self.assertAllClose(
       self.evaluate(nn_ops.softplus(sigma)), self.evaluate(student.scale))
Author: adit-chandra | Project: tensorflow | Lines: 12 | Source: student_t_test.py


Example 19: arithmetic_geometric

def arithmetic_geometric(logu, self_normalized=False, name=None):
  """The Arithmetic-Geometric Csiszar-function in log-space.

  A Csiszar-function is a member of,

  ```none
  F = { f:R_+ to R : f convex }.
  ```

  When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:

  ```none
  f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
  ```

  When `self_normalized = False` the `(1 + u) log(2)` term is omitted.

  Observe that as an f-Divergence, this Csiszar-function implies:

  ```none
  D_f[p, q] = KL[m, p] + KL[m, q]
  m(x) = 0.5 p(x) + 0.5 q(x)
  ```

  In a sense, this divergence is the "reverse" of the Jensen-Shannon
  f-Divergence.

  This Csiszar-function induces a symmetric f-Divergence, i.e.,
  `D_f[p, q] = D_f[q, p]`.

  Warning: this function makes non-log-space calculations and may therefore be
  numerically unstable for `|logu| >> 0`.

  Args:
    logu: Floating-type `Tensor` representing `log(u)` from above.
    self_normalized: Python `bool` indicating whether `f'(u=1)=0`. When
      `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
      when `p, q` are unnormalized measures.
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    arithmetic_geometric_of_u: Floating-type `Tensor` of the
      Csiszar-function evaluated at `u = exp(logu)`.
  """

  with ops.name_scope(name, "arithmetic_geometric", [logu]):
    logu = ops.convert_to_tensor(logu, name="logu")
    y = nn_ops.softplus(logu) - 0.5 * logu
    if self_normalized:
      y -= np.log(2.).astype(logu.dtype.as_numpy_dtype)
    return (1. + math_ops.exp(logu)) * y
Author: Joetz | Project: tensorflow | Lines: 51 | Source: csiszar_divergence_impl.py
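Here too softplus(log u) = log(1 + u), so y = log(1 + u) - 0.5 log(u) = log((1 + u) / sqrt(u)), and multiplying by (1 + u) reproduces the docstring's formula. A standalone NumPy spot-check at u = 3 (our own sketch, not project code):

import numpy as np

u = 3.0
logu = np.log(u)
y = np.log1p(np.exp(logu)) - 0.5 * logu    # log((1 + u) / sqrt(u))
f_code = (1.0 + np.exp(logu)) * (y - np.log(2.0))   # self_normalized=True

# Docstring form: f(u) = (1 + u) log((1 + u) / sqrt(u)) - (1 + u) log(2)
f_doc = (1.0 + u) * np.log((1.0 + u) / np.sqrt(u)) - (1.0 + u) * np.log(2.0)
np.testing.assert_allclose(f_code, f_doc)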


Example 20: testGradient

 def testGradient(self):
   with self.test_session():
     x = constant_op.constant(
         [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],
         shape=[2, 5],
         name="x")
     y = nn_ops.softplus(x, name="softplus")
     x_init = np.asarray(
         [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],
         dtype=np.float32,
         order="F")
     err = gradient_checker.compute_gradient_error(
         x, [2, 5], y, [2, 5], x_init_value=x_init)
   logging.vlog(2, "softplus (float) gradient err = ", err)
   self.assertLess(err, 1e-4)
Author: AlbertXiebnu | Project: tensorflow | Lines: 15 | Source: distribution_util_test.py



Note: The tensorflow.python.ops.nn_ops.softplus examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.

