
Python tensorflow.check_numerics Function Code Examples


This article collects typical usage examples of the Python function tensorflow.check_numerics. If you are wondering what check_numerics does, how to call it, or what it looks like in real code, the curated examples below should help.



A total of 20 code examples of the check_numerics function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples. A minimal usage sketch is given first, before the collected examples.
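
The sketch below is a minimal, self-contained illustration of the basic call, written against the TensorFlow 1.x graph API used throughout this page; the tensor names and message strings are illustrative only. tf.check_numerics(tensor, message) returns its input tensor unchanged, but the underlying op raises tf.errors.InvalidArgumentError at run time if the tensor contains any NaN or Inf values, prefixing the error with the given message.

import tensorflow as tf

# Finite values pass through unchanged.
x = tf.constant([1.0, 2.0, 3.0])
checked = tf.check_numerics(x, message='x contains NaN or Inf')

# 1/0 gives Inf and 0/0 gives NaN, so this check fails at run time.
bad = tf.constant([1.0, 0.0]) / tf.constant([0.0, 0.0])
checked_bad = tf.check_numerics(bad, message='bad contains NaN or Inf')

with tf.Session() as sess:
    print(sess.run(checked))   # [1. 2. 3.]
    try:
        sess.run(checked_bad)  # the op raises an error mentioning the message above
    except tf.errors.InvalidArgumentError as err:
        print('check_numerics caught:', err.message)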

Example 1: testGradient

  def testGradient(self):
    s = [2, 3, 4, 2]
    # NOTE(kearnes): divide by 20 so product is a reasonable size
    x = np.arange(1.0, 49.0).reshape(s).astype(np.float32) / 20.
    with self.test_session():
      t = tf.convert_to_tensor(x)

      su = tf.reduce_prod(t, [])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [1, 2])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [2, 2], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

      su = tf.reduce_prod(t, [0, 1, 2, 3])
      jacob_t, jacob_n = gradient_checker.ComputeGradient(
          t, s, su, [1], x_init_value=x, delta=1)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3)

    # NOTE(kearnes): the current gradient calculation gives NaNs for 0 inputs
    x = np.arange(0.0, 48.0).reshape(s).astype(np.float32) / 20.
    with self.test_session():
      t = tf.convert_to_tensor(x)
      su = tf.reduce_prod(t, [])
      jacob_t, _ = gradient_checker.ComputeGradient(
          t, s, su, [2, 3, 4, 2], x_init_value=x, delta=1)
      with self.assertRaisesOpError("Tensor had NaN values"):
        tf.check_numerics(jacob_t, message="_ProdGrad NaN test").op.run()
Author: debaratidas1994, Project: tensorflow, Lines: 31, Source: reduction_ops_test.py


Example 2: simulate

  def simulate(self, action):
    """Step the environment.

    The result of the step can be accessed from the variables defined below.

    Args:
      action: Tensor holding the action to apply.

    Returns:
      Operation.
    """
    with tf.name_scope('environment/simulate'):
      if action.dtype in (tf.float16, tf.float32, tf.float64):
        action = tf.check_numerics(action, 'action')
      observ_dtype = self._parse_dtype(self._env.observation_space)
      observ, reward, done = tf.py_func(
          lambda a: self._env.step(a)[:3], [action],
          [observ_dtype, tf.float32, tf.bool], name='step')
      observ = tf.check_numerics(observ, 'observ')
      reward = tf.check_numerics(reward, 'reward')
      return tf.group(
          self._observ.assign(observ),
          self._action.assign(action),
          self._reward.assign(reward),
          self._done.assign(done),
          self._step.assign_add(1))
Author: AndrewMeadows, Project: bullet3, Lines: 26, Source: in_graph_env.py


Example 3: _network

  def _network(self, observ, length=None, state=None, reuse=True):
    """Compute the network output for a batched sequence of observations.

    Optionally, the initial state can be specified. The weights should be
    reused for all calls, except for the first one. Output is a named tuple
    containing the policy as a TensorFlow distribution, the policy mean and log
    standard deviation, the approximated state value, and the new recurrent
    state.

    Args:
      observ: Sequences of observations.
      length: Batch of sequence lengths.
      state: Batch of initial recurrent states.
      reuse: Python boolean whether to reuse previous variables.

    Returns:
      NetworkOutput tuple.
    """
    with tf.variable_scope('network', reuse=reuse):
      observ = tf.convert_to_tensor(observ)
      use_gpu = self._config.use_gpu and utility.available_gpus()
      with tf.device('/gpu:0' if use_gpu else '/cpu:0'):
        observ = tf.check_numerics(observ, 'observ')
        cell = self._config.network(self._batch_env.action.shape[1].value)
        (mean, logstd, value), state = tf.nn.dynamic_rnn(cell,
                                                         observ,
                                                         length,
                                                         state,
                                                         tf.float32,
                                                         swap_memory=True)
      mean = tf.check_numerics(mean, 'mean')
      logstd = tf.check_numerics(logstd, 'logstd')
      value = tf.check_numerics(value, 'value')
      policy = tf.contrib.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
      return _NetworkOutput(policy, mean, logstd, value, state)
Author: bulletphysics, Project: bullet3, Lines: 35, Source: algorithm.py


Example 4: batch_norm

def batch_norm(input_,
               dim,
               name,
               scale=True,
               train=True,
               epsilon=1e-8,
               decay=.1,
               axes=[0],
               bn_lag=DEFAULT_BN_LAG):
    """Batch normalization."""
    # create variables
    with tf.variable_scope(name):
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)
        if scale:
            gamma = variable_on_cpu("gamma", [dim], tf.constant_initializer(1.))
        beta = variable_on_cpu("beta", [dim], tf.constant_initializer(0.))
    # choose the appropriate moments
    if train:
        used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
        cur_mean, cur_var = used_mean, used_var
        if bn_lag > 0.:
            used_mean -= (1. - bn_lag) * (used_mean - tf.stop_gradient(mean))
            used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))
            used_mean /= (1. - bn_lag**(step + 1))
            used_var /= (1. - bn_lag**(step + 1))
    else:
        used_mean, used_var = mean, var
        cur_mean, cur_var = used_mean, used_var

    # normalize
    res = (input_ - used_mean) / tf.sqrt(used_var + epsilon)
    # de-normalize
    if scale:
        res *= gamma
    res += beta

    # update variables
    if train:
        with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
            with ops.colocate_with(mean):
                new_mean = tf.assign_sub(
                    mean,
                    tf.check_numerics(decay * (mean - cur_mean), "NaN in moving mean."))
        with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
            with ops.colocate_with(var):
                new_var = tf.assign_sub(
                    var,
                    tf.check_numerics(decay * (var - cur_var),
                                      "NaN in moving variance."))
        with tf.name_scope(name, "IncrementTime", [step]):
            with ops.colocate_with(step):
                new_step = tf.assign_add(step, 1.)
        res += 0. * new_mean * new_var * new_step

    return res
Author: Peratham, Project: models, Lines: 59, Source: real_nvp_utils.py


Example 5: recurrent_gaussian

def recurrent_gaussian(
    config, action_space, observations, length, state=None):
  """Independent recurrent policy and feed forward value networks.

  The policy network outputs the mean action and the standard deviation is
  learned as independent parameter vector. The last policy layer is recurrent
  and uses a GRU cell.

  Args:
    config: Configuration object.
    action_space: Action space of the environment.
    observations: Sequences of observations.
    length: Batch of sequence lengths.
    state: Batch of initial recurrent states.

  Raises:
    ValueError: Unexpected action space.

  Returns:
    Attribute dictionary containing the policy, value, and state.
  """
  if not isinstance(action_space, gym.spaces.Box):
    raise ValueError('Network expects continuous actions.')
  if not len(action_space.shape) == 1:
    raise ValueError('Network only supports 1D action vectors.')
  action_size = action_space.shape[0]
  init_output_weights = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_output_factor)
  before_softplus_std_initializer = tf.constant_initializer(
      np.log(np.exp(config.init_std) - 1))
  cell = tf.contrib.rnn.GRUBlockCell(config.policy_layers[-1])
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope('policy'):
    x = flat_observations
    for size in config.policy_layers[:-1]:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    x, state = tf.nn.dynamic_rnn(cell, x, length, state, tf.float32)
    mean = tf.contrib.layers.fully_connected(
        x, action_size, tf.tanh,
        weights_initializer=init_output_weights)
    std = tf.nn.softplus(tf.get_variable(
        'before_softplus_std', mean.shape[2:], tf.float32,
        before_softplus_std_initializer))
    std = tf.tile(
        std[None, None],
        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
  with tf.variable_scope('value'):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  mean = tf.check_numerics(mean, 'mean')
  std = tf.check_numerics(std, 'std')
  value = tf.check_numerics(value, 'value')
  policy = CustomKLDiagNormal(mean, std)
  return agents.tools.AttrDict(policy=policy, value=value, state=state)
Author: shamanez, Project: agents, Lines: 58, Source: networks.py


Example 6: batch_norm_log_diff

def batch_norm_log_diff(input_,
                        dim,
                        name,
                        train=True,
                        epsilon=1e-8,
                        decay=.1,
                        axes=[0],
                        reuse=None,
                        bn_lag=DEFAULT_BN_LAG):
    """Batch normalization with corresponding log determinant Jacobian."""
    if reuse is None:
        reuse = not train
    # create variables
    with tf.variable_scope(name) as scope:
        if reuse:
            scope.reuse_variables()
        var = variable_on_cpu(
            "var", [dim], tf.constant_initializer(1.), trainable=False)
        mean = variable_on_cpu(
            "mean", [dim], tf.constant_initializer(0.), trainable=False)
        step = variable_on_cpu("step", [], tf.constant_initializer(0.), trainable=False)
    # choose the appropriate moments
    if train:
        used_mean, used_var = tf.nn.moments(input_, axes, name="batch_norm")
        cur_mean, cur_var = used_mean, used_var
        if bn_lag > 0.:
            used_var = stable_var(input_=input_, mean=used_mean, axes=axes)
            cur_var = used_var
            used_mean -= (1 - bn_lag) * (used_mean - tf.stop_gradient(mean))
            used_mean /= (1. - bn_lag**(step + 1))
            used_var -= (1 - bn_lag) * (used_var - tf.stop_gradient(var))
            used_var /= (1. - bn_lag**(step + 1))
    else:
        used_mean, used_var = mean, var
        cur_mean, cur_var = used_mean, used_var

    # update variables
    if train:
        with tf.name_scope(name, "AssignMovingAvg", [mean, cur_mean, decay]):
            with ops.colocate_with(mean):
                new_mean = tf.assign_sub(
                    mean,
                    tf.check_numerics(
                        decay * (mean - cur_mean), "NaN in moving mean."))
        with tf.name_scope(name, "AssignMovingAvg", [var, cur_var, decay]):
            with ops.colocate_with(var):
                new_var = tf.assign_sub(
                    var,
                    tf.check_numerics(decay * (var - cur_var),
                                      "NaN in moving variance."))
        with tf.name_scope(name, "IncrementTime", [step]):
            with ops.colocate_with(step):
                new_step = tf.assign_add(step, 1.)
        used_var += 0. * new_mean * new_var * new_step
    used_var += epsilon

    return used_mean, used_var
Author: Peratham, Project: models, Lines: 57, Source: real_nvp_utils.py


Example 7: antenna_jones

    def antenna_jones(lm, stokes, alpha, ref_freq):
        """
        Compute the jones terms for each antenna.

        lm, stokes and alpha are the source variables.
        """

        # Compute the complex phase
        cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT)

        # Check for nans/infs in the complex phase
        phase_msg = ("Check that '1 - l**2  - m**2 >= 0' holds "
                    "for all your lm coordinates. This is required "
                    "for 'n = sqrt(1 - l**2 - m**2) - 1' "
                    "to be finite.")

        phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg)
        phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg)

        # Compute the square root of the brightness matrix
        # (as well as the sign)
        bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha,
            D.frequency, ref_freq, CT=CT,
            polarisation_type=polarisation_type)

        # Check for nans/infs in the bsqrt
        bsqrt_msg = ("Check that your stokes parameters "
                    "satisfy I**2 >= Q**2 + U**2 + V**2. "
                    "Montblanc performs a cholesky decomposition "
                    "of the brightness matrix and the above must "
                    "hold for this to produce valid values.")

        bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg)
        bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg)

        # Compute the direction dependent effects from the beam
        ejones = rime.e_beam(lm, D.frequency,
            D.pointing_errors, D.antenna_scaling,
            beam_sin, beam_cos,
            D.beam_extents, D.beam_freq_map, D.ebeam)

        deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag]
        deps = [] # Do nothing for now

        # Combine the brightness square root, complex phase,
        # feed rotation and beam dde's
        with tf.control_dependencies(deps):
            antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase,
                                                    feed_rotation, ejones, FT=FT)
            return antenna_jones, sgn_brightness
Author: ska-sa, Project: montblanc, Lines: 50, Source: RimeSolver.py


Example 8: recurrent_gaussian

def recurrent_gaussian(
    config, action_size, observations, length, state=None):
  """Independent recurrent policy and feed forward value networks.

  The policy network outputs the mean action and the log standard deviation
  is learned as independent parameter vector. The last policy layer is
  recurrent and uses a GRU cell.

  Args:
    config: Configuration object.
    action_size: Length of the action vector.
    observations: Sequences of observations.
    length: Batch of sequence lengths.
    state: Batch of initial recurrent states.

  Returns:
    NetworkOutput tuple.
  """
  mean_weights_initializer = tf.contrib.layers.variance_scaling_initializer(
      factor=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
  cell = tf.contrib.rnn.GRUBlockCell(config.policy_layers[-1])
  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
  with tf.variable_scope('policy'):
    x = flat_observations
    for size in config.policy_layers[:-1]:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    x, state = tf.nn.dynamic_rnn(cell, x, length, state, tf.float32)
    mean = tf.contrib.layers.fully_connected(
        x, action_size, tf.tanh,
        weights_initializer=mean_weights_initializer)
    logstd = tf.get_variable(
        'logstd', mean.shape[2:], tf.float32, logstd_initializer)
    logstd = tf.tile(
        logstd[None, None],
        [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
  with tf.variable_scope('value'):
    x = flat_observations
    for size in config.value_layers:
      x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
    value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
  mean = tf.check_numerics(mean, 'mean')
  logstd = tf.check_numerics(logstd, 'logstd')
  value = tf.check_numerics(value, 'value')
  policy = tf.contrib.distributions.MultivariateNormalDiag(
      mean, tf.exp(logstd))
  # assert state.shape.as_list()[0] is not None
  return NetworkOutput(policy, mean, logstd, value, state)
Author: AndrewMeadows, Project: bullet3, Lines: 50, Source: networks.py


Example 9: testPassThrough

  def testPassThrough(self):
    with self.test_session(graph=tf.Graph()):
      t1 = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
      checked = tf.check_numerics(t1, message="pass through test")
      value = checked.eval()
      self.assertAllEqual(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), value)
      self.assertEqual([2, 3], checked.get_shape())
Author: tongwang01, Project: tensorflow, Lines: 7, Source: numerics_test.py


Example 10: perform

  def perform(self, observ):
    """Compute batch of actions and a summary for a batch of observation.

    Args:
      observ: Tensor of a batch of observations for all agents.

    Returns:
      Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
      observ = self._observ_filter.transform(observ)
      network = self._network(observ[:, None], tf.ones(observ.shape[0]), self._last_state)
      action = tf.cond(self._is_training, network.policy.sample, lambda: network.mean)
      logprob = network.policy.log_prob(action)[:, 0]
      # pylint: disable=g-long-lambda
      summary = tf.cond(
          self._should_log, lambda: tf.summary.merge([
              tf.summary.histogram('mean', network.mean[:, 0]),
              tf.summary.histogram('std', tf.exp(network.logstd[:, 0])),
              tf.summary.histogram('action', action[:, 0]),
              tf.summary.histogram('logprob', logprob)
          ]), str)
      # Remember current policy to append to memory in the experience callback.
      with tf.control_dependencies([
          utility.assign_nested_vars(self._last_state, network.state),
          self._last_action.assign(action[:, 0]),
          self._last_mean.assign(network.mean[:, 0]),
          self._last_logstd.assign(network.logstd[:, 0])
      ]):
        return tf.check_numerics(action[:, 0], 'action'), tf.identity(summary)
Author: bulletphysics, Project: bullet3, Lines: 30, Source: algorithm.py


Example 11: add_check_numerics_ops

def add_check_numerics_ops():
    """Connect a `check_numerics` to every floating point tensor.
    `check_numerics` operations themselves are added for each `half`, `float`,
    or `double` tensor in the graph. For all ops in the graph, the
    `check_numerics` op for all of its (`half`, `float`, or `double`) inputs
    is guaranteed to run before the `check_numerics` op on any of its outputs.
    Returns:
      A `group` op depending on all `check_numerics` ops added.

    Based on `tf.add_check_numerics_ops`; modified to work around problem with
    variables in different "frames" (triggered by attempt to merge nodes
    from inside and outside the while loop of an RNN).
    """
    check_op = []
    # This code relies on the ordering of ops in get_operations().
    # The producer of a tensor always comes before that tensor's consumer in
    # this list. This is true because get_operations() returns ops in the order
    # added, and an op can only be added after its inputs are added.
    for op in tf.get_default_graph().get_operations():
        if op.name and any(re.search(pattern, op.name) for pattern in NO_MONITOR):
            continue
        for output in op.outputs:
            if output.dtype in [tf.float16, tf.float32, tf.float64] and \
                    output.op._get_control_flow_context() == \
                    tf.get_default_graph()._get_control_flow_context():
                message = op.name + ":" + str(output.value_index)
                with tf.control_dependencies(check_op):
                    check_op = [tf.check_numerics(output, message=message)]
    return tf.group(*check_op)
Author: futurulus, Project: rl-cards, Lines: 29, Source: tfutils.py
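
The snippet above only defines the helper. A short hypothetical usage sketch follows (not part of the original source; it assumes the function above and its module-level NO_MONITOR pattern list are defined, and `x`, `loss`, and `train_op` are illustrative placeholders): the returned group op is typically run together with the training op, so that any NaN or Inf in a monitored tensor aborts the step with a descriptive error.

import numpy as np
import tensorflow as tf

# Hypothetical training graph; the dense layer and loss are placeholders.
x = tf.placeholder(tf.float32, [None, 4])
loss = tf.reduce_mean(tf.square(tf.layers.dense(x, 1)))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
check_op = add_check_numerics_ops()  # group op over all float tensors in the graph

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Running check_op alongside train_op raises InvalidArgumentError if any
    # monitored tensor contains NaN or Inf values during this step.
    sess.run([train_op, check_op], feed_dict={x: np.ones((8, 4), np.float32)})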


Example 12: _BuildLoss

  def _BuildLoss(self):
    # 1. reconstr_loss seems doesn't do better than l2 loss.
    # 2. Only works when using reduce_mean. reduce_sum doesn't work.
    # 3. It seems kl loss doesn't play an important role.
    self.loss = 0
    with tf.variable_scope('loss'):
      if self.params['l2_loss']:
        l2_loss = tf.reduce_mean(tf.square(self.diff_output - self.diffs[1]))
        tf.summary.scalar('l2_loss', l2_loss)
        self.loss += l2_loss
      if self.params['reconstr_loss']:
        reconstr_loss = (-tf.reduce_mean(
            self.diffs[1] * (1e-10 + self.diff_output) +
            (1-self.diffs[1]) * tf.log(1e-10 + 1 - self.diff_output)))
        reconstr_loss = tf.check_numerics(reconstr_loss, 'reconstr_loss')
        tf.summary.scalar('reconstr_loss', reconstr_loss)
        self.loss += reconstr_loss
      if self.params['kl_loss']:
        kl_loss = (0.5 * tf.reduce_mean(
            tf.square(self.z_mean) + tf.square(self.z_stddev) -
            2 * self.z_stddev_log - 1))
        tf.summary.scalar('kl_loss', kl_loss)
        self.loss += kl_loss

      tf.summary.scalar('loss', self.loss)
Author: ALISCIFP, Project: models, Lines: 25, Source: model.py


Example 13: transform

  def transform(self, value):
    """Normalize a single or batch tensor.

    Applies the activated transformations in the constructor using current
    estimates of mean and variance.

    Args:
      value: Batch or single value tensor.

    Returns:
      Normalized batch or single value tensor.
    """
    with tf.name_scope(self._name + '/transform'):
      no_batch_dim = value.shape.ndims == self._mean.shape.ndims
      if no_batch_dim:
        # Add a batch dimension if necessary.
        value = value[None, ...]
      if self._center:
        value -= self._mean[None, ...]
      if self._scale:
        # We cannot scale before seeing at least two samples.
        value /= tf.cond(
            self._count > 1, lambda: self._std() + 1e-8, lambda: tf.ones_like(self._var_sum))[None]
      if self._clip:
        value = tf.clip_by_value(value, -self._clip, self._clip)
      # Remove batch dimension if necessary.
      if no_batch_dim:
        value = value[0]
      return tf.check_numerics(value, 'value')
Author: bulletphysics, Project: bullet3, Lines: 29, Source: normalize.py


Example 14: _value_loss

  def _value_loss(self, observ, reward, length):
    """Compute the loss function for the value baseline.

    The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('value_loss'):
      value = self._network(observ, length).value
      return_ = utility.discounted_return(reward, length, self._config.discount)
      advantage = return_ - value
      value_loss = 0.5 * self._mask(advantage**2, length)
      summary = tf.summary.merge([
          tf.summary.histogram('value_loss', value_loss),
          tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))
      ])
      value_loss = tf.reduce_mean(value_loss)
      return tf.check_numerics(value_loss, 'value_loss'), summary
Author: bulletphysics, Project: bullet3, Lines: 25, Source: algorithm.py


Example 15: logdet_grad

def logdet_grad(op, grad):
    a = op.inputs[0]
    a_adj_inv = tf.check_numerics(
                    tf.matrix_inverse(a, adjoint=True), 
                    'zero determinant')
    out_shape = tf.concat([tf.shape(a)[:-2], [1, 1]], axis=0)
    return tf.reshape(grad, out_shape) * a_adj_inv
Author: EverettYou, Project: EFL, Lines: 7, Source: EFL.py


Example 16: fit

    def fit(self, X_train, y_train, X_min, X_max, ridge):  # pylint: disable=arguments-differ
        super(GPRGD, self).fit(X_train, y_train, ridge)
        self.X_min = X_min
        self.X_max = X_max

        with tf.Session(graph=self.graph,
                        config=tf.ConfigProto(
                            intra_op_parallelism_threads=self.num_threads_)) as sess:
            xt_ = tf.Variable(self.X_train[0], tf.float32)
            xt_ph = tf.placeholder(tf.float32)
            xt_assign_op = xt_.assign(xt_ph)
            init = tf.global_variables_initializer()
            sess.run(init)
            K2_mat = tf.transpose(tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.pow(
                tf.subtract(xt_, self.X_train), 2), 1)), 0))
            if self.check_numerics is True:
                K2_mat = tf.check_numerics(K2_mat, "K2_mat: ")
            K2__ = tf.cast(self.magnitude * tf.exp(-K2_mat / self.length_scale), tf.float32)
            if self.check_numerics is True:
                K2__ = tf.check_numerics(K2__, "K2__: ")
            yhat_gd = tf.cast(tf.matmul(tf.transpose(K2__), self.xy_), tf.float32)
            if self.check_numerics is True:
                yhat_gd = tf.check_numerics(yhat_gd, message="yhat: ")
            sig_val = tf.cast((tf.sqrt(self.magnitude - tf.matmul(
                tf.transpose(K2__), tf.matmul(self.K_inv, K2__)))), tf.float32)
            if self.check_numerics is True:
                sig_val = tf.check_numerics(sig_val, message="sigma: ")
            LOG.debug("\nyhat_gd : %s", str(sess.run(yhat_gd)))
            LOG.debug("\nsig_val : %s", str(sess.run(sig_val)))

            loss = tf.squeeze(tf.subtract(self.mu_multiplier * yhat_gd,
                                          self.sigma_multiplier * sig_val))
            if self.check_numerics is True:
                loss = tf.check_numerics(loss, "loss: ")
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate,
                                               epsilon=self.epsilon)
            # optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
            train = optimizer.minimize(loss)

            self.vars['xt_'] = xt_
            self.vars['xt_ph'] = xt_ph
            self.ops['xt_assign_op'] = xt_assign_op
            self.ops['yhat_gd'] = yhat_gd
            self.ops['sig_val2'] = sig_val
            self.ops['loss_op'] = loss
            self.ops['train_op'] = train
        return self
Author: FullStackHan, Project: ottertune, Lines: 47, Source: gp_tf.py


Example 17: AddTraining

  def AddTraining(self,
                  task_context,
                  batch_size,
                  learning_rate=0.1,
                  decay_steps=4000,
                  momentum=0.9,
                  corpus_name='documents'):
    """Builds a trainer to minimize the cross entropy cost function.

    Args:
      task_context: file path from which to read the task context
      batch_size: batch size to request from reader op
      learning_rate: initial value of the learning rate
      decay_steps: decay learning rate by 0.96 every this many steps
      momentum: momentum parameter used when training with momentum
      corpus_name: name of the task input to read parses from

    Returns:
      Dictionary of named training nodes.
    """
    with tf.name_scope('training'):
      nodes = self.training
      nodes.update(self._AddGoldReader(task_context, batch_size, corpus_name))
      nodes.update(self._BuildNetwork(nodes['feature_endpoints'],
                                      return_average=False))
      nodes.update(self._AddCostFunction(batch_size, nodes['gold_actions'],
                                         nodes['logits']))
      # Add the optimizer
      if self._only_train:
        trainable_params = [v
                            for k, v in self.params.iteritems()
                            if k in self._only_train]
      else:
        trainable_params = self.params.values()
      lr = self._AddLearningRate(learning_rate, decay_steps)
      optimizer = tf.train.MomentumOptimizer(lr,
                                             momentum,
                                             use_locking=self._use_locking)
      train_op = optimizer.minimize(nodes['cost'], var_list=trainable_params)
      for param in trainable_params:
        slot = optimizer.get_slot(param, 'momentum')
        self.inits[slot.name] = state_ops.init_variable(slot,
                                                        tf.zeros_initializer)
        self.variables[slot.name] = slot
      numerical_checks = [
          tf.check_numerics(param,
                            message='Parameter is not finite.')
          for param in trainable_params
          if param.dtype.base_dtype in [tf.float32, tf.float64]
      ]
      check_op = tf.group(*numerical_checks)
      avg_update_op = tf.group(*self._averaging.values())
      train_ops = [train_op]
      if self._check_parameters:
        train_ops.append(check_op)
      if self._use_averaging:
        train_ops.append(avg_update_op)
      nodes['train_op'] = tf.group(*train_ops, name='train_op')
    return nodes
Author: TrendonixNetwork, Project: ProjectCybonix, Lines: 59, Source: graph_builder.py


Example 18: _conv

  def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    r = super()._conv(name, x, filter_size, in_filters, out_filters, strides)
    r = tf.check_numerics(r, "okay")
    p = tf.abs(r) / tf.reduce_sum(tf.abs(r), axis=(1, 2, 3), keep_dims=True)
    w, h, c = p.get_shape().as_list()[1:]
    N = w * h * c * 2
    if self.fix_randomness:
      p_keep = 1 - tf.exp(-N * p)
      rand = tf.constant(np.random.uniform(size=(p_keep.shape[0], w, h, c)),
                         dtype=tf.float32)
    else:
      p_keep = 1 - tf.exp(-N * p)
      rand = tf.random_uniform(tf.shape(p_keep))
    keep = rand < p_keep
    r = tf.cast(keep, tf.float32) * r / (p_keep + 1e-8)
    r = tf.check_numerics(r, "OH NO")
    return r
Author: locussam, Project: obfuscated-gradients, Lines: 17, Source: sap_model.py


Example 19: _policy_loss

  def _policy_loss(
      self, mean, logstd, old_mean, old_logstd, action, advantage, length):
    """Compute the policy loss composed of multiple components.

    1. The policy gradient loss is importance sampled from the data-collecting
       policy at the beginning of training.
    2. The second term is a KL penalty between the policy at the beginning of
       training and the current policy.
    3. Additionally, if this KL already changed more than twice the target
       amount, we activate a strong penalty discouraging further divergence.

    Args:
      mean: Sequences of action means of the current policy.
      logstd: Sequences of action log stddevs of the current policy.
      old_mean: Sequences of action means of the behavioral policy.
      old_logstd: Sequences of action log stddevs of the behavioral policy.
      action: Sequences of actions.
      advantage: Sequences of advantages.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('policy_loss'):
      entropy = utility.diag_normal_entropy(mean, logstd)
      kl = tf.reduce_mean(self._mask(utility.diag_normal_kl(
          old_mean, old_logstd, mean, logstd), length), 1)
      policy_gradient = tf.exp(
          utility.diag_normal_logpdf(mean, logstd, action) -
          utility.diag_normal_logpdf(old_mean, old_logstd, action))
      surrogate_loss = -tf.reduce_mean(self._mask(
          policy_gradient * tf.stop_gradient(advantage), length), 1)
      kl_penalty = self._penalty * kl
      cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
      cutoff_count = tf.reduce_sum(
          tf.cast(kl > cutoff_threshold, tf.int32))
      with tf.control_dependencies([tf.cond(
          cutoff_count > 0,
          lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
        kl_cutoff = (
            self._config.kl_cutoff_coef *
            tf.cast(kl > cutoff_threshold, tf.float32) *
            (kl - cutoff_threshold) ** 2)
      policy_loss = surrogate_loss + kl_penalty + kl_cutoff
      summary = tf.summary.merge([
          tf.summary.histogram('entropy', entropy),
          tf.summary.histogram('kl', kl),
          tf.summary.histogram('surrogate_loss', surrogate_loss),
          tf.summary.histogram('kl_penalty', kl_penalty),
          tf.summary.histogram('kl_cutoff', kl_cutoff),
          tf.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
          tf.summary.histogram('policy_loss', policy_loss),
          tf.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
          tf.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
          tf.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))])
      policy_loss = tf.reduce_mean(policy_loss, 0)
      return tf.check_numerics(policy_loss, 'policy_loss'), summary
Author: AndrewMeadows, Project: bullet3, Lines: 57, Source: algorithm.py


Example 20: discounted_return

def discounted_return(reward, length, discount):
  """Discounted Monte-Carlo returns."""
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  return_ = tf.reverse(tf.transpose(tf.scan(
      lambda agg, cur: cur + discount * agg,
      tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
      tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
  return tf.check_numerics(tf.stop_gradient(return_), 'return')
Author: shamanez, Project: agents, Lines: 9, Source: utility.py



Note: The tensorflow.check_numerics examples in this article were compiled from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright remains with the original authors; please consult each project's license before distributing or reusing the code. Do not republish this article without permission.

