Python tf_logging.debug Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.platform.tf_logging.debug. If you have been wondering what the debug function does and how to use it, the curated code examples below should help.



A total of 20 code examples of the debug function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
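
Before diving into the collected examples, here is a minimal usage sketch of tf_logging.debug itself (a sketch, not taken from any of the projects below; the message text and argument values are illustrative placeholders). The module mirrors Python's standard logging API, so debug messages only appear once the verbosity has been lowered to DEBUG:

  # Minimal sketch: enabling and emitting a debug message with tf_logging.
  # The message and arguments are illustrative placeholders.
  from tensorflow.python.platform import tf_logging as logging

  # Debug output is suppressed at the default verbosity,
  # so lower it explicitly before calling debug().
  logging.set_verbosity(logging.DEBUG)

  # printf-style formatting: arguments are interpolated only if the
  # DEBUG level is actually enabled.
  logging.debug('Processing op: %s (step %d)', 'my_op', 42)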

Example 1: _connect_ops

  def _connect_ops(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug("Finalizing op: %s", op.name)
      op_ = info.transformed_ops[op]

      # pylint: disable=protected-access
      if op_.inputs:
        raise ValueError("The newly transformed op should not have "
                         "any inputs yet: {}".format(op_.name))
      inputs_ = [self._transformed_t(info, t) for t in op.inputs]
      for t in inputs_:
        op_._add_input(t)

      # Finalize original op.
      if op._original_op:
        original_op = info.transform_original_op_handler(info, op._original_op)
        if original_op is None:
          logging.debug("Could not find original op for: %s", op_.name)
        else:
          op_._original_op = original_op

      # Finalize control inputs:
      control_inputs_ = [self.transform_control_input_handler(info, ci)
                         for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
      reroute.add_control_inputs(op_, control_inputs_)
Developer: 1000sprites | Project: tensorflow | Lines: 27 | Source: transform.py


Example 2: transform

  def transform(self, feature_column):
    """Returns a Tensor which represents given feature_column.

    Args:
      feature_column: An instance of FeatureColumn.

    Returns:
      A Tensor which represents given feature_column. It may create a new Tensor
      or re-use an existing one.

    Raises:
      ValueError: if FeatureColumn cannot be handled by this Transformer.
    """
    logging.debug('Transforming feature_column %s', feature_column)
    if feature_column in self._columns_to_tensors:
      # Feature_column is already transformed.
      return self._columns_to_tensors[feature_column]

    feature_column.insert_transformed_feature(self._columns_to_tensors)

    if feature_column not in self._columns_to_tensors:
      raise ValueError('Column {} is not supported.'.format(
          feature_column.name))

    return self._columns_to_tensors[feature_column]
Developer: kdavis-mozilla | Project: tensorflow | Lines: 25 | Source: feature_column_ops.py


Example 3: _transformed_t

  def _transformed_t(self, info, t, consumer_op):
    """Return tre transformed tensor of `t`."""
    if t in info.transformed_ts:
      # If op is in the subgraph, just return its transformed counterpart.
      return info.transformed_ts[t]

    if t in info.sgv_inputs_set:
      # `t` is an input of the subgraph.
      return self.transform_external_input_handler(info, t)
    elif t.op in info.ops:
      # `t` is an internal tensor but is not transformed yet because it
      # belongs to a graph cycle.
      logging.debug("Cyclic tensor: t.name = %s", t.name)
      # Try to find an existing tensor we can use for now,
      # otherwise create one. We'll rewire this later.
      if consumer_op.type == "Merge":
        first_input = consumer_op.inputs[0]
        tmp_t_ = self._transformed_t(info, first_input, consumer_op)
      elif t.op.type == "Enter":
        enter_input = t.op.inputs[0]
        tmp_t_ = self._transformed_t(info, enter_input, consumer_op)
      else:
        with info.graph_.as_default():
          tmp_t_ = util.make_placeholder_from_tensor(t, scope=info.scope_,
                                                     prefix="geph_tmp")
        logging.debug("Created temporary placeholder: %s.", tmp_t_.name)
      # Register as temporary and return.
      info.tmp_cyclic_ts.append((t, tmp_t_, consumer_op))
      return tmp_t_
    else:
      # `t` is a hidden input of the subgraph.
      return self.transform_external_hidden_input_handler(info, t)
Developer: bikong2 | Project: tensorflow | Lines: 32 | Source: transform.py


Example 4: evaluate_and_export

    def evaluate_and_export(self):
      """Evaluate and (maybe) export the current model.

      Returns:
        Evaluation results. Returns `None` if current round of evaluation is
        skipped.

      Raises:
        RuntimeError: for any unexpected internal error.
        TypeError: if evaluation result has wrong type.
      """
      latest_ckpt_path = self._estimator.latest_checkpoint()
      if not latest_ckpt_path:
        self._log_err_msg('Estimator is not trained yet. Will start an '
                          'evaluation when a checkpoint is ready.')
        return None

      if latest_ckpt_path == self._previous_ckpt_path:
        self._log_err_msg(
            'No new checkpoint ready for evaluation. Skip the current '
            'evaluation pass as evaluation results are expected to be same '
            'for the same checkpoint.')
        return None
      eval_result = self._estimator.evaluate(
          input_fn=self._eval_spec.input_fn,
          steps=self._eval_spec.steps,
          name=self._eval_spec.name,
          checkpoint_path=latest_ckpt_path,
          hooks=self._eval_spec.hooks)

      if not eval_result:
        raise RuntimeError(
            'Internal error: `Estimator.evaluate` should never return empty '
            'result.')
      if not isinstance(eval_result, dict):
        raise TypeError(
            '`Estimator.evaluate` should return dict. Given {}.'.format(
                type(eval_result)))
      if ops.GraphKeys.GLOBAL_STEP not in eval_result:
        raise RuntimeError(
            'Internal error: `Estimator.evaluate` result should have '
            '`global_step` in result. Given {}'.format(eval_result))

      is_the_final_export = (eval_result[ops.GraphKeys.GLOBAL_STEP] >=
                             self._max_training_steps
                             if self._max_training_steps else False)
      self._export_eval_result(eval_result, latest_ckpt_path,
                               is_the_final_export)

      if is_the_final_export:
        logging.debug('Calling exporter with the `is_the_final_export=True`.')
        self._is_final_export_triggered = True

      self._last_warning_time = 0
      self._previous_ckpt_path = latest_ckpt_path
      return eval_result
Developer: ilya-edrenkin | Project: tensorflow | Lines: 56 | Source: training.py


Example 5: _SetPath

  def _SetPath(self, path):
    old_path = self._path
    if old_path and not gcs.IsGCSPath(old_path):
      # We're done with the path, so store its size.
      size = io_wrapper.Size(old_path)
      logging.debug('Setting latest size of %s to %d', old_path, size)
      self._finalized_sizes[old_path] = size

    self._path = path
    self._loader = self._loader_factory(path)
Developer: 0ruben | Project: tensorflow | Lines: 10 | Source: directory_watcher.py


Example 6: __init__

 def __init__(self, file_path):
   if file_path is None:
     raise ValueError('A file path is required')
   file_path = resource_loader.readahead_file_path(file_path)
   logging.debug('Opening a record reader pointing at %s', file_path)
   self._reader = pywrap_tensorflow.PyRecordReader_New(
       compat.as_bytes(file_path), 0)
   # Store it for logging purposes.
   self._file_path = file_path
   if not self._reader:
     raise IOError('Failed to open a record reader pointing to %s' % file_path)
Developer: 0ruben | Project: tensorflow | Lines: 11 | Source: event_file_loader.py


Example 7: _input_thread_fn_for_loading

  def _input_thread_fn_for_loading(self, session, enqueue_ops, iterations):
    count = 0
    while True:
      signal = self._signal_queue.get()
      if signal == _SIGNAL.STOP:
        logging.info('Stop Infeed input thread.')
        return

      for i in range(iterations):
        logging.debug('InfeedEnqueue data for iteration (%d, %d)', count, i)
        session.run(enqueue_ops)
      count += 1
Developer: awisbith | Project: tensorflow | Lines: 12 | Source: tpu_estimator.py


Example 8: run

 def run(self):
   # Don't fetch logs or adjust timing: just ping the watchdog.
   #
   # If we hit an exception, reset our session as it is likely broken.
   while self._running:
     try:
       self._worker_manager.ping(request=None)
       time.sleep(self.ping_interval)
     except errors.OpError as e:
       # Catch any TF errors that occur so we don't stop sending heartbeats
       logging.debug('Caught error while sending heartbeat: %s', e)
       self._reset_manager()
Developer: Wajih-O | Project: tensorflow | Lines: 12 | Source: session_support.py


Example 9: Load

 def Load(self):
   # Create a temp file to hold the contents that we haven't seen yet.
   with tempfile.NamedTemporaryFile(prefix='tf-gcs-') as temp_file:
     name = temp_file.name
     logging.debug('Temp file created at %s', name)
     gcs.CopyContents(self._gcs_path, self._gcs_offset, temp_file)
     reader = pywrap_tensorflow.PyRecordReader_New(compat.as_bytes(name), 0)
     while reader.GetNext():
       event = event_pb2.Event()
       event.ParseFromString(reader.record())
       yield event
     logging.debug('No more events in %s', name)
     self._gcs_offset += reader.offset()
Developer: 0-T-0 | Project: tensorflow | Lines: 13 | Source: gcs_file_loader.py


Example 10: every_n_step_end

  def every_n_step_end(self, step, outputs):
    super(ValidationMonitor, self).every_n_step_end(step, outputs)
    # TODO(mdan): The use of step below is probably misleading.
    # The code should probably use the step from the checkpoint, because
    # that's what is being evaluated.
    if self._estimator is None:
      raise ValueError("Missing call to set_estimator.")
    # Check that we are not running evaluation on the same checkpoint.
    latest_path = saver_lib.latest_checkpoint(self._estimator.model_dir)
    if latest_path is None:
      logging.debug("Skipping evaluation since model has not been saved yet "
                    "at step %d.", step)
      return False
    if latest_path is not None and latest_path == self._latest_path:
      logging.debug("Skipping evaluation due to same checkpoint %s for step %d "
                    "as for step %d.", latest_path, step,
                    self._latest_path_step)
      return False
    self._latest_path = latest_path
    self._latest_path_step = step

    # Run evaluation and log it.
    validation_outputs = self._estimator.evaluate(
        x=self.x, y=self.y, input_fn=self.input_fn, batch_size=self.batch_size,
        steps=self.eval_steps, metrics=self.metrics, hooks=self.hooks,
        name=self.name)
    stats = []
    for name in validation_outputs:
      stats.append("%s = %s" % (name, str(validation_outputs[name])))
    logging.info("Validation (step %d): %s", step, ", ".join(stats))

    # Early stopping logic.
    if self.early_stopping_rounds is not None:
      if self.early_stopping_metric not in validation_outputs:
        raise ValueError("Metric %s missing from outputs %s." % (
            self.early_stopping_metric, set(validation_outputs.keys())))
      current_value = validation_outputs[self.early_stopping_metric]
      if (self._best_value is None or (self.early_stopping_metric_minimize and
                                       (current_value < self._best_value)) or
          (not self.early_stopping_metric_minimize and
           (current_value > self._best_value))):
        self._best_value = current_value
        self._best_value_step = step
      stop_now = (step - self._best_value_step >= self.early_stopping_rounds)
      if stop_now:
        logging.info("Stopping. Best step: {} with {} = {}."
                     .format(self._best_value_step,
                             self.early_stopping_metric, self._best_value))
        self._early_stopped = True
        return True
    return False
Developer: Immexxx | Project: tensorflow | Lines: 51 | Source: monitors.py


Example 11: Load

  def Load(self):
    """Loads all new values from disk.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All values that were written to disk that have not been yielded yet.
    """
    while self._reader.GetNext():
      event = event_pb2.Event()
      event.ParseFromString(self._reader.record())
      yield event
    logging.debug('No more events in %s', self._file_path)
Developer: 0ruben | Project: tensorflow | Lines: 14 | Source: event_file_loader.py


Example 12: copy_op_handler

def copy_op_handler(info, op, copy_shape=True):
  """Copy a `tf.Operation`.

  Args:
    info: Transform._TmpInfo instance.
    op: the `tf.Operation` to be copied.
    copy_shape: also copy the shape of the tensor
  Returns:
    A `(op, op_outputs)` tuple containing the transformed op and its outputs.
  """
  # pylint: disable=protected-access

  # Clone the node def:
  node_def_ = deepcopy(op._node_def)

  # Transform name:
  name_ = info.new_name(op.name)
  name_ = info.graph_.unique_name(name_)
  node_def_.name = name_

  # Copy the other inputs needed for initialization
  output_types_ = op._output_types[:]
  input_types_ = op._input_types[:]

  # Make a copy of the op_def too.
  # It's unique to every _type_ of Operation.
  op_def_ = deepcopy(op._op_def)

  # Initialize a new Operation instance
  op_ = tf_ops.Operation(node_def_, info.graph_, [], output_types_,
                         [], input_types_, None, op_def_)

  # copy the shape over
  if copy_shape:
    for t, t_ in zip(op.outputs, op_.outputs):
      t_.set_shape(t.get_shape())

  # Finalize original op.
  if op._original_op:
    original_op = info.transform_original_op_handler(info, op._original_op)
    if original_op is None:
      logging.debug("Could not find original op of: %s", op_.name)
    else:
      op_._original_op = original_op

  # Add op to the graph
  info.graph_._add_op(op_)

  return op_, op_.outputs
Developer: AlbertXiebnu | Project: tensorflow | Lines: 49 | Source: transform.py


Example 13: evaluate_and_export

    def evaluate_and_export(self):
      """Evaluate and (maybe) export the current model.

      Returns:
        A tuple of `EvalResult` instance and the export results.

      Raises:
        RuntimeError: for any unexpected internal error.
        TypeError: if evaluation result has wrong type.
      """
      latest_ckpt_path = self._estimator.latest_checkpoint()
      if not latest_ckpt_path:
        self._log_err_msg('Estimator is not trained yet. Will start an '
                          'evaluation when a checkpoint is ready.')
        return _EvalResult(status=_EvalStatus.MISSING_CHECKPOINT), []

      if latest_ckpt_path == self._previous_ckpt_path:
        self._log_err_msg(
            'No new checkpoint ready for evaluation. Skip the current '
            'evaluation pass as evaluation results are expected to be same '
            'for the same checkpoint.')
        return _EvalResult(status=_EvalStatus.NO_NEW_CHECKPOINT), []

      metrics = self._estimator.evaluate(
          input_fn=self._eval_spec.input_fn,
          steps=self._eval_spec.steps,
          name=self._eval_spec.name,
          checkpoint_path=latest_ckpt_path,
          hooks=self._eval_spec.hooks)

      # _EvalResult validates the metrics.
      eval_result = _EvalResult(
          status=_EvalStatus.EVALUATED,
          metrics=metrics,
          checkpoint_path=latest_ckpt_path)

      is_the_final_export = (
          eval_result.metrics[ops.GraphKeys.GLOBAL_STEP] >=
          self._max_training_steps if self._max_training_steps else False)
      export_results = self._export_eval_result(eval_result,
                                                is_the_final_export)

      if is_the_final_export:
        logging.debug('Calling exporter with the `is_the_final_export=True`.')
        self._is_final_export_triggered = True

      self._last_warning_time = 0
      self._previous_ckpt_path = latest_ckpt_path
      return eval_result, export_results
Developer: AnishShah | Project: tensorflow | Lines: 49 | Source: training.py


Example 14: ping

  def ping(self, request=None, timeout_in_ms=5000):
    """Ping all workers, returning the parsed status results."""
    if request is None:
      request = event_pb2.WorkerHeartbeatRequest()

    options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
    results = self._session.run(
        self._ops,
        feed_dict={self._request_placeholder: request.SerializeToString()},
        options=options)
    parsed_results = [
        event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
        for res_pb in results
    ]
    logging.debug('Ping results: %s', parsed_results)
    return parsed_results
Developer: AnishShah | Project: tensorflow | Lines: 16 | Source: session_support.py


Example 15: _copy_ops

  def _copy_ops(self, info):
    """Copy ops without connecting them."""
    for op in info.sgv.ops:
      logging.debug("Copying op: %s", op.name)
      # TODO(fkp): return a subgraph?
      op_, op_outputs_ = self.transform_op_handler(info, op)
      if op is op_:
        raise ValueError("In-place tranformation not allowed.")

      # Process op.
      info.transformed_ops[op] = op_
      self.assign_collections_handler(info, op, op_)

      # Process output tensors.
      for op_output, op_output_ in zip(op.outputs, op_outputs_):
        info.transformed_ts[op_output] = op_output_
        self.assign_collections_handler(info, op_output, op_output_)
Developer: AlbertXiebnu | Project: tensorflow | Lines: 17 | Source: transform.py


Example 16: _Create

def _Create(baseclass, subclass_name, *args, **kwargs):
  """Creates an instance of a named subclass.

  Args:
    baseclass: The expected base class.
    subclass_name: The fully-qualified type name of the subclass to create.
    *args: Passed to the subclass constructor.
    **kwargs: Passed to the subclass constructor.

  Returns:
    An instance of the named subclass, or None on error.
  """
  subclass = _GetClass(subclass_name)
  if subclass is None:
    return None  # _GetClass() already logged an error
  if not issubclass(subclass, baseclass):
    logging.debug('Class "%s" is not a subclass of "%s"', subclass_name,
                  baseclass.__name__)
    return None
  return subclass(*args, **kwargs)
Developer: JiweiHe | Project: models | Lines: 20 | Source: registry.py


Example 17: _SetPath

  def _SetPath(self, path):
    """Sets the current path to watch for new events.

    This also records the size of the old path, if any. If the size can't be
    found, an error is logged.

    Args:
      path: The full path of the file to watch.
    """
    old_path = self._path
    if old_path and not gcs.IsGCSPath(old_path):
      try:
        # We're done with the path, so store its size.
        size = io_wrapper.Size(old_path)
        logging.debug('Setting latest size of %s to %d', old_path, size)
        self._finalized_sizes[old_path] = size
      except (IOError, OSError) as e:
        logging.error('Unable to get size of %s: %s', old_path, e)

    self._path = path
    self._loader = self._loader_factory(path)
Developer: 10imaging | Project: tensorflow | Lines: 21 | Source: directory_watcher.py


Example 18: _subscribe

def _subscribe(tensor, side_effects, control_cache):
  """Helper method that subscribes a single tensor to a list of side_effects.

  This method will check if the given tensor has already been subscribed or if
  it's a tensor returned by a previous call to `subscribe()` and, if so, will
  reuse the existing identity op, appending the given side effects to the list
  of existing ones.

  Args:
    tensor: The `tf.Tensor` to be subscribed.
    side_effects: List of side_effect functions, see subscribe for details.
    control_cache: `_ControlOutputCache` helper to get control_outputs faster.

  Returns:
    The modified replacement to the passed in tensor which triggers the side
    effects or the given tensor, if it has already been subscribed.
  """
  # Check if the given tensor has a numpy compatible type (see dtypes.py).
  # If not, we cannot subscribe it, so we just return the original tensor.
  if not tensor.dtype.is_numpy_compatible:
    logging.debug(('Tensor {} has an un-supported {} type and cannot be '
                   'subscribed.').format(tensor.name, tensor.dtype))
    return tensor

  if _is_subscribed_identity(tensor):
    return _subscribe_extend(tensor, side_effects)

  # Check if the given tensor has already been subscribed by inspecting its
  # outputs.
  name_scope = tensor.op.name + '/subscription/Identity'
  consumers = tensor.consumers()
  matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
  assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
                                  'op connected to it').format(tensor.op.name)
  if len(matching_ops) == 1:
    candidate_tensor = matching_ops[0].outputs[0]
    if _is_subscribed_identity(candidate_tensor):
      return _subscribe_extend(candidate_tensor, side_effects)

  return _subscribe_new(tensor, side_effects, control_cache)
Developer: aritratony | Project: tensorflow | Lines: 40 | Source: subscribe.py


Example 19: _connect_control_inputs

  def _connect_control_inputs(self, info):
    """Connect the previously copied ops."""
    for op in info.sgv.ops:
      logging.debug("Connecting control inputs of op: %s", op.name)
      op_ = info.transformed_ops[op]

      # Finalize original op.
      # TODO(fkp): Stop worrying about _original_op and remove this code?
      # pylint: disable=protected-access
      if op._original_op:
        original_op = self.transform_original_op_handler(info, op._original_op)
        if original_op is None:
          logging.debug("Could not find original op for: %s", op_.name)
        else:
          op_._original_op = original_op
      # pylint: enable=protected-access

      # Finalize control inputs:
      control_inputs_ = [self.transform_control_input_handler(info, ci)
                         for ci in op.control_inputs]
      control_inputs_ = [ci for ci in control_inputs_ if ci is not None]
      reroute.add_control_inputs(op_, control_inputs_)
Developer: bikong2 | Project: tensorflow | Lines: 22 | Source: transform.py


Example 20: Load

  def Load(self):
    """Loads all new values from disk.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All values that were written to disk that have not been yielded yet.
    """
    while True:
      try:
        with errors.raise_exception_on_not_ok_status() as status:
          self._reader.GetNext(status)
      except (errors.DataLossError, errors.OutOfRangeError):
        # We ignore partial read exceptions, because a record may be truncated.
        # PyRecordReader holds the offset prior to the failed read, so retrying
        # will succeed.
        break
      event = event_pb2.Event()
      event.ParseFromString(self._reader.record())
      yield event
    logging.debug('No more events in %s', self._file_path)
Developer: AlbertXiebnu | Project: tensorflow | Lines: 22 | Source: event_file_loader.py



Note: The tensorflow.python.platform.tf_logging.debug examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Please do not reproduce without permission.

