Python file_io.recursive_create_dir Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.lib.io.file_io.recursive_create_dir. If you have been wondering what recursive_create_dir does, how to use it, or where to find examples of it, the curated code samples below should answer those questions.



The following presents 20 code examples of the recursive_create_dir function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
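
Before diving into the examples, here is a minimal sketch of the function itself, using a hypothetical local path: recursive_create_dir creates the target directory together with any missing parents, much like mkdir -p, and succeeds silently if the directory already exists (the behavior exercised by the test in Example 12 below).

from tensorflow.python.lib.io import file_io

# Create nested directories in one call; missing parents are created too.
file_io.recursive_create_dir('/tmp/demo/nested/dirs')
# Calling it again on an existing directory is a no-op, not an error,
# which is why many examples below call it unconditionally.
file_io.recursive_create_dir('/tmp/demo/nested/dirs')
assert file_io.file_exists('/tmp/demo/nested/dirs')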

Example 1: local_analysis

def local_analysis(args):
  if args.analysis:
    # Already analyzed.
    return

  if not args.schema or not args.features:
    raise ValueError('Either --analysis, or both --schema and --features must be provided.')

  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_spec = tf_config.get('cluster', {})
  if len(cluster_spec.get('worker', [])) > 0:
    raise ValueError('If "schema" and "features" are provided, local analysis will run and ' +
                     'only BASIC scale-tier (no workers node) is supported.')

  if cluster_spec and not (args.schema.startswith('gs://') and args.features.startswith('gs://')):
    raise ValueError('Cloud trainer requires GCS paths for --schema and --features.')

  print('Running analysis.')
  schema = json.loads(file_io.read_file_to_string(args.schema).decode())
  features = json.loads(file_io.read_file_to_string(args.features).decode())
  args.analysis = os.path.join(args.job_dir, 'analysis')
  args.transform = True
  file_io.recursive_create_dir(args.analysis)
  feature_analysis.run_local_analysis(args.analysis, args.train, schema, features)
  print('Analysis done.')
Developer: googledatalab, Project: pydatalab, Lines: 25, Source: task.py


Example 2: _write_object_graph

def _write_object_graph(saveable_view, export_dir, asset_file_def_index):
  """Save a SavedObjectGraph proto for `root`."""
  # SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
  # checkpoint. It will eventually go into the SavedModel.
  proto = saved_object_graph_pb2.SavedObjectGraph()
  saveable_view.fill_object_graph_proto(proto)

  coder = nested_structure_coder.StructureCoder()
  for concrete_function in saveable_view.concrete_functions:
    serialized = function_serialization.serialize_concrete_function(
        concrete_function, saveable_view.captured_tensor_node_ids, coder)
    if serialized is not None:
      proto.concrete_functions[concrete_function.name].CopyFrom(
          serialized)

  for obj, obj_proto in zip(saveable_view.nodes, proto.nodes):
    _write_object_proto(obj, obj_proto, asset_file_def_index)

  extra_asset_dir = os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
  file_io.recursive_create_dir(extra_asset_dir)
  object_graph_filename = os.path.join(
      extra_asset_dir, compat.as_bytes("object_graph.pb"))
  file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
Developer: terrytangyuan, Project: tensorflow, Lines: 25, Source: save.py


Example 3: write_graph

def write_graph(graph_def, logdir, name, as_text=True):
  """Writes a graph proto to a file.

  The graph is written as a binary proto unless `as_text` is `True`.

  ```python
  v = tf.Variable(0, name='my_variable')
  sess = tf.Session()
  tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
  ```

  Args:
    graph_def: A `GraphDef` protocol buffer.
    logdir: Directory where to write the graph. This can refer to remote
      filesystems, such as Google Cloud Storage (GCS).
    name: Filename for the graph.
    as_text: If `True`, writes the graph as an ASCII proto.
  """
  # GCS does not have the concept of a directory at the moment.
  if not file_io.file_exists(logdir) and not logdir.startswith("gs:"):
    file_io.recursive_create_dir(logdir)
  path = os.path.join(logdir, name)
  if as_text:
    file_io.write_string_to_file(path, str(graph_def))
  else:
    file_io.write_string_to_file(path, graph_def.SerializeToString())
Developer: 2020zyc, Project: tensorflow, Lines: 26, Source: training_util.py


Example 4: parse_arguments

def parse_arguments(argv):
  """Parse command line arguments.

  Args:
    argv: list of command line arguments, including program name.

  Returns:
    An argparse Namespace object.
  """
  parser = argparse.ArgumentParser(
      description='Runs Preprocessing on structured CSV data.')
  parser.add_argument('--input-file-pattern',
                      type=str,
                      required=True,
                      help='Input CSV file names. May contain a file pattern')
  parser.add_argument('--output-dir',
                      type=str,
                      required=True,
                      help='Google Cloud Storage path in which to place outputs.')
  parser.add_argument('--schema-file',
                      type=str,
                      required=True,
                      help=('BigQuery json schema file'))

  args = parser.parse_args(args=argv[1:])

  # Make sure the output folder exists if it is a local folder.
  file_io.recursive_create_dir(args.output_dir)

  return args
Developer: googledatalab, Project: pydatalab, Lines: 30, Source: local_preprocess.py


Example 5: test_numerics

  def test_numerics(self):
    test_folder = os.path.join(self._bucket_root, 'test_numerics')
    input_file_path = os.path.join(test_folder, 'input.csv')
    output_folder = os.path.join(test_folder, 'test_output')
    file_io.recursive_create_dir(output_folder)

    file_io.write_string_to_file(
      input_file_path,
      '\n'.join(['%s,%s' % (i, 10 * i + 0.5) for i in range(100)]))

    schema = [{'name': 'col1', 'type': 'INTEGER'},
              {'name': 'col2', 'type': 'FLOAT'}]
    features = {'col1': {'transform': 'scale', 'source_column': 'col1'},
                'col2': {'transform': 'identity', 'source_column': 'col2'}}
    analyze.run_cloud_analysis(
        output_dir=output_folder,
        csv_file_pattern=input_file_path,
        bigquery_table=None,
        schema=schema,
        inverted_features=analyze.invert_features(features))

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())

    self.assertEqual(stats['num_examples'], 100)
    col = stats['column_stats']['col1']
    self.assertAlmostEqual(col['max'], 99.0)
    self.assertAlmostEqual(col['min'], 0.0)
    self.assertAlmostEqual(col['mean'], 49.5)

    col = stats['column_stats']['col2']
    self.assertAlmostEqual(col['max'], 990.5)
    self.assertAlmostEqual(col['min'], 0.5)
    self.assertAlmostEqual(col['mean'], 495.5)
Developer: javiervicho, Project: pydatalab, Lines: 35, Source: test_analyze.py


Example 6: main

def main(argv=None):
  args = parse_arguments(sys.argv if argv is None else argv)

  if args.schema:
    schema = json.loads(
        file_io.read_file_to_string(args.schema).decode())
  else:
    import google.datalab.bigquery as bq
    schema = bq.Table(args.bigquery).schema._bq_schema
  features = json.loads(
      file_io.read_file_to_string(args.features).decode())

  file_io.recursive_create_dir(args.output)

  if args.cloud:
    run_cloud_analysis(
        output_dir=args.output,
        csv_file_pattern=args.csv,
        bigquery_table=args.bigquery,
        schema=schema,
        features=features)
  else:
    feature_analysis.run_local_analysis(
        output_dir=args.output,
        csv_file_pattern=args.csv,
        schema=schema,
        features=features)
Developer: googledatalab, Project: pydatalab, Lines: 27, Source: analyze.py


Example 7: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_source_filepath_list = self._save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_source_filepath_list:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = os.path.join(
        compat.as_bytes(self._export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY))

    if not file_io.file_exists(assets_destination_dir):
      file_io.recursive_create_dir(assets_destination_dir)

    # Copy each asset from source path to destination path.
    for asset_source_filepath in asset_source_filepath_list:
      asset_source_filename = os.path.basename(asset_source_filepath)

      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_source_filename))
      file_io.copy(
          asset_source_filepath, asset_destination_filepath, overwrite=True)

    tf_logging.info("Assets written to: %s", assets_destination_dir)
Developer: Qstar, Project: tensorflow, Lines: 31, Source: builder.py


Example 8: _write_object_graph

def _write_object_graph(saveable_view, export_dir, asset_file_def_index):
  """Save a SavedObjectGraph proto for `root`."""
  # SavedObjectGraph is similar to the CheckpointableObjectGraph proto in the
  # checkpoint. It will eventually go into the SavedModel.
  proto = saved_object_graph_pb2.SavedObjectGraph()
  saveable_view.fill_object_graph_proto(proto)

  node_ids = util.ObjectIdentityDictionary()
  for i, obj in enumerate(saveable_view.nodes):
    node_ids[obj] = i
    if resource_variable_ops.is_resource_variable(obj):
      node_ids[obj.handle] = i
    elif isinstance(obj, tracking.TrackableAsset):
      node_ids[obj.asset_path.handle] = i

  for obj, obj_proto in zip(saveable_view.nodes, proto.nodes):
    _write_object_proto(obj, obj_proto, asset_file_def_index, node_ids)

  extra_asset_dir = os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes(constants.EXTRA_ASSETS_DIRECTORY))
  file_io.recursive_create_dir(extra_asset_dir)
  object_graph_filename = os.path.join(
      extra_asset_dir, compat.as_bytes("object_graph.pb"))
  file_io.write_string_to_file(object_graph_filename, proto.SerializeToString())
Developer: rmlarsen, Project: tensorflow, Lines: 25, Source: save.py


Example 9: save

  def save(self, as_text=False):
    """Writes a `SavedModel` protocol buffer to disk.

    The function writes the SavedModel protocol buffer to the export directory
    in serialized format.

    Args:
      as_text: Writes the SavedModel protocol buffer in text format to disk.

    Returns:
      The path to which the SavedModel protocol buffer was written.
    """
    if not file_io.file_exists(self._export_dir):
      file_io.recursive_create_dir(self._export_dir)

    if as_text:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
      file_io.write_string_to_file(path, str(self._saved_model))
    else:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
      file_io.write_string_to_file(path, self._saved_model.SerializeToString())
    tf_logging.info("SavedModel written to: %s", path)

    return path
Developer: 1000sprites, Project: tensorflow, Lines: 28, Source: builder_impl.py


Example 10: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_source_filepath_list = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_source_filepath_list:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = os.path.join(
        compat.as_bytes(self._export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY))

    if not file_io.file_exists(assets_destination_dir):
      file_io.recursive_create_dir(assets_destination_dir)

    # Copy each asset from source path to destination path.
    for asset_source_filepath in asset_source_filepath_list:
      asset_source_filename = os.path.basename(asset_source_filepath)

      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_source_filename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s", assets_destination_dir)
Developer: 1000sprites, Project: tensorflow, Lines: 35, Source: builder_impl.py


Example 11: add_meta_graph_and_variables

  def add_meta_graph_and_variables(self,
                                   sess,
                                   tags,
                                   signature_def_map=None,
                                   assets_collection=None,
                                   legacy_init_op=None):
    """Adds the current meta graph to the SavedModel and saves variables.

    Creates a Saver to save the variables from the provided session. Exports the
    corresponding meta graph def. This function assumes that the variables to be
    saved have been initialized. For a given `SavedModelBuilder`, this API must
    be called exactly once and for the first meta graph to save. For subsequent
    meta graph defs to be added, the `add_meta_graph()` API must be used.

    Args:
      sess: The TensorFlow session from which to save the meta graph and
        variables.
      tags: The set of tags with which to save the meta graph.
      signature_def_map: The map of signature defs to add to the meta graph
        def.
      assets_collection: Assets collection to be saved with SavedModel.
      legacy_init_op: Op or group of ops to execute after the restore op upon a
        load.
    """
    if self._has_saved_variables:
      raise AssertionError("Variables and assets have already been saved. "
                           "Please invoke `add_meta_graph()` instead.")

    # Save asset files and write them to disk, if any.
    self._save_and_write_assets(assets_collection)

    # Create the variables sub-directory, if it does not exist.
    variables_dir = os.path.join(
        compat.as_text(self._export_dir),
        compat.as_text(constants.VARIABLES_DIRECTORY))
    if not file_io.file_exists(variables_dir):
      file_io.recursive_create_dir(variables_dir)

    variables_path = os.path.join(
        compat.as_text(variables_dir),
        compat.as_text(constants.VARIABLES_FILENAME))

    # Add legacy init op to the SavedModel.
    self._maybe_add_legacy_init_op(legacy_init_op)

    # Save the variables and export meta graph def.
    saver = tf_saver.Saver(
        variables.all_variables(),
        sharded=True,
        write_version=saver_pb2.SaverDef.V2)
    saver.save(sess, variables_path, write_meta_graph=False)
    meta_graph_def = saver.export_meta_graph()

    # Tag the meta graph def and add it to the SavedModel.
    self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)

    # Mark this instance of SavedModel as having saved variables, such that
    # subsequent attempts to save variables will fail.
    self._has_saved_variables = True
Developer: caikehe, Project: tensorflow, Lines: 59, Source: builder.py
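
The docstring above stresses call ordering: add_meta_graph_and_variables() must be called exactly once, for the first meta graph, and later meta graphs go through add_meta_graph(). A hypothetical TF 1.x usage sketch of the surrounding builder API (export_dir and the variable are placeholders):

import tensorflow as tf

export_dir = '/tmp/my_saved_model'  # hypothetical placeholder path
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.Session(graph=tf.Graph()) as sess:
  v = tf.Variable(42, name='my_variable')
  sess.run(tf.global_variables_initializer())
  # Exactly one call saves variables; it creates the variables/ sub-directory
  # via recursive_create_dir before writing the checkpoint.
  builder.add_meta_graph_and_variables(
      sess, [tf.saved_model.tag_constants.SERVING])

# Writes the SavedModel protocol buffer itself (see save() in Example 9).
builder.save()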


Example 12: testCreateRecursiveDir

  def testCreateRecursiveDir(self):
    dir_path = os.path.join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
    file_io.recursive_create_dir(dir_path)
    file_path = os.path.join(dir_path, "temp_file")
    file_io.FileIO(file_path, mode="w").write("testing")
    self.assertTrue(file_io.file_exists(file_path))
    file_io.delete_recursively(os.path.join(self._base_dir, "temp_dir"))
    self.assertFalse(file_io.file_exists(file_path))
Developer: JamesFysh, Project: tensorflow, Lines: 8, Source: file_io_test.py


Example 13: get_or_create_assets_dir

def get_or_create_assets_dir(export_dir):
  """Return assets sub-directory, or create one if it doesn't exist."""
  assets_destination_dir = get_assets_dir(export_dir)

  if not file_io.file_exists(assets_destination_dir):
    file_io.recursive_create_dir(assets_destination_dir)

  return assets_destination_dir
Developer: ZhangXinNan, Project: tensorflow, Lines: 8, Source: utils_impl.py


Example 14: testMultipleColumnsRaw

  def testMultipleColumnsRaw(self):
    """Test training starting from raw csv."""
    output_dir = tempfile.mkdtemp()
    try:
      features = {
          'num': {'transform': 'identity'},
          'num2': {'transform': 'key', 'source_column': 'num'},
          'target': {'transform': 'target'},
          'text': {'transform': 'bag_of_words'},
          'text2': {'transform': 'multi_hot', 'source_column': 'text'},
          'text3': {'transform': 'tfidf', 'source_column': 'text'},
          'text4': {'transform': 'key', 'source_column': 'text'}}
      schema = [
          {'name': 'num', 'type': 'integer'},
          {'name': 'target', 'type': 'float'},
          {'name': 'text', 'type': 'string'}]
      data = ['1,2,hello world\n', '4,8,bye moon\n', '5,10,hello moon\n', '11,22,moon moon\n']
      file_io.recursive_create_dir(output_dir)
      file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
                                   json.dumps(schema, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
                                   json.dumps(features, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
                                   ''.join(data))

      cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
             '--output=' + os.path.join(output_dir, 'analysis'),
             '--csv=' + os.path.join(output_dir, 'data.csv'),
             '--schema=' + os.path.join(output_dir, 'schema.json'),
             '--features=' + os.path.join(output_dir, 'features.json')]
      subprocess.check_call(' '.join(cmd), shell=True)

      cmd = ['cd %s && ' % CODE_PATH,
             'python -m trainer.task',
             '--train=' + os.path.join(output_dir, 'data.csv'),
             '--eval=' + os.path.join(output_dir, 'data.csv'),
             '--job-dir=' + os.path.join(output_dir, 'training'),
             '--analysis=' + os.path.join(output_dir, 'analysis'),
             '--model=linear_regression',
             '--train-batch-size=4',
             '--eval-batch-size=4',
             '--max-steps=200',
             '--learning-rate=0.1',
             '--transform']

      subprocess.check_call(' '.join(cmd), shell=True)

      result = run_exported_model(
          model_path=os.path.join(output_dir, 'training', 'model'),
          csv_data=['20,hello moon'])

      # Check that the key columns were passed through.
      self.assertEqual(20, result['num2'])
      self.assertEqual('hello moon', result['text4'])
    finally:
      shutil.rmtree(output_dir)
Developer: googledatalab, Project: pydatalab, Lines: 56, Source: test_training.py


Example 15: testManyKeys

  def testManyKeys(self):
    output_dir = tempfile.mkdtemp()
    try:
      features = {
          'keyint': {'transform': 'key'},
          'keyfloat': {'transform': 'key'},
          'keystr': {'transform': 'key'},
          'num': {'transform': 'identity'},
          'target': {'transform': 'target'}}
      schema = [
          {'name': 'keyint', 'type': 'integer'},
          {'name': 'keyfloat', 'type': 'float'},
          {'name': 'keystr', 'type': 'string'},
          {'name': 'num', 'type': 'integer'},
          {'name': 'target', 'type': 'float'}]
      data = ['1,1.5,one,1,2\n', '2,2.5,two,4,8\n', '3,3.5,three,5,10\n']
      file_io.recursive_create_dir(output_dir)
      file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
                                   json.dumps(schema, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
                                   json.dumps(features, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
                                   ''.join(data))

      cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
             '--output=' + os.path.join(output_dir, 'analysis'),
             '--csv=' + os.path.join(output_dir, 'data.csv'),
             '--schema=' + os.path.join(output_dir, 'schema.json'),
             '--features=' + os.path.join(output_dir, 'features.json')]
      subprocess.check_call(' '.join(cmd), shell=True)

      cmd = ['cd %s && ' % CODE_PATH,
             'python -m trainer.task',
             '--train=' + os.path.join(output_dir, 'data.csv'),
             '--eval=' + os.path.join(output_dir, 'data.csv'),
             '--job-dir=' + os.path.join(output_dir, 'training'),
             '--analysis=' + os.path.join(output_dir, 'analysis'),
             '--model=linear_regression',
             '--train-batch-size=4',
             '--eval-batch-size=4',
             '--max-steps=2000',
             '--transform']

      subprocess.check_call(' '.join(cmd), shell=True)

      result = run_exported_model(
          model_path=os.path.join(output_dir, 'training', 'model'),
          csv_data=['7,4.5,hello,1'])
      self.assertEqual(7, result['keyint'])
      self.assertAlmostEqual(4.5, result['keyfloat'])
      self.assertEqual('hello', result['keystr'])
    finally:
      shutil.rmtree(output_dir)
Developer: parthea, Project: pydatalab, Lines: 53, Source: test_training.py


Example 16: end

    def end(self, session=None):
        super(ExportLastModelMonitor, self).end(session)

        file_io.recursive_create_dir(self._dest)
        _recursive_copy(self.last_export_dir, self._dest)

        if self._additional_assets:
            # TODO(rhaertel): use the actual assets directory. For now, metadata.yaml
            # must be a sibling of the export.meta file.
            assets_dir = self._dest
            file_io.create_dir(assets_dir)
            _copy_all(self._additional_assets, assets_dir)
Developer: obulpathi, Project: cloud, Lines: 12, Source: util.py


Example 17: testTopNZero

  def testTopNZero(self):
    """Test top_n=0 gives all the classes."""
    output_dir = tempfile.mkdtemp()
    try:
      features = {
          'num': {'transform': 'identity'},
          'target': {'transform': 'target'}}
      schema = [
          {'name': 'num', 'type': 'integer'},
          {'name': 'target', 'type': 'string'}]
      data = ['1,1\n', '4,2\n', '5,3\n', '11,1\n']
      file_io.recursive_create_dir(output_dir)
      file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
                                   json.dumps(schema, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
                                   json.dumps(features, indent=2))
      file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
                                   ''.join(data))

      cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
             '--output=' + os.path.join(output_dir, 'analysis'),
             '--csv=' + os.path.join(output_dir, 'data.csv'),
             '--schema=' + os.path.join(output_dir, 'schema.json'),
             '--features=' + os.path.join(output_dir, 'features.json')]
      subprocess.check_call(' '.join(cmd), shell=True)

      cmd = ['cd %s && ' % CODE_PATH,
             'python -m trainer.task',
             '--train=' + os.path.join(output_dir, 'data.csv'),
             '--eval=' + os.path.join(output_dir, 'data.csv'),
             '--job-dir=' + os.path.join(output_dir, 'training'),
             '--analysis=' + os.path.join(output_dir, 'analysis'),
             '--model=linear_classification',
             '--train-batch-size=4',
             '--eval-batch-size=4',
             '--max-steps=1',
             '--top-n=0',  # This is the parameter under test.
             '--learning-rate=0.1',
             '--transform']

      subprocess.check_call(' '.join(cmd), shell=True)

      result = run_exported_model(
          model_path=os.path.join(output_dir, 'training', 'model'),
          csv_data=['20'])

      keys = result.keys()
      self.assertIn('predicted', keys)
      self.assertIn('1', keys)
      self.assertIn('2', keys)
      self.assertIn('3', keys)
    finally:
      shutil.rmtree(output_dir)
Developer: googledatalab, Project: pydatalab, Lines: 53, Source: test_training.py


Example 18: test_text

  def test_text(self):
    test_folder = os.path.join(self._bucket_root, 'test_text')
    input_file_path = os.path.join(test_folder, 'input.csv')
    output_folder = os.path.join(test_folder, 'test_output')
    file_io.recursive_create_dir(output_folder)

    csv_file = ['the quick brown fox,raining in kir,cat1|cat2,true',
                'quick   brown brown chicken,raining in pdx,cat2|cat3|cat4,false']
    file_io.write_string_to_file(
      input_file_path,
      '\n'.join(csv_file))

    schema = [{'name': 'col1', 'type': 'STRING'},
              {'name': 'col2', 'type': 'STRING'},
              {'name': 'col3', 'type': 'STRING'},
              {'name': 'col4', 'type': 'STRING'}]
    features = {'col1': {'transform': 'bag_of_words', 'source_column': 'col1'},
                'col2': {'transform': 'tfidf', 'source_column': 'col2'},
                'col3': {'transform': 'multi_hot', 'source_column': 'col3', 'separator': '|'},
                'col4': {'transform': 'target'}}
    analyze.run_cloud_analysis(
        output_dir=output_folder,
        csv_file_pattern=input_file_path,
        bigquery_table=None,
        schema=schema,
        features=features)

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())
    self.assertEqual(stats['column_stats']['col1']['vocab_size'], 5)
    self.assertEqual(stats['column_stats']['col2']['vocab_size'], 4)
    self.assertEqual(stats['column_stats']['col3']['vocab_size'], 4)

    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder,
                     analyze.constant.VOCAB_ANALYSIS_FILE % 'col1'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['col1', 'count'])
    self.assertEqual(vocab['col1'].tolist(),
                     ['brown', 'quick', 'chicken', 'fox', 'the', ])
    self.assertEqual(vocab['count'].tolist(), [2, 2, 1, 1, 1])

    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder,
                     analyze.constant.VOCAB_ANALYSIS_FILE % 'col2'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['col2', 'count'])
    self.assertEqual(vocab['col2'].tolist(), ['in', 'raining', 'kir', 'pdx'])
    self.assertEqual(vocab['count'].tolist(), [2, 2, 1, 1])
Developer: googledatalab, Project: pydatalab, Lines: 52, Source: test_analyze.py


Example 19: test_categorical

  def test_categorical(self):
    test_folder = os.path.join(self._bucket_root, 'test_categorical')
    input_file_path = os.path.join(test_folder, 'input.csv')
    output_folder = os.path.join(test_folder, 'test_output')
    file_io.recursive_create_dir(output_folder)

    csv_file = ['red,car,apple', 'red,truck,pepper', 'red,van,apple', 'blue,bike,grape',
                'blue,train,apple', 'green,airplane,pepper']
    file_io.write_string_to_file(
      input_file_path,
      '\n'.join(csv_file))

    schema = [{'name': 'color', 'type': 'STRING'},
              {'name': 'transport', 'type': 'STRING'},
              {'name': 'type', 'type': 'STRING'}]
    features = {'color': {'transform': 'one_hot', 'source_column': 'color'},
                'transport': {'transform': 'embedding', 'source_column': 'transport'},
                'type': {'transform': 'target'}}
    analyze.run_cloud_analysis(
        output_dir=output_folder,
        csv_file_pattern=input_file_path,
        bigquery_table=None,
        schema=schema,
        features=features)

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())
    self.assertEqual(stats['column_stats']['color']['vocab_size'], 3)
    self.assertEqual(stats['column_stats']['transport']['vocab_size'], 6)

    # Color column.
    vocab_str = file_io.read_file_to_string(
      os.path.join(output_folder, analyze.constant.VOCAB_ANALYSIS_FILE % 'color'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['color', 'count'])
    expected_vocab = pd.DataFrame(
        {'color': ['red', 'blue', 'green'], 'count': [3, 2, 1]},
        columns=['color', 'count'])
    pd.util.testing.assert_frame_equal(vocab, expected_vocab)

    # transport column.
    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder,
                     analyze.constant.VOCAB_ANALYSIS_FILE % 'transport'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['transport', 'count'])
    self.assertEqual(vocab['count'].tolist(), [1 for i in range(6)])
    self.assertEqual(vocab['transport'].tolist(),
                     ['airplane', 'bike', 'car', 'train', 'truck', 'van'])
Developer: googledatalab, Project: pydatalab, Lines: 52, Source: test_analyze.py


Example 20: end

  def end(self, session=None):
    super(ExportLastModelMonitor, self).end(session)
    # Recursively copy the last export dir from the exporter into the
    # main export location.
    file_io.recursive_create_dir(self._final_model_location)
    _recursive_copy(self.last_export_dir, self._final_model_location)

    if self._additional_assets:
      # TODO(rhaertel): use the actual assets directory. For now, metadata.json
      # must be a sibling of the export.meta file.
      assets_dir = self._final_model_location
      file_io.create_dir(assets_dir)
      _copy_all(self._additional_assets, assets_dir)
Developer: cottrell, Project: notebooks, Lines: 13, Source: util.py



Note: The tensorflow.python.lib.io.file_io.recursive_create_dir examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not repost without permission.

