
Python tensorflow.decode_csv Code Examples


This article collects typical code examples of the Python function tensorflow.decode_csv. If you have been wondering what decode_csv does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty decode_csv code examples are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system surface better Python code examples.
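Before the collected examples, a minimal standalone sketch of the function itself may help (TF 1.x graph-mode API; the three-column record below is invented for illustration). tf.decode_csv turns a string tensor of CSV records into one tensor per column; record_defaults supplies both the fill-in value for missing fields and the dtype of each column.

import tensorflow as tf

# Hypothetical record with two float columns and one string column.
csv_line = tf.constant('5.1,3.5,setosa')
record_defaults = [[0.0], [0.0], ['']]  # one default per column; dtypes are inferred
col1, col2, col3 = tf.decode_csv(csv_line, record_defaults)

with tf.Session() as sess:
    print(sess.run([col1, col2, col3]))  # [5.1, 3.5, b'setosa']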

Example 1: read_csv_examples

  def read_csv_examples(image_dir, label_dir, batch_size=100, num_epochs=None, task_index=None, num_workers=None):
    print_log(worker_num, "num_epochs: {0}".format(num_epochs))
    # Setup queue of csv image filenames
    tf_record_pattern = os.path.join(image_dir, 'part-*')
    images = tf.gfile.Glob(tf_record_pattern)
    print_log(worker_num, "images: {0}".format(images))
    image_queue = tf.train.string_input_producer(images, shuffle=False, capacity=1000, num_epochs=num_epochs, name="image_queue")

    # Setup queue of csv label filenames
    tf_record_pattern = os.path.join(label_dir, 'part-*')
    labels = tf.gfile.Glob(tf_record_pattern)
    print_log(worker_num, "labels: {0}".format(labels))
    label_queue = tf.train.string_input_producer(labels, shuffle=False, capacity=1000, num_epochs=num_epochs, name="label_queue")

    # Setup reader for image queue
    img_reader = tf.TextLineReader(name="img_reader")
    _, img_csv = img_reader.read(image_queue)
    image_defaults = [ [1.0] for col in range(784) ]
    img = tf.pack(tf.decode_csv(img_csv, image_defaults))  # tf.pack was renamed tf.stack in TF 1.0
    # Normalize values to [0,1]
    norm = tf.constant(255, dtype=tf.float32, shape=(784,))
    image = tf.div(img, norm)
    print_log(worker_num, "image: {0}".format(image))

    # Setup reader for label queue
    label_reader = tf.TextLineReader(name="label_reader")
    _, label_csv = label_reader.read(label_queue)
    label_defaults = [ [1.0] for col in range(10) ]
    label = tf.pack(tf.decode_csv(label_csv, label_defaults))
    print_log(worker_num, "label: {0}".format(label))

    # Return a batch of examples
    return tf.train.batch([image,label], batch_size, num_threads=args.readers, name="batch_csv")
Author: Aravindreddy986 | Project: TensorFlowOnSpark | Lines: 33 | Source: mnist_dist.py


Example 2: read_fer2013

def read_fer2013(eval_data):
    """
    Read and parse the examples from the FER2013 data file
    
    Args:
        eval_data: boolean indicating whether we are using training or evaluation data
    
    Returns:
        A single example contained in an object with fields:
            height: number of rows
            width: number of columns
            depth: number of colour channels
            key: filename and record number for the example
            label: an int32 Tensor with the label in the range 0..6
            image: a [height, width, depth] int32 Tensor with the image data
    """

    class FER2013Record(object):
        pass
    result = FER2013Record()

    # Dataset dimensions
    result.height = 48
    result.width = 48
    result.depth = 1

    # Set up the reader
    filename = tf.train.string_input_producer(["FER2013 data/fer2013/fer2013.csv"])

    # read from the data file
    # training data starts on line 2 (single header line)
    # test data starts after the training data
    skip_lines = 1
    if eval_data:
        skip_lines = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    reader = tf.TextLineReader(skip_header_lines=skip_lines)

    # Read a line corresponding to an example
    result.key, value = reader.read(filename)

    # Decode the line according to its formatting
    def1 = [[0],["Empty"],["Empty"]]
    result.label, image, result.testOrTrain = tf.decode_csv(value, def1)
    # The middle column corresponds to the image data of 48x48 = 2304
    # The data is space separated hence field_delim=' '
    def2 = [[0]]*(result.height*result.width)
    image = tf.decode_csv(image, def2, field_delim=' ')
    
    image = tf.reshape(image, [result.height, result.width, -1])
    result.image = tf.cast(image, tf.uint8)

    return result
Author: sjagter | Project: Python | Lines: 52 | Source: FER2013_input.py


Example 3: parse_csv

def parse_csv(csv_row, is_serving=False):
  """Takes the string input tensor (csv) and returns a dict of rank-2 tensors.

  Takes a rank-1 tensor and converts it into rank-2 tensor, with respect to
  its data type (inferred from the metadata).

  Args:
    csv_row: rank-2 tensor of type string (csv).
    is_serving: boolean to indicate whether this function is called during
      serving or training, since the csv_row serving input is different than
      the training input (i.e., no target column).
  Returns:
    rank-2 tensor of the correct data type.
  """
  if is_serving:
    column_names = metadata.SERVING_COLUMN_NAMES
    defaults = []
    # create the defaults for the serving columns.
    for serving_feature in metadata.SERVING_COLUMN_NAMES:
      feature_index = metadata.COLUMN_NAMES.index(serving_feature)
      defaults.append(metadata.DEFAULTS[feature_index])
  else:
    column_names = metadata.COLUMN_NAMES
    defaults = metadata.DEFAULTS

  columns = tf.decode_csv(csv_row, record_defaults=defaults)
  features = dict(zip(column_names, columns))

  return features
Author: zhang01GA | Project: cloudml-samples | Lines: 29 | Source: inputs.py


Example 4: _decode

def _decode(example_batch):
    """Decode a batch of CSV lines into a feature map."""

    if FLAGS.is_predicting:
        record_defaults = [[0.0], [""], [0.0], [""], [0.0], [""], [""], [""],
                           [""], [""], [0.0], [0.0], [0.0], [""]]
    else:
        record_defaults = [[0.0], [""], [0.0], [""], [0.0], [""], [""], [""],
                           [""], [""], [0.0], [0.0], [0.0], [""], [""]]

    fields = tf.decode_csv(example_batch, record_defaults, field_delim=',')
    if FLAGS.is_predicting:
        data = {LABEL: tf.constant("")}
    else:
        data = {LABEL: fields[14]}

    data["age"] = fields[0]
    data["workclass"] = fields[1]
    data["fnlwgt"] = fields[2]
    data["education"] = fields[3]
    data["education-num"] = fields[4]
    data["marital-status"] = fields[5]
    data["occupation"] = fields[6]
    data["relationship"] = fields[7]
    data["race"] = fields[8]
    data["sex"] = fields[9]
    data["capital-gain"] = fields[10]
    data["capital-loss"] = fields[11]
    data["hours-per-week"] = fields[12]
    data["native-country"] = fields[13]

    return data
Author: ckml | Project: tf_learn | Lines: 32 | Source: inputs.py


Example 5: _decode_csv

  def _decode_csv(line):
    """Takes the string input tensor and parses it to feature dict and target.

    All the columns except the last one are treated as feature columns. The
    last column is expected to be the target.
    Only returns the target if with_target is True.

    Args:
      line: csv rows in tensor format.

    Returns:
      features: A dictionary of features with key as "column_names" from
        self._column_header.
      target: tensor of target values, taken from the last column of the file.
        This will only be returned if with_target==True.
    """
    column_header = column_names if with_target else column_names[:4]
    record_defaults = [[0.] for _ in xrange(len(column_names) - 1)]
    # Pass label as integer.
    if with_target:
      record_defaults.append([0])
    columns = tf.decode_csv(line, record_defaults=record_defaults)
    features = dict(zip(column_header, columns))
    target = features.pop(column_names[4]) if with_target else None
    return features, target
Author: zhang01GA | Project: cloudml-samples | Lines: 25 | Source: model.py


Example 6: parse_csv

  def parse_csv(value):
    tf.logging.info('Parsing {}'.format(data_file))
    columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
    features = dict(zip(_CSV_COLUMNS, columns))
    labels = features.pop('income_bracket')
    classes = tf.equal(labels, '>50K')  # binary classification
    return features, classes
Author: 812864539 | Project: models | Lines: 7 | Source: census_dataset.py
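For context, a hedged sketch of how a parse function like this is typically wired into a tf.data input pipeline (parse_csv is nested inside the input function in the original project; the buffer and batch sizes here are placeholders, not taken from the census code):

def input_fn(data_file, batch_size=40):
    # ... parse_csv from the example above would be defined here ...
    dataset = tf.data.TextLineDataset(data_file)  # one element per text line
    dataset = dataset.map(parse_csv, num_parallel_calls=5)
    dataset = dataset.shuffle(buffer_size=1000).repeat().batch(batch_size)
    return dataset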


Example 7: test_inputs

    def test_inputs(self, csv, batch_size):
        print("input csv file path: %s, batch size: %d" % (csv, batch_size))
        filename_queue = tf.train.string_input_producer([csv], shuffle=False)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, label = tf.decode_csv(serialized_example, [["path"], [0]])

        label = tf.cast(label, tf.int32)
        jpg = tf.read_file(filename)
        image = tf.image.decode_jpeg(jpg, channels=3)
        image = tf.cast(image, tf.float32)
        print "original image shape:"
        print image.get_shape()

        # resize to distort
        dist = tf.image.resize_images(image, FLAGS.scale_h, FLAGS.scale_w)
        # random crop
        dist = tf.image.resize_image_with_crop_or_pad(dist, FLAGS.input_h, FLAGS.input_w)

        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(FLAGS.num_examples_per_epoch_for_train * min_fraction_of_examples_in_queue)
        print('filling queue with %d train images before starting to train. '
              'This will take a few minutes.' % min_queue_examples)

        return self._generate_image_and_label_batch(dist, label, min_queue_examples, batch_size, shuffle=False)
Author: qiuzhangcheng | Project: InceptionV3_TensorFlow | Lines: 25 | Source: datasets.py


Example 8: read

def read(filename_queue):
  class Record(object):
    pass
  result = Record()

  reader = tf.TextLineReader()
  result.key, line = reader.read(filename_queue)

  #sess = tf.Session()
  #print(line[0].eval(session=sess), line[1].eval(session=sess))
  #sess.close()

  #print(line.get_shape())
  record_defaults = [[0] for _ in xrange(2305)]
  columns = tf.decode_csv(line, record_defaults=record_defaults)
  #print("PRINT: " , len(columns))
  x = tf.pack(columns[1:])
  cls = columns[0]
  result.height = 48
  result.width = 48
  result.label = tf.cast(cls, tf.int32)
  depth_major = tf.reshape(x, [result.height, result.width, 1])
  three_chann = tf.concat(2, [depth_major, depth_major, depth_major])
  print(three_chann.get_shape())
  result.image = three_chann
  return result
Author: vye16 | Project: 6867-project | Lines: 26 | Source: input.py


Example 9: _input_fn

  def _input_fn():
    num_epochs = 100 if mode == tf.contrib.learn.ModeKeys.TRAIN else 1

    # could be a path to one file or a file pattern.
    input_file_names = tf.train.match_filenames_once(filename)
    filename_queue = tf.train.string_input_producer(
        input_file_names, num_epochs=num_epochs, shuffle=True)

    reader = tf.TextLineReader()
    _, value = reader.read_up_to(filename_queue, num_records=BATCH_SIZE)

    value_column = tf.expand_dims(value, -1)
    print('readcsv={}'.format(value_column))
    
    # all_data is a list of tensors
    all_data = tf.decode_csv(value_column, record_defaults=DEFAULTS)  
    inputs = all_data[:len(all_data)-N_OUTPUTS]  # first few values
    label = all_data[len(all_data)-N_OUTPUTS : ] # last few values
    
    # from list of tensors to tensor with one more dimension
    inputs = tf.concat(inputs, axis=1)
    label = tf.concat(label, axis=1)
    print('inputs={}'.format(inputs))
    
    return {TIMESERIES_COL: inputs}, label   # dict of features, label
Author: GoogleCloudPlatform | Project: training-data-analyst | Lines: 25 | Source: model.py


Example 10: multi_reader_multi_example

def multi_reader_multi_example():
    # create a FIFO queue
    filenames = ['a.csv', 'b.csv', 'c.csv']
    filename_queue = tf.train.string_input_producer(filenames, shuffle=False)

    # create reader
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)

    record_defaults = [['null'], ['null']]
    example_list = [tf.decode_csv(value, record_defaults=record_defaults) for _ in range(2)]

    example_batch, label_batch = tf.train.batch_join(example_list, batch_size=5)

    # run graph
    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        try:
            while not coord.should_stop():
                print(example_batch.eval())
        except tf.errors.OutOfRangeError:
            print('epochs completed!')
        finally:
            coord.request_stop()

        coord.join(threads)
Author: Zumbalamambo | Project: deepcv | Lines: 28 | Source: readfile.py


Example 11: read_image_unlabeled

def read_image_unlabeled(filename_queue, raw_img):
  class StatefarmRecord(object):
    pass
  result = StatefarmRecord()

  # Read a record, getting filenames from the filename_queue.  
  result.key, _ = tf.decode_csv(filename_queue.dequeue(), [[""], [""]], " ")

  # Extract raw JPG data as a string
  # raw_contents = tf.read_file(result.key)
  # raw_contents = raw_img

  # Decode raw data as a PNG. Defaults to uint8 encoding.
  # result.uint8image = tf.image.decode_png(raw_contents)
  result.uint8image = raw_img.astype('uint8')

  # TENSORFLOW BUG: image shape not statically determined, so force
  # it to have correct CIFAR100 dimensions
  # result.uint8image.set_shape((32, 32, 3))

  # Kind of hacky, but set a label so we can use the same structure.
  # THIS SHOULD ALWAYS BE IGNORED DURING COMPUTATION, since we are
  # dealing with unlabeled data.
  result.label = tf.cast(tf.string_to_number("0"), tf.int32)

  return result
Author: gleichnitz | Project: duplicate_image_detection | Lines: 26 | Source: dup_input.py


Example 12: raw_training_input_fn

  def raw_training_input_fn():
    """Training input function that reads raw data and applies transforms."""

    if isinstance(raw_data_file_pattern, six.string_types):
      filepath_list = [raw_data_file_pattern]
    else:
      filepath_list = raw_data_file_pattern

    files = []
    for path in filepath_list:
      files.extend(file_io.get_matching_files(path))

    filename_queue = tf.train.string_input_producer(
        files, num_epochs=num_epochs, shuffle=randomize_input)

    csv_id, csv_lines = tf.TextLineReader().read_up_to(filename_queue, training_batch_size)

    queue_capacity = (reader_num_threads + 3) * training_batch_size + min_after_dequeue
    if randomize_input:
      _, batch_csv_lines = tf.train.shuffle_batch(
          tensors=[csv_id, csv_lines],
          batch_size=training_batch_size,
          capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=True,
          num_threads=reader_num_threads,
          allow_smaller_final_batch=allow_smaller_final_batch)

    else:
      _, batch_csv_lines = tf.train.batch(
          tensors=[csv_id, csv_lines],
          batch_size=training_batch_size,
          capacity=queue_capacity,
          enqueue_many=True,
          num_threads=reader_num_threads,
          allow_smaller_final_batch=allow_smaller_final_batch)

    csv_header, record_defaults = csv_header_and_defaults(features, schema, stats, keep_target=True)
    parsed_tensors = tf.decode_csv(batch_csv_lines, record_defaults, name='csv_to_tensors')
    raw_features = dict(zip(csv_header, parsed_tensors))

    transform_fn = make_preprocessing_fn(analysis_output_dir, features, keep_target=True)
    transformed_tensors = transform_fn(raw_features)

    # Expand the dims of non-sparse tensors. This is needed by tf.learn.
    transformed_features = {}
    for k, v in six.iteritems(transformed_tensors):
      if isinstance(v, tf.Tensor) and v.get_shape().ndims == 1:
        transformed_features[k] = tf.expand_dims(v, -1)
      else:
        transformed_features[k] = v

    # Remove the target tensor, and return it directly
    target_name = get_target_name(features)
    if not target_name or target_name not in transformed_features:
      raise ValueError('Cannot find target transform in features')

    transformed_target = transformed_features.pop(target_name)

    return transformed_features, transformed_target
Author: parthea | Project: pydatalab | Lines: 60 | Source: feature_transforms.py


Example 13: parse_example_tensor

def parse_example_tensor(examples, train_config, keep_target):
  """Read the csv files.

  Args:
    examples: string tensor
    train_config: training config
    keep_target: if true, the target column is expected to exist and it is
        returned in the features dict.

  Returns:
    Dict of feature_name to tensor. Target feature is in the dict.
  """

  csv_header = []
  if keep_target:
    csv_header = train_config['csv_header']
  else:
    csv_header = [name for name in train_config['csv_header']
                  if name != train_config['target_column']]

  # record_defaults are used by tf.decode_csv to insert defaults, and to infer
  # the datatype.
  record_defaults = [[train_config['csv_defaults'][name]]
                     for name in csv_header]
  tensors = tf.decode_csv(examples, record_defaults, name='csv_to_tensors')

  # expand_dims turns each column from shape [batch_size] into [batch_size, 1];
  # the canned estimators (e.g. regression models) error without the extra dim.
  tensors = [tf.expand_dims(x, axis=1) for x in tensors]

  tensor_dict = dict(zip(csv_header, tensors))
  return tensor_dict
Author: googledatalab | Project: pydatalab | Lines: 32 | Source: util.py


Example 14: decode_csv

  def decode_csv(line):
    parsed_line = tf.decode_csv(line, [[0.], [0.], [0.], [0.], [0]])
    label = parsed_line[-1:]  # The last element is the label
    del parsed_line[-1]       # Delete the last element
    features = parsed_line    # Everything but the last element is the features list
    d = dict(zip(feature_names, features)), label
    return d
Author: danabo | Project: models | Lines: 7 | Source: blog_estimators_dataset.py
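A usage sketch for this decoder style (the file name, feature_names, and pipeline sizes are assumptions for illustration, not part of the original source):

# Assumed context: feature_names lists the four feature columns and
# 'iris_training.csv' has a single header row.
dataset = (tf.data.TextLineDataset('iris_training.csv')  # hypothetical path
           .skip(1)          # skip the header row
           .map(decode_csv)  # line -> (feature dict, label)
           .shuffle(buffer_size=256)
           .batch(32))
features, labels = dataset.make_one_shot_iterator().get_next()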


Example 15: filequeue_to_batch_data

def filequeue_to_batch_data(filename_queue, line_reader, batch_size = BATCH_SIZE):
    # The text file format should be: query image, retrieve image, query label,
    # retrieve label, triplet-loss label (0/1)

    key, next_line = line_reader.read(filename_queue)
    query_image_name, retrieve_image_name, label_1, label_2, label_3 = tf.decode_csv(
        next_line, [tf.constant([], dtype=tf.string),tf.constant([], dtype=tf.string),
            tf.constant([], dtype = tf.int32), tf.constant([], dtype = tf.int32), tf.constant([], dtype = tf.int32)], field_delim=" ")
    
    # batch_query_image, batch_label = tf.train.batch(
    #         [query_image_name, label], batch_size=batch_size)

    reverse_channel = True  # for pre-trained purpose
    query_tensor = image_io.read_image(query_image_name, reverse_channel,   
            FEATURE_ROW, FEATURE_COL)

    retrieve_tensor = image_io.read_image(retrieve_image_name, reverse_channel,   
            FEATURE_ROW, FEATURE_COL)

    if SHUFFLE_DATA:
        min_after_dequeue = 100
        capacity = min_after_dequeue + 3 * batch_size
        batch_query_tensor, batch_retrieve_tensor, batch_label_1, batch_label_2, batch_label_3  = tf.train.shuffle_batch(
                [query_tensor, retrieve_tensor, label_1, label_2, label_3], batch_size = batch_size, capacity=capacity,
                min_after_dequeue=min_after_dequeue)
    else:
        batch_query_tensor,batch_retrieve_tensor, batch_label_1, batch_label_2, batch_label_3 = tf.train.batch(
                [query_tensor, retrieve_tensor, label_1, label_2, label_3], batch_size=batch_size)
    
    batch_tensor = tf.concat(0, [batch_query_tensor, batch_retrieve_tensor]) 
    batch_label = tf.concat(0, [batch_label_1, batch_label_2])
    return batch_tensor, batch_label, batch_label_3
Author: polltooh | Project: FineGrainedAction | Lines: 32 | Source: fine_tune_nn_v3.py


Example 16: parse_csv

  def parse_csv(line):
    print("Parsing", data_file)
    # tf.decode_csv converts a CSV line into a list of Tensors, one per column.
    # record_defaults specifies the fill-in value (and dtype) for each column.
    columns = tf.decode_csv(line, record_defaults=_CSV_COLUMN_DEFAULTS)
    features = dict(zip(_CSV_COLUMNS, columns))
    labels = features.pop('income_bracket')
    # tf.equal(x, y) returns a bool Tensor computing x == y element-wise.
    return features, tf.equal(labels, '>50K')
Author: chenxingqiang | Project: ML_CIA | Lines: 7 | Source: wide_component.py


Example 17: filequeue_to_batch_data

def filequeue_to_batch_data(filename_queue, line_reader, batch_size = BATCH_SIZE):
    
    key, next_line = line_reader.read(filename_queue)
    query_image_name, label = tf.decode_csv(
        next_line, [tf.constant([], dtype=tf.string),
            tf.constant([], dtype = tf.int32)], field_delim=" ")
    
    # batch_query_image, batch_label = tf.train.batch(
    #         [query_image_name, label], batch_size=batch_size)

    reverse_channel = True  # for pre-trained purpose
    query_tensor = image_io.read_image(query_image_name, reverse_channel,   
            FEATURE_ROW, FEATURE_COL)

    if SHUFFLE_DATA:
        min_after_dequeue = 100
        capacity = min_after_dequeue + 3 * batch_size
        batch_query_image, batch_label = tf.train.shuffle_batch(
                [query_tensor, label], batch_size = batch_size, capacity=capacity,
                min_after_dequeue=min_after_dequeue)
    else:
        batch_query_image, batch_label = tf.train.batch(
                [query_tensor, label], batch_size=batch_size)
    
    
    return batch_query_image, batch_label
Author: polltooh | Project: FineGrainedAction | Lines: 26 | Source: fine_tune_nn.py


Example 18: record_to_labeled_log_mel_examples

def record_to_labeled_log_mel_examples(csv_record, clip_dir=None, hparams=None,
                                       label_class_index_table=None, num_classes=None):
  """Creates a batch of log mel spectrum examples from a training record.

  Args:
    csv_record: a line from the train.csv file downloaded from Kaggle.
    clip_dir: path to a directory containing clips referenced by csv_record.
    hparams: tf.contrib.training.HParams object containing model hyperparameters.
    label_class_index_table: a lookup table that represents the class map.
    num_classes: number of classes in the class map.

  Returns:
    features: Tensor containing a batch of log mel spectrum examples.
    labels: Tensor containing corresponding labels in 1-hot format.
  """
  [clip, label, _] = tf.decode_csv(csv_record, record_defaults=[[''],[''],[0]])

  features = clip_to_log_mel_examples(clip, clip_dir=clip_dir, hparams=hparams)

  class_index = label_class_index_table.lookup(label)
  label_onehot = tf.one_hot(class_index, num_classes)
  num_examples = tf.shape(features)[0]
  labels = tf.tile([label_onehot], [num_examples, 1])

  return features, labels
Author: ssgalitsky | Project: Research-Audio-classification-using-Audioset-Freesound-Databases | Lines: 25 | Source: inputs.py
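The label broadcast at the end of this example is the one non-obvious step; here is a tiny self-contained sketch of the same idea (the shapes are invented for illustration):

# One clip yields several spectrogram examples but only one label, so the
# one-hot label row is tiled once per example.
label_onehot = tf.one_hot(2, 5)           # class 2 of 5 classes -> shape [5]
labels = tf.tile([label_onehot], [3, 1])  # 3 examples -> shape [3, 5]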


Example 19: read_pascifar

def read_pascifar(pascifar_path, queue):
    """ Reads and parses files from the queue.
    Args:
        pascifar_path: a constant string tensor representing the path of the PASCIFAR dataset
        queue: A queue of strings in the format: file, label

    Returns:
        image_path: a tf.string tensor. The absolute path of the image in the dataset
        label: a int64 tensor with the label
    """

    # Reader for text lines
    reader = tf.TextLineReader(skip_header_lines=1)

    # read a record from the queue
    _, row = reader.read(queue)

    # Each row holds two fields: file,label
    record_defaults = [[""], [0]]

    image_path, label = tf.decode_csv(row, record_defaults, field_delim=",")

    image_path = pascifar_path + tf.constant("/") + image_path
    label = tf.cast(label, tf.int64)
    return image_path, label
Author: galeone | Project: pgnet | Lines: 25 | Source: pascifar.py


Example 20: read_tensors_from_csv

def read_tensors_from_csv(file_name, defaults=None, num_columns=None, batch_size=1, num_epochs=None,
                          delimiter=',', randomize_input=True, num_threads=4):
    if file_name is None:
        raise ValueError(
            "Invalid file_name. file_name cannot be empty.")

    if defaults is None and num_columns is None:
        raise ValueError(
            "At least one of defaults and num_columns should not be None.")

    if defaults is None:
        defaults = [0.0 for _ in range(num_columns)]

    record_defaults = [[item] for item in defaults]

    examples = tf.contrib.learn.read_batch_examples(
        file_pattern=file_name,
        batch_size=batch_size,
        reader=tf.TextLineReader,
        randomize_input=randomize_input,
        num_threads=num_threads,
        num_epochs=num_epochs)

    columns = tf.decode_csv(
        examples, record_defaults=record_defaults, field_delim=delimiter)

    return columns
Author: ckml | Project: tf_learn | Lines: 27 | Source: util.py
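A hypothetical call, assuming a headerless two-column all-float CSV (the file name is invented for illustration):

# With num_columns=2 and no defaults given, each column defaults to 0.0;
# each returned tensor has shape [batch_size].
age, income = read_tensors_from_csv('data.csv', num_columns=2, batch_size=32)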



Note: the tensorflow.decode_csv examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms; the snippets are drawn from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing it. Please do not repost without permission.

