
Python tensorflow.sparse_tensor_to_dense Function Code Examples


This article collects typical usage examples of the tensorflow.sparse_tensor_to_dense function in Python. If you have been wondering what exactly sparse_tensor_to_dense does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the sparse_tensor_to_dense function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
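Before diving into the collected examples, here is a minimal sketch of the function itself (TF 1.x API; in TensorFlow 2.x the equivalent is tf.sparse.to_dense). The SparseTensor below is an illustrative example, not taken from any of the projects that follow:

import tensorflow as tf

# A 3x4 sparse tensor with two non-zero entries.
sparse = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Every position not listed in `indices` is filled with `default_value`.
dense = tf.sparse_tensor_to_dense(sparse, default_value=0)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[1 0 0 0]
    #  [0 0 2 0]
    #  [0 0 0 0]]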

Example 1: testPrintSparseTensorPassthrough

  def testPrintSparseTensorPassthrough(self):
    a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    b = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    a = tf.contrib.framework.print_op(a)
    with self.test_session():
      self.assertAllEqual(tf.sparse_tensor_to_dense(a).eval(),
                          tf.sparse_tensor_to_dense(b).eval())
Developer: 2020zyc | Project: tensorflow | Lines: 7 | Source file: prettyprint_ops_test.py


Example 2: parse

def parse(serialized):
    """Parse a serialized string into tensors.

    Arguments:
      serialized: a serialized `tf.train.SequenceExample` (like the one returned
        from the `encode()` method).

    Returns:
      a tuple of 4 tensors:
        `words`: 1D tensor of shape [sentence_length].
        `sentence_length`: 0D tensor (i.e. scalar) representing the sentence length.
        `formula`: 1D tensor of shape [formula_length].
        `formula_length`: 0D tensor (i.e. scalar) representing the formula length.
    """
    features = {
        SENTENCE_LENGTH_KEY: tf.FixedLenFeature([], tf.int64),
        FORMULA_LENGTH_KEY: tf.FixedLenFeature([], tf.int64),
        WORDS_KEY: tf.VarLenFeature(tf.int64),
        FORMULA_KEY: tf.VarLenFeature(tf.int64),
    }
    parsed = tf.parse_single_example(
        serialized=serialized,
        features=features)
    sentence_length = parsed[SENTENCE_LENGTH_KEY]
    formula_length = parsed[FORMULA_LENGTH_KEY]
    words = tf.sparse_tensor_to_dense(parsed[WORDS_KEY])
    formula = tf.sparse_tensor_to_dense(parsed[FORMULA_KEY])
    return words, sentence_length, formula, formula_length
Developer: usman776 | Project: dket | Lines: 28 | Source file: data.py
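A hedged usage sketch for the parse function above (assuming the TF 1.x tf.data API and a hypothetical file name train.tfrecords): each serialized record is mapped through parse, and because sparse_tensor_to_dense yields variable-length 1-D tensors here, batches need padding:

# Sketch only: 'train.tfrecords' is a hypothetical file name.
dataset = tf.data.TFRecordDataset(['train.tfrecords'])
dataset = dataset.map(parse)
# Pad the variable-length `words` and `formula` tensors per batch;
# the two scalar lengths need no padding.
dataset = dataset.padded_batch(32, padded_shapes=([None], [], [None], []))
iterator = dataset.make_one_shot_iterator()
words, sentence_length, formula, formula_length = iterator.get_next()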


Example 3: build_model

    def build_model(self):
        dense_masker = tf.sparse_tensor_to_dense(self.mask)

        with tf.name_scope('encoding'):
            encoding = tf.add(tf.sparse_tensor_dense_matmul(self.X, self.W), self.b, name='raw_values')
            encoded_values = self.enc_func(encoding, name='encoded_values')

        with tf.name_scope('decoding'):
            decoding = tf.nn.xw_plus_b(encoded_values, self.W_prime, self.b_prime)
            decoded_values = self.dec_func(decoding, name='decoded_values')
            masked_decoded_values = tf.multiply(dense_masker, decoded_values)

        with tf.name_scope('training_process'):
            diff = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y, default_value=0), decoded_values)
            error = tf.reduce_sum(tf.multiply(dense_masker, diff))
            reg = 0
            for param in self.params.items():
                reg += tf.nn.l2_loss(param[1]) * self.lambda_w
            loss = error + reg

        model_params = [p for p in self.params.values()]

        train_step = self._optimize(loss, model_params)

        tf.summary.scalar('error', error)
        tf.summary.scalar('loss', loss)
        for param in self.params.items():
            tf.summary.histogram(param[0], param[1])
        #tf.summary.histogram('predictions', decoded_values)
        merged_summary = tf.summary.merge_all()

        return encoded_values, decoded_values, masked_decoded_values, error, loss, train_step, merged_summary
Developer: WendyLNU | Project: rnn_recsys | Lines: 33 | Source file: DAE.py


Example 4: accuracy_instance

def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)

    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)

    def step_(state, elems):
        # Unpack the running (accuracy, indices) state and the current
        # (prediction, target) pair.
        accuracy, indices = state
        p, t = elems
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""

        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        # Accuracy update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices, tf.stack([tf.range(0, p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        # Index update
        index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
Developer: jayvischeng | Project: NTM-One-Shot-TF | Lines: 28 | Source file: Metrics.py


Example 5: _parse_example

def _parse_example(serialized_example):
  """Return inputs and targets Tensors from a serialized tf.Example."""
  data_fields = {
      "inputs": tf.VarLenFeature(tf.int64),
      "targets": tf.VarLenFeature(tf.int64)
  }
  parsed = tf.parse_single_example(serialized_example, data_fields)
  inputs = tf.sparse_tensor_to_dense(parsed["inputs"])
  targets = tf.sparse_tensor_to_dense(parsed["targets"])
  return inputs, targets
Developer: cybermaster | Project: reference | Lines: 10 | Source file: dataset.py


Example 6: input_fn

 def input_fn():
   features = learn.read_batch_features(
       filename, BATCH_SIZE, feature_info,
       reader=tf.TFRecordReader)
   target = features.pop('answer_ids')
   target = utils.resize_axis(tf.sparse_tensor_to_dense(target), 1, 1)
   return features, target
Developer: dmorr-google | Project: wiki-reading | Lines: 7 | Source file: bow.py


Example 7: to_matrix

 def to_matrix(sparse_indices, values, dense_shape):
     sparse_tensor = tf.sparse_reorder(tf.SparseTensor(
         indices=sparse_indices,
         values=tf.ones(sparse_indices.get_shape().as_list()[0]),
         #values=tf.reshape(values, [-1]),
         dense_shape=dense_shape))
     return tf.sparse_tensor_to_dense(sparse_tensor)
Developer: alexeyche | Project: alexeyche-junk | Lines: 7 | Source file: tf_util.py


Example 8: _slice_with_actions

def _slice_with_actions(embeddings, actions):
  """Slice a Tensor.

  Take embeddings of the form [batch_size, num_actions, embed_dim]
  and actions of the form [batch_size, 1], and return the sliced embeddings
  like embeddings[:, actions, :].

  Args:
    embeddings: Tensor of embeddings to index.
    actions: int Tensor to use as index into embeddings.

  Returns:
    Tensor of embeddings indexed by actions.
  """
  shape = tuple(t.value for t in embeddings.get_shape())
  batch_size, num_actions = shape[0], shape[1]

  # Values are the 'values' in a sparse tensor we will be setting
  act_indx = tf.cast(actions, tf.int64)[:, None]
  values = tf.reshape(tf.cast(tf.ones(tf.shape(actions)), tf.bool), [-1])

  # Create a range for each index into the batch
  act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
  # Combine this into coordinates with the action indices
  indices = tf.concat([act_range, act_indx], 1)

  actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
  actions_mask = tf.stop_gradient(
      tf.sparse_tensor_to_dense(actions_mask, default_value=False))
  sliced_emb = tf.boolean_mask(embeddings, actions_mask)
  return sliced_emb
Developer: wmiao1769 | Project: trfl | Lines: 31 | Source file: dist_value_ops.py


Example 9: test

    def test(self):
        index = 0
        next_idx = 20

        for index in range(10):
            next_idx, self.audio_features, self.audio_features_len, self.sparse_labels, wav_files = utils.next_batch(
                next_idx,
                1,
                n_input,
                n_context,
                self.text_labels,
                self.wav_files,
                self.word_num_map)

            print('Reading in audio file: ', wav_files[0])
            print('Starting to recognize the speech data......')

            d, train_ler = self.sess.run([self.decoded[0], self.label_err], feed_dict=self.get_feed_dict(dropout=1.0))
            dense_decoded = tf.sparse_tensor_to_dense(d, default_value=-1).eval(session=self.sess)
            dense_labels = utils.trans_tuple_to_texts_ch(self.sparse_labels, self.words)

            for orig, decoded_array in zip(dense_labels, dense_decoded):
                # convert to a string
                decoded_str = utils.trans_array_to_text_ch(decoded_array, self.words)
                print('Original transcript: {}'.format(orig))
                print('Recognized text: {}'.format(decoded_str))
                break

        self.sess.close()
Developer: hwlwssf | Project: speech_recognition | Lines: 29 | Source file: model.py


Example 10: __init__

 def __init__(self, config):
     paths, meta = Input._collect(config.path)
     self.dimension_count = meta['dimension_count']
     self.sample_count = meta['sample_count']
     self.batch_size = config.get('batch_size', 1)
     if self.sample_count % self.batch_size > 0:
         raise Exception(
             ('expected the number of samples ({}) to be ' +
              'divisible by the batch size ({})').format(self.sample_count,
                                                         self.batch_size))
     with tf.variable_scope('state'):
         self.state = State()
     with tf.variable_scope('source'):
         paths = tf.Variable(paths, name='paths', dtype=tf.string,
                             trainable=False)
         queue = tf.FIFOQueue(meta['path_count'], [tf.string])
         enqueue = queue.enqueue_many([tf.random_shuffle(paths)])
         tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue]))
         _, record = tf.TFRecordReader().read(queue)
     with tf.variable_scope('x'):
         features = tf.parse_single_example(record, {
             'data': tf.VarLenFeature(tf.float32),
         })
         data = tf.sparse_tensor_to_dense(features['data'])
         if self.batch_size == 1:
             self.x = tf.reshape(data, [1, -1, self.dimension_count])
         else:
             x = tf.reshape(data, [-1, self.dimension_count])
             _, outputs = tf.contrib.training.bucket_by_sequence_length(
                 tf.shape(x)[0], [x], self.batch_size, config.buckets,
                 dynamic_pad=True)
             self.x = outputs[0]
     with tf.variable_scope('y'):
         self.y = tf.pad(self.x[:, 1:, :], [[0, 0], [0, 1], [0, 0]])
Developer: learning-on-chip | Project: predictor | Lines: 34 | Source file: input.py


Example 11: __init__

  def __init__(self,args):
    super(seqMLP, self).__init__()
    self.args = args
    self.batch_size=args.batch_size
    self.input_data = tf.placeholder(tf.float32,[self.args.batch_size,self.args.sentence_length,self.args.word_dim],name='inputdata')
    
    self.output_data = tf.sparse_placeholder(tf.float32, name='outputdata')  #[None, 114]
    self.dense_outputdata= tf.sparse_tensor_to_dense(self.output_data)   
    self.keep_prob = tf.placeholder(tf.float32,name='keep_prob_NER')
    
    self.entMentIndex = tf.placeholder(tf.int32,[None,5],name='ent_mention_index')
    
    self.entCtxLeftIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxleft_index')
    self.entCtxRightIndex = tf.placeholder(tf.int32,[None,10],name='ent_ctxright_index')
    self.pos_f1 = tf.placeholder(tf.float32,[None,5,1])
    self.pos_f2 = tf.placeholder(tf.float32,[None,10,1])
    self.pos_f3 = tf.placeholder(tf.float32,[None,10,1])
    self.figerHier = np.asarray(cPickle.load(open('data/figer/figerhierarchical.p','rb')),np.float32)  #add the hierarchy features
    
    self.layers={}
    self.layers['fullyConnect'] = layers_lib.FullyConnection(self.args.class_size)
   
    
    used = tf.sign(tf.reduce_max(tf.abs(self.input_data),reduction_indices=2))
    self.length = tf.cast(tf.reduce_sum(used,reduction_indices=1),tf.int32)
      
    with tf.device('/gpu:0'):
      self.prediction, self.loss_lm = self.cl_loss_from_embedding(self.input_data)
      print 'self.loss_lm:', self.loss_lm

      _, self.adv_loss = self.adversarial_loss()
      print 'self.adv_loss:', self.adv_loss

      self.loss = tf.add(self.loss_lm, self.adv_loss)
Developer: wujsAct | Project: TeachingMachineReadAndComprehend | Lines: 34 | Source file: seqMLP.py


Example 12: _decode_png_instance_masks

  def _decode_png_instance_masks(self, keys_to_tensors):
    """Decode PNG instance segmentation masks and stack into dense tensor.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """

    def decode_png_mask(image_buffer):
      image = tf.squeeze(
          tf.image.decode_image(image_buffer, channels=1), axis=2)
      image.set_shape([None, None])
      image = tf.to_float(tf.greater(image, 0))
      return image

    png_masks = keys_to_tensors['image/object/mask']
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    if isinstance(png_masks, tf.SparseTensor):
      png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
    return tf.cond(
        tf.greater(tf.size(png_masks), 0),
        lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
Developer: forging2012 | Project: models | Lines: 30 | Source file: tf_example_decoder.py


Example 13: read_data_int64

def read_data_int64(input_fname):
    with tictoc():
        input_fname_queue = tf.train.string_input_producer([input_fname], num_epochs=1)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(input_fname_queue)
        features = {'bit_features' : tf.VarLenFeature(tf.int64)}
        parsed_example = tf.parse_single_example(serialized_example, features)
        bit_features = parsed_example['bit_features']
        bit_features = tf.sparse_tensor_to_dense(bit_features)
        bit_features = tf.reshape(bit_features, [-1, 62])

        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                i = 0
                while not coord.should_stop():
                    x = bit_features.eval()
                    if i % 10000 == 0: print("substance {}".format(i))
                    i += 1
            except tf.errors.OutOfRangeError:
                pass
            finally:
                coord.request_stop()

            coord.join(threads)
Developer: momeara | Project: DeepSEA | Lines: 30 | Source file: benchmark_data_reading.py


Example 14: tensors_to_item

  def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_boxes, 4] tensor of bounding box coordinates, in order
          [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
          or a dense Tensor is determined by the return_dense parameter. Empty
          positions in the sparse tensor are filled with -1.0 values.
    """
    sides = []
    for key in self._full_keys:
      value = keys_to_tensors[key]
      expanded_dims = tf.concat(
          [tf.to_int64(tf.shape(value)),
           tf.constant([1], dtype=tf.int64)], 0)
      side = tf.sparse_reshape(value, expanded_dims)
      sides.append(side)
    bounding_boxes = tf.sparse_concat(2, sides)
    if self._return_dense:
      bounding_boxes = tf.sparse_tensor_to_dense(
          bounding_boxes, default_value=self._default_value)
    return bounding_boxes
Developer: Exscotticus | Project: models | Lines: 25 | Source file: tf_sequence_example_decoder.py


Example 15: unpool_layer2x2

    def unpool_layer2x2(self, x, raveled_argmax, out_shape):
        argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
        output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.stack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat([t2, t1], 3)
        indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Developer: BenJamesbabala | Project: Tensorflow-DeconvNet-Segmentation | Lines: 28 | Source file: DeconvNet.py


Example 16: custom_fast_text

def custom_fast_text(features, labels, mode, params):
    vocab_table = lookup.index_table_from_file(vocabulary_file='data/vocab.csv', num_oov_buckets=1, default_value=-1)
    text = features[commons.FEATURE_COL]
    words = tf.string_split(text)
    dense_words = tf.sparse_tensor_to_dense(words, default_value=commons.PAD_WORD)
    word_ids = vocab_table.lookup(dense_words)

    padding = tf.constant([[0, 0], [0, commons.CNN_MAX_DOCUMENT_LENGTH]])
    # Pad all the word_ids entries to the maximum document length
    word_ids_padded = tf.pad(word_ids, padding)
    word_id_vector = tf.slice(word_ids_padded, [0, 0], [-1, commons.CNN_MAX_DOCUMENT_LENGTH])

    if mode == tf.estimator.ModeKeys.TRAIN:
        tf.keras.backend.set_learning_phase(True)
    else:
        tf.keras.backend.set_learning_phase(False)

    embedded_sequences = tf.keras.layers.Embedding(params.N_WORDS, 20, input_length=commons.CNN_MAX_DOCUMENT_LENGTH)(
        word_id_vector)
    f1 = tf.keras.layers.GlobalMaxPooling1D()(embedded_sequences)
    logits = tf.keras.layers.Dense(commons.TARGET_SIZE, activation=None)(f1)

    predictions = tf.nn.sigmoid(logits)

    if mode == tf.estimator.ModeKeys.PREDICT:
        prediction_dict = {
            'class': tf.cast(tf.map_fn(lambda x: tf.cond(x > 0.30, lambda: 1.0, lambda: 0.0),
                                       tf.squeeze(predictions)), dtype=tf.int32),
        }

        export_outputs = {
            'predictions': tf.estimator.export.PredictOutput(prediction_dict)
        }

        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)

    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels, logits=logits)

    tf.summary.scalar('loss', loss)

    acc = tf.equal(tf.cast(predictions, dtype=tf.int32), labels)
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

    tf.summary.scalar('acc', acc)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer()

        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

        return tf.estimator.EstimatorSpec(mode=mode, train_op=train_op, loss=loss)

    if mode == tf.estimator.ModeKeys.EVAL:
        eval_metrics_ops = {
            'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics_ops)
Developer: Aurora11111 | Project: CloudML-Serving | Lines: 59 | Source file: custom_model.py


Example 17: cdk

    def cdk(self, visibles, k, learning_rate=0.001):
        h_start = self.propup(visibles)
        h_t = h_start
        
        for t in range(k):
            v_t = self.propdown(h_t,visibles)
            h_t = self.propup(v_t)
        # adapt to 3D tensor here
        w_positive_grad = matmul_to3D(h_start, tf.sparse_tensor_to_dense(visibles), self.m, self.F, self.K) # formula (5) of the paper
        w_negative_grad = matmul_to3D(h_t, v_t, self.m, self.F, self.K) # formula (5) of the paper
        
        update_w = self.delta_w.assign_add(learning_rate * (w_positive_grad - w_negative_grad))

        update_vb = self.delta_vb.assign_add(learning_rate * (tf.sparse_tensor_to_dense(visibles) - v_t))

        update_hb = self.delta_hb.assign_add(learning_rate * (h_start - h_t))

        return [update_w, update_vb, update_hb]
Developer: RomainWarlop | Project: RecSys-Learn | Lines: 18 | Source file: rbmcf.py


Example 18: _call

 def _call(self, inputs):
     x = tf.cast(inputs, self.dtype)
     if type(x) == tf.SparseTensor: # convert to dense if necessary
         x = tf.sparse_tensor_to_dense(x, validate_indices=False)
     x = tf.nn.dropout(x, tf.cast(1-self.dropout, self.dtype))
     x = tf.matmul(x, self.vars['weights'])
     x = tf.matmul(self.adj, x)
     outputs = self.act(x)
     return outputs
Developer: habedi | Project: link-prediction | Lines: 9 | Source file: layers.py


Example 19: __init__

 def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, dtype=tf.float32, **kwargs):
     super(GraphConvolution, self).__init__(**kwargs)
     with tf.variable_scope(self.name + '_vars'):
         self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, dtype=dtype, name="weights")
     self.dropout = dropout
     self.adj = adj
     if type(self.adj) == tf.SparseTensor: # convert to dense if necessary
         self.adj = tf.sparse_tensor_to_dense(self.adj, validate_indices=False)
     self.act = act
     self.dtype=dtype
Developer: habedi | Project: link-prediction | Lines: 10 | Source file: layers.py


Example 20: ImageInput

def ImageInput(input_pattern, num_threads, shape, using_ctc, reader=None):
  """Creates an input image tensor from the input_pattern filenames.

  TODO(rays) Expand for 2-d labels, 0-d labels, and logistic targets.
  Args:
    input_pattern:  Filenames of the dataset(s) to read.
    num_threads:    Number of preprocessing threads.
    shape:          ImageShape with the desired shape of the input.
    using_ctc:      Take the unpadded_class labels instead of padded.
    reader:         Function that returns an actual reader to read Examples from
      input files. If None, uses tf.TFRecordReader().
  Returns:
    images:   Float Tensor containing the input image scaled to [-1.28, 1.27].
    heights:  Tensor int64 containing the heights of the images.
    widths:   Tensor int64 containing the widths of the images.
    labels:   Serialized SparseTensor containing the int64 labels.
    sparse_labels:   Serialized SparseTensor containing the int64 labels.
    truths:   Tensor string of the utf8 truth texts.
  Raises:
    ValueError: if the optimizer type is unrecognized.
  """
  data_files = tf.gfile.Glob(input_pattern)
  assert data_files, 'no files found for dataset ' + input_pattern
  queue_capacity = shape.batch_size * num_threads * 2
  filename_queue = tf.train.string_input_producer(
      data_files, capacity=queue_capacity)

  # Create a subgraph with its own reader (but sharing the
  # filename_queue) for each preprocessing thread.
  images_and_label_lists = []
  for _ in range(num_threads):
    image, height, width, labels, text = _ReadExamples(filename_queue, shape,
                                                       using_ctc, reader)
    images_and_label_lists.append([image, height, width, labels, text])
  # Create a queue that produces the examples in batches.
  images, heights, widths, labels, truths = tf.train.batch_join(
      images_and_label_lists,
      batch_size=shape.batch_size,
      capacity=16 * shape.batch_size,
      dynamic_pad=True)
  # Deserialize back to sparse, because the batcher doesn't do sparse.
  labels = tf.deserialize_many_sparse(labels, tf.int64)
  sparse_labels = tf.cast(labels, tf.int32)
  labels = tf.sparse_tensor_to_dense(labels)
  labels = tf.reshape(labels, [shape.batch_size, -1], name='Labels')
  # Crush the other shapes to just the batch dimension.
  heights = tf.reshape(heights, [-1], name='Heights')
  widths = tf.reshape(widths, [-1], name='Widths')
  truths = tf.reshape(truths, [-1], name='Truths')
  # Give the images a nice name as well.
  images = tf.identity(images, name='Images')

  tf.image_summary('Images', images)
  return images, heights, widths, labels, sparse_labels, truths
Developer: Peratham | Project: models | Lines: 54 | Source file: vgsl_input.py



Note: The tensorflow.sparse_tensor_to_dense examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. Refer to each project's license before redistributing or using the code, and do not reproduce this compilation without permission.

