
Python tensorflow.sparse_to_dense Function Code Examples


This article collects typical usage examples of the Python function tensorflow.sparse_to_dense. If you have been wondering what exactly sparse_to_dense does, how to call it, or what it looks like in practice, the curated code examples below should help.



Below are 20 code examples of the sparse_to_dense function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
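Before the examples, here is a minimal sketch of the call itself, assuming the TensorFlow 1.x API that all of the examples below target (the shapes and values are illustrative only, not taken from any example in this article): tf.sparse_to_dense writes sparse_values at the positions listed in sparse_indices of a new dense tensor of shape output_shape, and fills every other entry with default_value.

import tensorflow as tf

# Put 1.0 at positions (0, 1) and (2, 3) of a 4x5 matrix, 0.0 everywhere else.
dense = tf.sparse_to_dense(sparse_indices=[[0, 1], [2, 3]],
                           output_shape=[4, 5],
                           sparse_values=1.0,    # a scalar, or one value per index
                           default_value=0.0)

with tf.Session() as sess:
    print(sess.run(dense))   # rows 0 and 2 each contain a single 1.0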

Example 1: prepare_calculation

    def prepare_calculation(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            K = self.K
            mlambda = self.mlambda
            n_nodes = self.n_nodes
            deg_vec = self.get_degree_vector()
            edge_list, weights = self.seperate_nodeid_and_weight(
                                                            self.edge_list)
            const_pairs, const_weights = self.seperate_nodeid_and_weight(
                                                            self.const_pairs)

            self.A = A = tf.sparse_to_dense(output_shape=[n_nodes, n_nodes],
                                            sparse_indices=edge_list,
                                            sparse_values=weights)
            self.O = O = tf.sparse_to_dense(output_shape=[n_nodes, n_nodes],
                                            sparse_indices=const_pairs,
                                            sparse_values=const_weights)
            self.P = P = tf.constant(self.get_degree_matrix(O))
            self.L = L = P - O

            degrees = self.get_degree_vector()
            self.U = U = tf.Variable(self.get_initial_U(degrees, K),
                                     name="U")
            self.Z = Z = tf.Variable(self.get_initial_Z(degrees, K),
                                     name="Z")
            U_norm = self.normalize_U(U)
            Z_norm = self.get_positive_variable(Z)

            Y = tf.matmul(U_norm, tf.matmul(Z_norm, U_norm, transpose_b=True))
            self.loss = loss = tf.nn.l2_loss(A - Y)
            adam = tf.train.AdamOptimizer(self.lr)
            self.opt = adam.minimize(loss)
            self.setup_session()
Developer: nukui-s, Project: sscomdetection, Lines: 35, Source: dcsscd.py


Example 2: prepare_calculation

    def prepare_calculation(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            K = self.K
            n_nodes = self.n_nodes
            edge_list, weights = self.seperate_nodeid_and_weight(
                                                            self.edge_list)
            const_pairs, const_weights = self.seperate_nodeid_and_weight(
                                                            self.const_pairs)
            mlambda = self.mlambda

            self.A = A = tf.sparse_to_dense(output_shape=[n_nodes, n_nodes],
                                            sparse_indices=edge_list,
                                            sparse_values=weights)
            self.O = O = tf.sparse_to_dense(output_shape=[n_nodes, n_nodes],
                                            sparse_indices=const_pairs,
                                            sparse_values=const_weights)

            self.D = D = self.get_degree_matrix(O)
            self.L = L = D - O
            scaler = 2 * np.sqrt(weights.sum() / (n_nodes * n_nodes * K))
            initializer = tf.random_uniform_initializer(maxval=scaler)
            self.H_var = H_var = tf.get_variable("H_var", shape=[n_nodes, K],
                                                  initializer=initializer)
            self.W_var = W_var = tf.get_variable("W_var", shape=[n_nodes, K],
                                                 initializer=initializer,
                                                 trainable=(not self.synmetric))

            # Force H to be positive
            self.H = H = self.get_positive_variable(H_var)
            self.W = H

            H_norm = self.normalize_H(H, n_nodes)

            self.loss = loss = self.loss_LSE(A, H)
            self.sup_term = sup_term = self.supervisor_term(H_norm, L)

            self.cost = cost = loss + mlambda * sup_term

            self.define_tfsummary()

            if self.optimizer == "adam":
                optimizer = tf.train.AdamOptimizer(self.lr, epsilon=0.1)
            else:
                optimizer = tf.train.GradientDescentOptimizer(self.lr)
            opt = optimizer.minimize(cost)
            if self.positivate != "clip":
                self.opt = opt
            else:
                with tf.control_dependencies([opt]):
                    clipped = tf.maximum(H_var,0)
                    clip_H = H_var.assign(clipped)
                self.opt = tf.group(opt, clip_H)

            config = tf.ConfigProto(inter_op_parallelism_threads=self.threads,
                                  intra_op_parallelism_threads=self.threads)
            self.sess = tf.Session(config=config)
            self.init_op = tf.global_variables_initializer()
Developer: nukui-s, Project: sscomdetection, Lines: 58, Source: ssscd.py


Example 3: testShapeInferenceKnownShape

  def testShapeInferenceKnownShape(self):
    with self.test_session(use_gpu=False):
      indices = tf.placeholder(tf.int64)

      shape = [4, 5, 6]
      output = tf.sparse_to_dense(indices, shape, 1, 0)
      self.assertEqual(output.get_shape(), [4, 5, 6])

      shape = tf.placeholder(tf.int64, shape=(3,))
      output = tf.sparse_to_dense(indices, shape, 1, 0)
      self.assertEqual(output.get_shape().as_list(), [None, None, None])
Developer: adeelzaman, Project: tensorflow, Lines: 11, Source: sparse_to_dense_op_py_test.py


Example 4: map_box_encodings

 def map_box_encodings(i):
   """Produces box K-hot and score encodings for each class index."""
   box_mask = tf.equal(
       unique_indices, i * tf.ones(num_boxes, dtype=tf.int32))
   box_mask = tf.reshape(box_mask, [-1])
   box_indices = tf.boolean_mask(classes, box_mask)
   box_confidences = tf.boolean_mask(confidences, box_mask)
   box_class_encodings = tf.sparse_to_dense(
       box_indices, [num_classes], 1, validate_indices=False)
   box_confidence_encodings = tf.sparse_to_dense(
       box_indices, [num_classes], box_confidences, validate_indices=False)
   return box_class_encodings, box_confidence_encodings
Developer: waterson, Project: models, Lines: 12, Source: ops.py


Example 5: loss

def loss(logits, labels):
    #sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
    #indices = tf.reshape(tf.range(0, FLAGS.batch_size), [FLAGS.batch_size, 1])
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, FLAGS.batch_size, 1), 1)
    #concated = tf.concat(1, [indices, sparse_labels])
    concated = tf.concat(1, [indices, labels])
    # The number of classes passed to sparse_to_dense must be the maximum class label + 1
    dense_labels = tf.sparse_to_dense(
        concated,
        [FLAGS.batch_size, NUM_CLASSES],
        1.0,
        0.0
    )

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits,
        dense_labels,
        name='cross_entropy_per_example'
    )

    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer: pmnyc, Project: Machine_Learning_Test_Repository, Lines: 25, Source: model_alex2010.py


Example 6: loss_test

def loss_test(logits, labels, batch_size=None):
    # Reshape the labels into a dense Tensor of
    # shape [FLAGS.batch_size, num_classes].
    sparse_labels = tf.reshape(labels, [batch_size, 1])
    indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
    sparse_labels = tf.cast(sparse_labels, tf.int32)
    concated = tf.concat(1, [indices, sparse_labels])
    num_classes = logits[0].get_shape()[-1].value
    dense_labels = tf.sparse_to_dense(concated,
                                    [batch_size, num_classes],
                                    1.0, 0.0)

    print "-"*10
    print type(logits)
    print len(logits)
    print logits[0].get_shape()
    print logits[1].get_shape()
    print "-"*10

    # Cross entropy loss for the main softmax prediction.
    loss = slim.losses.cross_entropy_loss_without_collection(logits[0],
                                 dense_labels,
                                 label_smoothing=0.1,
                                 weight=1.0)

    # Cross entropy loss for the auxiliary softmax head.
    aux_loss = slim.losses.cross_entropy_loss_without_collection(logits[1],
                                 dense_labels,
                                 label_smoothing=0.1,
                                 weight=0.4,
                                 scope='aux_loss')
    return loss, aux_loss
Developer: qiuzhangcheng, Project: InceptionV3_TensorFlow, Lines: 32, Source: model.py


Example 7: _count_matrix_input

  def _count_matrix_input(self, filenames, submatrix_rows, submatrix_cols):
    """Creates ops that read submatrix shards from disk."""
    random.shuffle(filenames)
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'global_row': tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),
            'global_col': tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),
            'sparse_local_row': tf.VarLenFeature(dtype=tf.int64),
            'sparse_local_col': tf.VarLenFeature(dtype=tf.int64),
            'sparse_value': tf.VarLenFeature(dtype=tf.float32)
        })

    global_row = features['global_row']
    global_col = features['global_col']

    sparse_local_row = features['sparse_local_row'].values
    sparse_local_col = features['sparse_local_col'].values
    sparse_count = features['sparse_value'].values

    sparse_indices = tf.concat(
        axis=1, values=[tf.expand_dims(sparse_local_row, 1),
                        tf.expand_dims(sparse_local_col, 1)])

    count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],
                               sparse_count)

    return global_row, global_col, count
Developer: ALISCIFP, Project: models, Lines: 31, Source: swivel.py


Example 8: loss

def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  # Convert from sparse integer labels in the range [0, NUM_CLASSES)
  # to 1-hot dense float vectors (that is we will have batch_size vectors,
  # each with NUM_CLASSES values, all of which are 0.0 except there will
  # be a 1.0 in the entry corresponding to the label).
  batch_size = tf.size(labels)
  labels = tf.expand_dims(labels, 1)
  indices = tf.expand_dims(tf.range(0, batch_size), 1)
  concated = tf.concat(1, [indices, labels])
  onehot_labels = tf.sparse_to_dense(
      concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                          onehot_labels,
                                                          name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
Developer: deerishi, Project: assignment5ML, Lines: 25, Source: mnist1HiddenLayer.py
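A side note, not part of the original projects: the expand_dims / range / concat / sparse_to_dense pattern in Examples 5, 8, and 12 builds a dense one-hot label matrix by hand. Later TensorFlow releases ship tf.one_hot, which reduces the whole pattern to a single call; assuming the same rank-1 labels tensor (before any expand_dims) and NUM_CLASSES constant as in Example 8, a sketch looks like:

# One-hot encode a rank-1 int tensor of labels into [batch_size, NUM_CLASSES].
onehot_labels = tf.one_hot(labels, depth=NUM_CLASSES, on_value=1.0, off_value=0.0)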


Example 9: ce

def ce(model, config, scope, connect, threshold = 1e-5):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length' %scope] = model['%s_out0length' %connect]
			model['%s_in1length' %scope] = model['%s_out1length' %connect]
			model['%s_in2length' %scope] = model['%s_out2length' %connect]
			model['%s_maxin2length' %scope] = model['%s_maxout2length' %connect]
			model['%s_inputs' %scope] = tf.clip_by_value(tf.nn.softmax(model['%s_outputs' %connect]), threshold, 1. - threshold, name = '%s_inputs' %scope)
			model['%s_out0length' %scope] = model['%s_in0length' %scope]
			model['%s_out1length' %scope] = model['%s_in1length' %scope]
			model['%s_out2length' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_out2length' %scope)
			model['%s_maxout2length' %scope] = model['%s_maxin2length' %scope]

		with tf.variable_scope('labels'), tf.name_scope('labels'):
			model['%s_labels_len' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_labels_len' %scope)
			model['%s_labels_ind' %scope] = tf.placeholder(tf.int64, [None, 2], '%s_labels_ind' %scope)
			model['%s_labels_val' %scope] = tf.placeholder(tf.int32, [None], '%s_labels_val' %scope)
			model['%s_labels_collapsed' %scope] = tf.sparse_to_dense(model['%s_labels_ind' %scope], [model['%s_maxin2length' %scope], model['%s_in0length' %scope]], model['%s_labels_val' %scope], -1, name = '%s_labels_collapsed' %scope)
			model['%s_labels' %scope] = tf.one_hot(model['%s_labels_collapsed' %scope], model['%s_out1length' %scope], name = '%s_labels' %scope)

		with tf.variable_scope('loss'), tf.name_scope('loss'):
			model['%s_loss' %scope] = tf.reduce_sum(-tf.multiply(model['%s_labels' %scope], tf.log(model['%s_inputs' %scope])), name = '%s_loss' %scope)

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_output' %scope] = model['%s_inputs' %scope]

	return model
Developer: aaiijmrtt, Project: DEEPSPEECH, Lines: 27, Source: ce.py


Example 10: default_exchange_proposed_fn_

  def default_exchange_proposed_fn_(num_replica, seed=None):
    """Default function for `exchange_proposed_fn` of `kernel`."""
    num_replica = tf.to_int32(num_replica)

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    random_uniform = tf.random_uniform([], seed=seed)
    accept_proposed_exchange = random_uniform < probs

    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    zero_start = tf.random_uniform([], seed=seed) > 0.5
    if num_replica % 2 == 0:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica),
          tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                             tf.range(1, num_replica - 1)))
      exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                     num_replica // 2 - 1)
    else:
      exchange_proposed = tf.where(
          zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
      exchange_proposed_n = num_replica // 2

    exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
    exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                                 tf.zeros_like(exchange_proposed))
    exchange_proposed_n = tf.where(accept_proposed_exchange,
                                   exchange_proposed_n,
                                   tf.zeros_like(exchange_proposed_n))
    return exchange_proposed, exchange_proposed_n
Developer: lewisKit, Project: probability, Lines: 29, Source: replica_exchange_mc.py


Example 11: one_hot_matrix

def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
    """Encodes indices from given tensor as one-hot tensor.

    TODO(ilblackdragon): Ideally implementation should be
    part of TensorFlow with Eigen-native operation.

    Args:
        tensor_in: Input tensor of shape [N1, N2].
        num_classes: Number of classes to expand index into.
        on_value: Tensor or float, value to fill-in given index.
        off_value: Tensor or float, value to fill-in everything else.
    Returns:
        Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
        tensor.
    """
    tensor_in = tf.convert_to_tensor(tensor_in)
    sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
    size = tf.shape(sparse_values)[0]
    dims = tf.shape(tensor_in)
    indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
    indices_values = tf.concat(1, [indices, sparse_values])
    outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
    one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
    ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
    ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
    return ret
Developer: twinklestar93, Project: skflow, Lines: 26, Source: array_ops.py


Example 12: loss

def loss(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs or inputs(). 1-D tensor
                of shape [batch_size]

    Returns:
    Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    concated = tf.concat(1, [indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.pack([batch_size, NUM_CLASSES]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                            onehot_labels,
                                                            name='xentropy')
    # Calculate the average cross entropy loss across the batch.
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer: AliMiraftab, Project: ROICNN, Lines: 32, Source: rsvp_quick_cnn_model.py


Example 13: read_data

def read_data(filename_queue):
    """
    read_data takes a .tfrecord from the filename queue and transforms it for modeling purposes.
    Each record has both a label and an image associated with it.
    :param filename_queue: The queue runner created by TensorFlow
    :return: An object of the class CIFAR10Record that has both a label and an image value
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        #dense_keys=['image_raw', 'label'],
        #dense_types=[tf.string, tf.int64]
        features={'image_raw': tf.FixedLenFeature([], tf.string),
                  'label': tf.FixedLenFeature([], tf.int64)}
    )
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image.set_shape([input_image_size * input_image_size * input_image_channels])
    image = tf.cast(image, tf.float32)
    result.image = tf.reshape(image, [input_image_size, input_image_size, input_image_channels])

    label = tf.cast(features['label'], tf.int32)
    result.label = tf.sparse_to_dense(label, [num_labels], 1.0, 0.0)

    return result
Developer: pspitler3, Project: CFAR10, Lines: 29, Source: read_data.py


Example 14: build_generator

    def build_generator(self):

        # placeholders for feeding data
        image = tf.placeholder(tf.float32, [self.batch_size, self.dim_image])  # (batch_size, dim_image)
        local_image = tf.placeholder(tf.float32, [self.batch_size, self.dim_image])
        query = tf.placeholder(tf.int32, [self.batch_size, MAX_QUERY_WORDS])
        query_mask = tf.placeholder(tf.float32, [self.batch_size, MAX_QUERY_WORDS])
        bbox = tf.placeholder(tf.float32, [self.batch_size, self.dim_coordinates])

        # [image] embed image feature to dim_hidden
        image_emb = tf.nn.bias_add(tf.matmul(image, self.embed_image_W), self.embed_image_b)  # (batch_size, dim_hidden)
        local_image_emb = tf.nn.bias_add(tf.matmul(local_image, self.embed_local_W), self.embed_local_b)  # (batch_size, dim_hidden)

        score = tf.zeros([self.batch_size], tf.float32)

        state_lang = tf.zeros([self.batch_size, self.lstm_lang.state_size])
        state_context = tf.zeros([self.batch_size, self.lstm_context.state_size])
        state_local = tf.zeros([self.batch_size, self.lstm_local.state_size])
        query_emb = tf.zeros([self.batch_size, self.dim_hidden])
        for j in range(MAX_QUERY_WORDS):

            # language lstm
            with tf.variable_scope("lstm_lang"):
                output_lang, state_lang = self.lstm_lang(query_emb, state_lang)
            lang = tf.slice(state_lang, [0, 0], [self.batch_size, self.dim_hidden])

            # context lstm
            with tf.variable_scope("lstm_context"):
                output_context, state_context = self.lstm_context(tf.concat(1, [image_emb, lang]), state_context)
            context = tf.slice(state_context, [0, 0], [self.batch_size, self.dim_hidden])

            # local lstm
            with tf.variable_scope("lstm_local"):
                output_local, state_local = self.lstm_local(tf.concat(1, [local_image_emb, lang, bbox]), state_local)
            local = tf.slice(state_local, [0, 0], [self.batch_size, self.dim_hidden])

            context_emb = tf.nn.xw_plus_b(context, self.W_context, self.B_context)
            local_emb = tf.nn.xw_plus_b(local, self.W_local, self.B_local)
            word_pred = tf.add(context_emb, local_emb)

            max_prob_index = tf.argmax(word_pred, 1)  # (batch_size,)

            labels = tf.expand_dims(query[:, j], 1)
            indices = tf.expand_dims(tf.range(0, self.batch_size, 1), 1)
            concated = tf.concat(1, [indices, labels])
            with tf.device('/cpu:0'):
                onehot_labels = tf.sparse_to_dense(concated, tf.pack([self.batch_size, self.dict_words]), 1.0, 0.0)
            current_score = tf.mul(onehot_labels, word_pred)
            current_score = tf.reduce_sum(current_score, 1)
            current_score = tf.mul(current_score, query_mask[:, j])
            current_score = tf.reshape(current_score, [1, self.batch_size])
            current_score = tf.nn.softmax(current_score)
            score = tf.add(score, current_score)

            with tf.device("/cpu:0"):
                tf.get_variable_scope().reuse_variables()
                query_emb = tf.nn.embedding_lookup(self.query_emb_W, max_prob_index)

        return score, image, local_image, query, query_mask, bbox
Developer: andrewliao11, Project: Natural-Language-Object-Retrieval-tensorflow, Lines: 60, Source: test.py


Example 15: __init__

    def __init__(self, is_training, config):
        self._batch_size = batch_size = FLAGS.batch_size
        self.num_skills = num_skills = config.num_skills
        self.hidden_size = size = FLAGS.hidden_size
        self.num_steps = num_steps = config.num_steps
        input_size = num_skills*2

        inputs = self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
        self._target_id = target_id = tf.placeholder(tf.int32, [None])
        self._target_correctness = target_correctness = tf.placeholder(tf.float32, [None])
        final_hidden_size = size

        hidden_layers = []
        for i in range(FLAGS.hidden_layer_num):
            final_hidden_size = size // (i + 1)  # integer division keeps the layer size an int
            hidden1 = tf.nn.rnn_cell.LSTMCell(final_hidden_size, state_is_tuple=True)
            if is_training and config.keep_prob < 1:
                hidden1 = tf.nn.rnn_cell.DropoutWrapper(hidden1, output_keep_prob=FLAGS.keep_prob)
            hidden_layers.append(hidden1)

        cell = tf.nn.rnn_cell.MultiRNNCell(hidden_layers, state_is_tuple=True)

        input_data = tf.reshape(self._input_data, [-1])
        #one-hot encoding
        with tf.device("/cpu:0"):
            labels = tf.expand_dims(input_data, 1)
            indices = tf.expand_dims(tf.range(0, batch_size*num_steps, 1), 1)
            concated = tf.concat(1, [indices, labels])
            inputs = tf.sparse_to_dense(concated, tf.pack([batch_size*num_steps, input_size]), 1.0, 0.0)
            inputs.set_shape([batch_size*num_steps, input_size])

        # [batch_size, num_steps, input_size]
        inputs = tf.reshape(inputs, [-1, num_steps, input_size])
        x = tf.transpose(inputs, [1, 0, 2])
        # Reshape to (n_steps*batch_size, n_input)
        x = tf.reshape(x, [-1, input_size])
        # Split to get a list of 'n_steps'
        # tensors of shape (doc_num, n_input)
        x = tf.split(0, num_steps, x)
        #inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, num_steps, inputs)]
        #outputs, state = tf.nn.rnn(hidden1, x, dtype=tf.float32)
        outputs, state = tf.nn.rnn(cell, x, dtype=tf.float32)
        output = tf.reshape(tf.concat(1, outputs), [-1, final_hidden_size])
        # calculate the logits from last hidden layer to output layer
        sigmoid_w = tf.get_variable("sigmoid_w", [final_hidden_size, num_skills])
        sigmoid_b = tf.get_variable("sigmoid_b", [num_skills])
        logits = tf.matmul(output, sigmoid_w) + sigmoid_b

        # from output nodes to pick up the right one we want
        logits = tf.reshape(logits, [-1])
        selected_logits = tf.gather(logits, self.target_id)

        #make prediction
        self._pred = self._pred_values = pred_values = tf.sigmoid(selected_logits)

        # loss function
        loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(selected_logits, target_correctness))

        #self._cost = cost = tf.reduce_mean(loss)
        self._cost = cost = loss
Developer: siyuanzhao, Project: 2016-EDM, Lines: 60, Source: student_model.py


Example 16: softmax_loss_layer

def softmax_loss_layer(name, score_bottom, label_bottom):
    """
    Calculates cumulative Softmax Cross Entropy Loss along the last dimension
    *This function does not divide the loss by batch size*

    Once tensorflow has SparseCrossEntropy function, this one will be replaced
    """
    # Check shape
    score_shape = score_bottom.get_shape().as_list()
    label_shape = label_bottom.get_shape().as_list()
    assert len(score_shape) == len(label_shape) + 1
    assert score_shape[:-1] == label_shape

    # Compute the outer dimensions dimensions in label
    inner_dim = score_shape[-1]
    outer_dim = 1
    for d in label_shape: outer_dim *= d

    # flatten score and label
    flat_score = tf.reshape(score_bottom, [outer_dim, inner_dim])
    flat_label = tf.reshape(label_bottom, [outer_dim, 1])

    # Reshape the flattened labels into a dense one-hot Tensor of
    # shape [outer_dim, inner_dim].
    sparse_labels = tf.cast(flat_label, tf.int32)
    indices = tf.reshape(tf.range(outer_dim), [outer_dim, 1])
    concated = tf.concat(1, [indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated, [outer_dim, inner_dim],
                                      1.0, 0.0)
Developer: OwalnutO, Project: TURN-TAP, Lines: 29, Source: cnn.py


Example 17: _sparse_to_dense

def _sparse_to_dense(labels, num_classes):
    sparse_labels = tf.reshape(labels, [-1, 1])
    batch_size = sparse_labels.get_shape().as_list()[0]
    indices = tf.reshape(tf.range(batch_size), [batch_size, 1])
    concated = tf.concat(1, [indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated, [batch_size, num_classes], 1.0, 0.0)
    return dense_labels
Developer: wbaek, Project: tensorflow-tutorials, Lines: 7, Source: helper.py


Example 18: loss

def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Reshape the labels into a dense Tensor of
  # shape [batch_size, NUM_CLASSES].
  sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
  indices = tf.reshape(tf.range(0, FLAGS.batch_size, 1), [FLAGS.batch_size, 1])
  concated = tf.concat(1, [indices, sparse_labels])
  dense_labels = tf.sparse_to_dense(concated,
                                    [FLAGS.batch_size, NUM_CLASSES],
                                    1.0, 0.0)

  # Calculate the average cross entropy loss across the batch.
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits, dense_labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer: bicimsiz, Project: tensorflow, Lines: 30, Source: cifar10.py


Example 19: _testGraphExtensionRestore

  def _testGraphExtensionRestore(self):
    test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
    filename = os.path.join(test_dir, "metafile")
    saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
    with self.test_session(graph=tf.Graph()) as sess:
      # Restores from MetaGraphDef.
      new_saver = tf.train.import_meta_graph(filename)
      # Generates a new MetaGraphDef.
      new_saver.export_meta_graph()
      # Restores from checkpoint.
      new_saver.restore(sess, saver0_ckpt)
      # Adds loss and train ops.
      labels = tf.constant(0, tf.int32, shape=[100], name="labels")
      batch_size = tf.size(labels)
      labels = tf.expand_dims(labels, 1)
      indices = tf.expand_dims(tf.range(0, batch_size), 1)
      concated = tf.concat(1, [indices, labels])
      onehot_labels = tf.sparse_to_dense(
          concated, tf.pack([batch_size, 10]), 1.0, 0.0)
      logits = tf.get_collection("logits")[0]
      cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
                                                              onehot_labels,
                                                              name="xentropy")
      loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")

      tf.scalar_summary(loss.op.name, loss)
      # Creates the gradient descent optimizer with the given learning rate.
      optimizer = tf.train.GradientDescentOptimizer(0.01)

      # Runs train_op.
      train_op = optimizer.minimize(loss)
      sess.run(train_op)
Developer: 2er0, Project: tensorflow, Lines: 32, Source: saver_test.py


Example 20: ced

def ced(model, config, scope, connect, threshold = 1e-5):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length' %scope] = model['%s_out0length' %connect]
			model['%s_in1length' %scope] = model['%s_out1length' %connect]
			model['%s_in2length' %scope] = model['%s_out2length' %connect]
			model['%s_maxin2length' %scope] = model['%s_maxout2length' %connect]
			model['%s_inputs' %scope] = tf.clip_by_value(model['%s_outputs' %connect], threshold, 1. - threshold, name = '%s_inputs' %scope)
			model['%s_out0length' %scope] = model['%s_in0length' %scope]
			model['%s_out1length' %scope] = model['%s_in1length' %scope]
			model['%s_out2length' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_out2length' %scope)
			model['%s_maxout2length' %scope] = model['%s_maxin2length' %scope]

		with tf.variable_scope('labels'), tf.name_scope('labels'):
			model['%s_labels_len' %scope] = tf.placeholder(tf.int32, [model['%s_in0length' %scope]], '%s_labels_len' %scope)
			model['%s_labels_ind' %scope] = tf.placeholder(tf.int64, [None, 3], '%s_labels_ind' %scope)
			model['%s_labels_val' %scope] = tf.placeholder(tf.float32, [None], '%s_labels_val' %scope)
			model['%s_labels' %scope] = tf.sparse_to_dense(model['%s_labels_ind' %scope], [model['%s_in0length' %scope], model['%s_maxin2length' %scope], model['%s_maxin2length' %scope]], model['%s_labels_val' %scope], -1, name = '%s_labels' %scope)

		with tf.variable_scope('loss'), tf.name_scope('loss'):
			model['%s_loss' %scope] = tf.reduce_sum(tf.where(tf.less(model['%s_labels' %scope], tf.zeros([model['%s_in0length' %scope], model['%s_maxin2length' %scope], model['%s_maxin2length' %scope]], tf.float32)), tf.zeros([model['%s_in0length' %scope], model['%s_maxin2length' %scope], model['%s_maxin2length' %scope]], tf.float32), -tf.add(tf.multiply(model['%s_labels' %scope], tf.log(model['%s_inputs' %scope])), tf.multiply(tf.subtract(1., model['%s_labels' %scope]), tf.log(tf.subtract(1., model['%s_inputs' %scope]))))), name = '%s_loss' %scope)

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_output' %scope] = model['%s_inputs' %scope]

	return model
Developer: aaiijmrtt, Project: DEEPSPEECH, Lines: 26, Source: ced.py
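A closing migration note: tf.sparse_to_dense was deprecated during the TensorFlow 1.x series and is absent from the TensorFlow 2 namespace (it survives only as tf.compat.v1.sparse_to_dense). A minimal sketch of the usual replacement, assuming a TF 2.x environment (the values are illustrative):

import tensorflow as tf

# Build an explicit SparseTensor, then densify it.
sp = tf.SparseTensor(indices=[[0, 1], [2, 3]], values=[1.0, 1.0], dense_shape=[4, 5])
dense = tf.sparse.to_dense(sp, default_value=0.0)

For the one-hot use case that dominates the examples above, tf.one_hot is the simpler direct replacement.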



Note: The tensorflow.sparse_to_dense examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Please consult each project's License before distributing or reusing the code, and do not reproduce it without permission.

