
Python tensorflow.not_equal Function Code Examples


This article collects typical usage examples of the tensorflow.not_equal function in Python. If you are wondering what not_equal does, how to call it, or what real code that uses it looks like, the curated examples below should help.



20 code examples of the not_equal function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
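
Before diving into the collected snippets, a minimal sketch of tf.not_equal itself may help (this sketch assumes TensorFlow 2.x with eager execution; most of the snippets below are written in the older TensorFlow 1.x style):

import tensorflow as tf

a = tf.constant([1, 2, 0, 3, 0])
b = tf.constant([1, 0, 0, 3, 1])

# Element-wise comparison: returns a boolean tensor of the same shape.
mask = tf.not_equal(a, b)
print(mask.numpy())  # [False  True False False  True]

# A common idiom: cast the boolean mask to a numeric type to count mismatches.
print(tf.reduce_sum(tf.cast(mask, tf.int32)).numpy())  # 2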

Example 1: rpn_cls_loss

def rpn_cls_loss(rpn_cls_score,rpn_labels):
    '''
    Calculate the Region Proposal Network classifier loss. Measures how well 
    the RPN is able to propose regions by the performance of its "objectness" 
    classifier.
    
    Standard cross-entropy loss on logits
    '''
    with tf.variable_scope('rpn_cls_loss'):
        # input shape dimensions
        shape = tf.shape(rpn_cls_score)
        
        # Stack all classification scores into 2D matrix
        rpn_cls_score = tf.transpose(rpn_cls_score,[0,3,1,2])
        rpn_cls_score = tf.reshape(rpn_cls_score,[shape[0],2,shape[3]//2*shape[1],shape[2]])
        rpn_cls_score = tf.transpose(rpn_cls_score,[0,2,3,1])
        rpn_cls_score = tf.reshape(rpn_cls_score,[-1,2])
        
        # Stack labels
        rpn_labels = tf.reshape(rpn_labels,[-1])
        
        # Ignore label=-1 (Neither object nor background: IoU between 0.3 and 0.7)
        rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score,tf.where(tf.not_equal(rpn_labels,-1))),[-1,2])
        rpn_labels = tf.reshape(tf.gather(rpn_labels,tf.where(tf.not_equal(rpn_labels,-1))),[-1])
        
        # Cross entropy error
        rpn_cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_labels))
    
    return rpn_cross_entropy
Author: zymale | Project: tf-Faster-RCNN | Lines: 29 | Source: loss_functions.py
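
The tf.not_equal idiom to note here is the pair of gather calls that drop anchors labeled -1 before the loss is computed. A standalone sketch of just that filtering step (a toy illustration under TensorFlow 2.x eager execution, with made-up values):

import tensorflow as tf

labels = tf.constant([1, -1, 0, -1, 1])
scores = tf.constant([[0.1, 0.9], [0.5, 0.5], [0.8, 0.2], [0.3, 0.7], [0.4, 0.6]])

keep = tf.where(tf.not_equal(labels, -1))                   # indices of labeled anchors
scores_kept = tf.reshape(tf.gather(scores, keep), [-1, 2])  # shape [3, 2]
labels_kept = tf.reshape(tf.gather(labels, keep), [-1])
print(labels_kept.numpy())  # [1 0 1]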


Example 2: print_mask_parameter_counts

def print_mask_parameter_counts():
    print("# Mask Parameter Counts")
    print("  - Mask1: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix1, tf.zeros_like(indicator_matrix1)))))))
    print("  - Mask2: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix2, tf.zeros_like(indicator_matrix2)))))))
    print("  - Mask3: {0}".format(
        sess.run(tf.reduce_sum(tf.to_float(tf.not_equal(indicator_matrix3, tf.zeros_like(indicator_matrix3)))))))
Author: gstaff | Project: tfzip | Lines: 8 | Source: compression_test.py
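
The three counts above are all the same pattern: compare a mask tensor against zero and sum the resulting 0/1 values. A minimal sketch of that pattern on its own (TensorFlow 2.x eager execution, hypothetical mask values; tf.cast stands in for the deprecated tf.to_float):

import tensorflow as tf

indicator = tf.constant([[1.0, 0.0, 0.5],
                         [0.0, 0.0, 2.0]])

# Count the entries that differ from zero, i.e. the unpruned parameters.
count = tf.reduce_sum(tf.cast(tf.not_equal(indicator, 0.0), tf.float32))
print(count.numpy())  # 3.0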


Example 3: retrieve_seq_length_op3

def retrieve_seq_length_op3(data, pad_val=0):
    """An op to compute the length of a sequence, the data shape can be [batch_size, n_step(max)] or
    [batch_size, n_step(max), n_features].

    If the data has type of tf.string and pad_val is assigned as empty string (''), this op will compute the
    length of the string sequence.

    Parameters:
    -----------
    data : tensor
        [batch_size, n_step(max)] or [batch_size, n_step(max), n_features] with zero padding on the right hand side.
    pad_val:
        By default 0. If the data is tf.string, please assign this as empty string ('')

    Examples
    -----------
    >>> data = [[[1],[2],[0],[0],[0]],
    >>>         [[1],[2],[3],[0],[0]],
    >>>         [[1],[2],[6],[1],[0]]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [2, 3, 4]
    >>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
    >>>         [[2,3],[2,4],[3,2],[0,0],[0,0]],
    >>>         [[3,3],[2,2],[5,3],[1,2],[0,0]]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [4, 3, 4]
    >>> data = [[1,2,0,0,0],
    >>>         [1,2,3,0,0],
    >>>         [1,2,6,1,0]]
    >>> data = tf.convert_to_tensor(data, dtype=tf.float32)
    >>> length = tl.layers.retrieve_seq_length_op3(data)
    [2, 3, 4]
    >>> data = [['hello','world','','',''],
    >>>         ['hello','world','tensorlayer','',''],
    >>>         ['hello','world','tensorlayer','2.0','']]
    >>> data = tf.convert_to_tensor(data, dtype=tf.string)
    >>> length = tl.layers.retrieve_seq_length_op3(data, pad_val='')
    [2, 3, 4]

    """
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.reduce_sum(
            input_tensor=tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32),
            axis=1
        )
    elif data_shape_size == 2:
        return tf.reduce_sum(input_tensor=tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32), axis=1)
    elif data_shape_size == 1:
        raise ValueError("retrieve_seq_length_op3: data has wrong shape! Shape got ", data.get_shape().as_list())
    else:
        raise ValueError(
            "retrieve_seq_length_op3: handling data with num of dims %s hasn't been implemented!" % (data_shape_size)
        )
Author: zsdonghao | Project: tensorlayer | Lines: 56 | Source: recurrent.py


Example 4: padded_sequence_accuracy

def padded_sequence_accuracy(logits, labels):
  """Percentage of times that predictions matches labels everywhere (non-0)."""
  with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
    logits, labels = _pad_tensors_to_same_length(logits, labels)
    weights = tf.to_float(tf.not_equal(labels, 0))
    outputs = tf.to_int32(tf.argmax(logits, axis=-1))
    padded_labels = tf.to_int32(labels)
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
Author: 812864539 | Project: models | Lines: 11 | Source: metrics.py
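
The line weights = tf.to_float(tf.not_equal(labels, 0)) is what excludes padding (label 0) from the comparison. A toy run of the masking logic (TensorFlow 2.x eager execution, made-up values):

import tensorflow as tf

labels  = tf.constant([[3, 7, 0, 0]])  # 0 marks padding positions
outputs = tf.constant([[3, 5, 0, 9]])  # predictions after argmax

weights     = tf.cast(tf.not_equal(labels, 0), tf.float32)       # [[1. 1. 0. 0.]]
not_correct = tf.cast(tf.not_equal(outputs, labels), tf.float32) * weights
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=1))
# The mismatch at the padded position (0 vs 9) is forgiven; the real
# mismatch (7 vs 5) fails the whole sequence.
print(correct_seq.numpy())  # [0.]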


Example 5: target_mask_op

def target_mask_op(data, pad_val=0):  # HangSheng: return tensor for mask, if input is tf.string
    """Return tensor for mask, if input is ``tf.string``."""
    data_shape_size = data.get_shape().ndims
    if data_shape_size == 3:
        return tf.cast(tf.reduce_any(input_tensor=tf.not_equal(data, pad_val), axis=2), dtype=tf.int32)
    elif data_shape_size == 2:
        return tf.cast(tf.not_equal(data, pad_val), dtype=tf.int32)
    elif data_shape_size == 1:
        raise ValueError("target_mask_op: data has wrong shape!")
    else:
        raise ValueError("target_mask_op: handling data_shape_size %s hasn't been implemented!" % (data_shape_size))
Author: zsdonghao | Project: tensorlayer | Lines: 11 | Source: recurrent.py


Example 6: compute_error

    def compute_error(self):
        # Sets mask variables and performs batch processing
        self.batch_gold_select = self.batch_print_answer > 0.0
        self.full_column_mask = tf.concat(
            axis=1, values=[self.batch_number_column_mask, self.batch_word_column_mask])
        self.full_processed_column = tf.concat(
            axis=1,
            values=[self.batch_processed_number_column, self.batch_processed_word_column])
        self.full_processed_sorted_index_column = tf.concat(axis=1, values=[
            self.batch_processed_sorted_index_number_column,
            self.batch_processed_sorted_index_word_column
        ])
        self.select_bad_number_mask = tf.cast(
            tf.logical_and(
                tf.not_equal(self.full_processed_column,
                             self.utility.FLAGS.pad_int),
                tf.not_equal(self.full_processed_column,
                             self.utility.FLAGS.bad_number_pre_process)),
            self.data_type)
        self.select_mask = tf.cast(
            tf.logical_not(
                tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int)),
            self.data_type)
        self.select_word_mask = tf.cast(
            tf.logical_not(
                tf.equal(self.batch_word_column_entry_mask,
                         self.utility.dummy_token_id)), self.data_type)
        self.select_full_mask = tf.concat(
            axis=1, values=[self.select_mask, self.select_word_mask])
        self.select_whole_mask = tf.maximum(
            tf.reshape(
                tf.slice(self.select_mask, [0, 0, 0],
                         [self.batch_size, 1, self.max_elements]),
                [self.batch_size, self.max_elements]),
            tf.reshape(
                tf.slice(self.select_word_mask, [0, 0, 0],
                         [self.batch_size, 1, self.max_elements]),
                [self.batch_size, self.max_elements]))
        self.invert_select_full_mask = tf.cast(
            tf.concat(axis=1, values=[
                tf.equal(self.batch_number_column, self.utility.FLAGS.pad_int),
                tf.equal(self.batch_word_column_entry_mask,
                         self.utility.dummy_token_id)
            ]), self.data_type)
        self.batch_lookup_answer = tf.zeros(tf.shape(self.batch_gold_select))
        self.reset_select = self.select_whole_mask
        self.rows = tf.reduce_sum(self.select_whole_mask, 1)
        self.num_entries = tf.reshape(
            tf.reduce_sum(tf.reduce_sum(self.select_full_mask, 1), 1),
            [self.batch_size])
        self.final_error, self.final_correct = self.batch_process()
        return self.final_error
Author: Hukongtao | Project: models | Lines: 52 | Source: model.py


Example 7: add_embedding

    def add_embedding(self):
        # embed = np.load('glove{0}_uniform.npy'.format(self.emb_dim))
        with tf.variable_scope("Embed", regularizer=None):
            embedding = tf.get_variable(
                'embedding', [self.num_emb, self.emb_dim],
                initializer=tf.random_uniform_initializer(-0.05, 0.05),
                trainable=True, regularizer=None)
            # Map padding entries (-1) to index 0 so the lookup stays in range...
            ix = tf.to_int32(tf.not_equal(self.input, -1)) * self.input
            emb_tree = tf.nn.embedding_lookup(embedding, ix)
            # ...then zero out the embeddings of those padding positions.
            emb_tree = emb_tree * tf.expand_dims(
                tf.to_float(tf.not_equal(self.input, -1)), 2)
            return emb_tree
Author: Chelz | Project: RecursiveNN | Lines: 13 | Source: tf_tree_lstm.py


Example 8: add_placeholders

    def add_placeholders(self):
        dim2=self.config.maxnodesize
        dim1=self.config.batch_size
        self.input = tf.placeholder(tf.int32,[dim1,dim2],name='input')
        self.treestr = tf.placeholder(tf.int32,[dim1,dim2,2],name='tree')
        self.labels = tf.placeholder(tf.int32,[dim1,dim2],name='labels')
        self.dropout = tf.placeholder(tf.float32,name='dropout')

        self.n_inodes = tf.reduce_sum(tf.to_int32(tf.not_equal(self.treestr,-1)),[1,2])
        self.n_inodes = self.n_inodes // 2  # integer division; plain `/` would yield a float tensor under Python 3

        self.num_leaves = tf.reduce_sum(tf.to_int32(tf.not_equal(self.input,-1)),[1])
        self.batch_len = tf.placeholder(tf.int32,name="batch_len")
Author: Chelz | Project: RecursiveNN | Lines: 13 | Source: tf_tree_lstm.py


Example 9: add_embedding

    def add_embedding(self):
        # embed = np.load('glove{0}_uniform.npy'.format(self.emb_dim))
        with tf.device('/cpu:0'):
            with tf.variable_scope("Embed"):
                embedding = tf.get_variable(
                    'embedding', [self.num_emb, self.emb_dim],
                    initializer=tf.random_uniform_initializer(-0.05, 0.05),
                    trainable=True,
                    regularizer=tf.contrib.layers.l2_regularizer(0.0))
                ix = tf.to_int32(tf.not_equal(self.input, -1)) * self.input
                emb = tf.nn.embedding_lookup(embedding, ix)
                emb = emb * tf.to_float(tf.not_equal(tf.expand_dims(self.input, 2), -1))
                return emb
Author: Chelz | Project: RecursiveNN | Lines: 14 | Source: tf_seq_lstm.py


Example 10: get_mask

def get_mask(gt, num_classes, ignore_label):
    less_equal_class = tf.less_equal(gt, num_classes-1)
    not_equal_ignore = tf.not_equal(gt, ignore_label)
    mask = tf.logical_and(less_equal_class, not_equal_ignore)
    indices = tf.squeeze(tf.where(mask), 1)

    return indices
Author: ascenoputing | Project: SemanticSegmentation_DL | Lines: 7 | Source: train.py


Example 11: classification_costs

def classification_costs(logits, labels, name=None):
    """Compute classification cost mean and classification cost per sample

    Assume unlabeled examples have label == -1. For unlabeled examples, cost == 0.
    Compute the mean over all examples.
    Note that unlabeled examples are treated differently in error calculation.
    """
    with tf.name_scope(name, "classification_costs") as scope:
        applicable = tf.not_equal(labels, -1)

        # Change -1s to zeros to make cross-entropy computable
        labels = tf.where(applicable, labels, tf.zeros_like(labels))

        # This will now have incorrect values for unlabeled examples
        per_sample = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)

        # Retain costs only for labeled
        per_sample = tf.where(applicable, per_sample, tf.zeros_like(per_sample))

        # Take mean over all examples, not just labeled examples.
        labeled_sum = tf.reduce_sum(per_sample)
        total_count = tf.to_float(tf.shape(per_sample)[0])
        mean = tf.div(labeled_sum, total_count, name=scope)

        return mean, per_sample
Author: ys2899 | Project: mean-teacher | Lines: 25 | Source: model.py
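
Unlike Example 1, which gathers only the labeled rows, this function keeps the batch shape intact: tf.not_equal marks the labeled examples, tf.where substitutes a valid class id for the -1 placeholders, and the bogus per-sample costs are zeroed out afterwards by the same mask. The substitution step in isolation (TensorFlow 2.x eager execution, toy values):

import tensorflow as tf

labels = tf.constant([2, -1, 0, -1])
applicable = tf.not_equal(labels, -1)

# Replace -1 with 0 so the cross-entropy op receives valid class ids;
# the costs computed for those rows are discarded by the mask later.
safe_labels = tf.where(applicable, labels, tf.zeros_like(labels))
print(safe_labels.numpy())  # [2 0 0 0]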


Example 12: _add_rpn_losses

    def _add_rpn_losses(self, sigma_rpn=3.0):
        with tf.variable_scope('loss_' + self._tag) as scope:
            # RPN, class loss
            rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
            rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
            rpn_select = tf.where(tf.not_equal(rpn_label, -1))
            rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
            rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
            rpn_cross_entropy = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

            # RPN, bbox loss
            rpn_bbox_pred = self._predictions['rpn_bbox_pred']
            rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
            rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
            rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']

            rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                                rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])

            self._losses['rpn_cross_entropy'] = rpn_cross_entropy
            self._losses['rpn_loss_box'] = rpn_loss_box

            self._losses['rpn_loss'] = rpn_loss_box + rpn_cross_entropy

            self._event_summaries.update(self._losses)

        return self._losses['rpn_loss']
Author: jacke121 | Project: tf_rfcn | Lines: 28 | Source: network_rfcn.py


Example 13: measure

def measure():
    E = tf.reduce_mean(energy(layers))
    C = tf.reduce_mean(cost(layers))
    y_prediction = tf.argmax(layers[-1], 1)
    error        = tf.reduce_mean(tf.cast(tf.not_equal(y_prediction, tf.cast(y, tf.int64)), tf.float32))

    return E, C, error
Author: alexeyche | Project: alexeyche-junk | Lines: 7 | Source: baseline.py


Example 14: padded_sequence_accuracy

def padded_sequence_accuracy(predictions,
                             labels,
                             weights_fn=common_layers.weights_nonzero):
  """Percentage of times that predictions matches labels everywhere (non-0)."""
  # If the last dimension is 1 then we're using L1/L2 loss.
  if common_layers.shape_list(predictions)[-1] == 1:
    return rounding_sequence_accuracy(
        predictions, labels, weights_fn=weights_fn)
  with tf.variable_scope(
      "padded_sequence_accuracy", values=[predictions, labels]):
    padded_predictions, padded_labels = common_layers.pad_with_zeros(
        predictions, labels)
    weights = weights_fn(padded_labels)

    # Flatten, keeping batch dim (and num_classes dim for predictions)
    # TPU argmax can only deal with a limited number of dimensions
    predictions_shape = common_layers.shape_list(padded_predictions)
    batch_size = predictions_shape[0]
    num_classes = predictions_shape[-1]
    flat_size = common_layers.list_product(
        common_layers.shape_list(padded_labels)[1:])
    padded_predictions = tf.reshape(
        padded_predictions,
        [batch_size, common_layers.list_product(predictions_shape[1:-1]),
         num_classes])
    padded_labels = tf.reshape(padded_labels, [batch_size, flat_size])
    weights = tf.reshape(weights, [batch_size, flat_size])

    outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1))
    padded_labels = tf.to_int32(padded_labels)
    not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
    axis = list(range(1, len(outputs.get_shape())))
    correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
    return correct_seq, tf.constant(1.0)
Author: qixiuai | Project: tensor2tensor | Lines: 34 | Source: metrics.py


Example 15: compute_loss

    def compute_loss(self,emb_batch,curr_batch_size=None):
        outloss=[]
        prediction=[]
        for idx_batch in range(self.config.batch_size):

            tree_states=self.compute_states(emb_batch,idx_batch)
            logits = self.create_output(tree_states)

            labels1=tf.gather(self.labels,idx_batch)
            labels2=tf.reduce_sum(tf.to_int32(tf.not_equal(labels1,-1)))
            labels=tf.gather(labels1,tf.range(labels2))
            loss = self.calc_loss(logits,labels)


            pred = tf.nn.softmax(logits)

            pred_root=tf.gather(pred,labels2-1)


            prediction.append(pred_root)
            outloss.append(loss)

        batch_loss = tf.stack(outloss)   # tf.pack was renamed tf.stack in TF 1.0
        self.pred = tf.stack(prediction)

        return batch_loss
Author: Chelz | Project: RecursiveNN | Lines: 26 | Source: tf_tree_lstm.py


Example 16: dynamic_decode_and_search

  def dynamic_decode_and_search(self,
                                embedding,
                                start_tokens,
                                end_token,
                                vocab_size,
                                initial_state=None,
                                beam_width=5,
                                length_penalty=0.0,
                                maximum_iterations=250,
                                mode=tf.estimator.ModeKeys.PREDICT,
                                memory=None,
                                memory_sequence_length=None,
                                dtype=None):
    cache = self._init_cache(memory, memory_sequence_length=memory_sequence_length)
    symbols_to_logits_fn = self._symbols_to_logits_fn(embedding, vocab_size, mode)

    outputs, log_probs = beam_search(
        symbols_to_logits_fn,
        start_tokens,
        beam_width,
        maximum_iterations,
        vocab_size,
        length_penalty,
        states=cache,
        eos_id=end_token)
    outputs = tf.slice(outputs, [0, 0, 1], [-1, -1, -1]) # Ignore <s>.

    lengths = tf.not_equal(outputs, 0)
    lengths = tf.cast(lengths, tf.int32)
    lengths = tf.reduce_sum(lengths, axis=-1)

    return (outputs, None, lengths, log_probs)
Author: yhgon | Project: OpenNMT-tf | Lines: 32 | Source: self_attention_decoder.py
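
The last three lines recover sequence lengths from the decoded ids by counting non-padding (non-zero) positions. The same computation in isolation (TensorFlow 2.x eager execution, hypothetical decoder output):

import tensorflow as tf

# Decoded token ids of shape [batch, beam, time]; 0 pads after the sequence end.
outputs = tf.constant([[[4, 7, 9, 0, 0]],
                       [[5, 2, 0, 0, 0]]])

lengths = tf.reduce_sum(tf.cast(tf.not_equal(outputs, 0), tf.int32), axis=-1)
print(lengths.numpy())  # [[3]
                        #  [2]]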


Example 17: loss

def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 3-D tensor
            of shape [batch_size,IMAGE_SIZE,IMAGE_SIZE]

  Returns:
    Loss tensor of type float.
  """
  labels = tf.cast(labels, tf.int64)
  label_shape = labels.get_shape().as_list()
  reshaped_labels = tf.reshape(labels,
                              [label_shape[0]*label_shape[1]*label_shape[2]])
  print(reshaped_labels.get_shape())
  logits_shape =logits.get_shape().as_list()
  reshaped_logits = tf.reshape(logits,
                              [logits_shape[0]*logits_shape[1]*logits_shape[2],
                              logits_shape[3]]) 
  cross_entropy_per_pixel = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=reshaped_logits, labels=reshaped_labels,
      name='cross_entropy_per_pixel')
  no_loss_mask = tf.not_equal(reshaped_labels, -1)

  filtered_cross_entropy = tf.boolean_mask(cross_entropy_per_pixel,
                                           no_loss_mask,
                                           name='no_loss_mask')
  cross_entropy_mean = tf.reduce_mean(filtered_cross_entropy, name='cross_entropy')
#  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Author: Thrasi | Project: thesis-project | Lines: 34 | Source: cifar10.py


Example 18: while_exit_cond

    def while_exit_cond(result, logits, loss):  # pylint: disable=unused-argument
      """Exit the loop either if reach decode_length or EOS."""
      length = common_layers.shape_list(result)[1]

      not_overflow = length < decode_length

      if self._problem_hparams.stop_at_eos:

        def fn_not_eos():
          return tf.not_equal(  # Check if the last predicted element is an EOS
              tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)

        not_eos = tf.cond(
            # We only check for early stopping if there is at least 1 element
            # (otherwise not_eos would crash).
            tf.not_equal(length, 0),
            fn_not_eos,
            lambda: True,
        )

        return tf.cond(
            tf.equal(batch_size, 1),
            # If batch_size == 1, we check EOS for early stopping
            lambda: tf.logical_and(not_overflow, not_eos),
            # Else, just wait for max length
            lambda: not_overflow)
      return not_overflow
Author: AranKomat | Project: tensor2tensor | Lines: 27 | Source: t2t_model.py


Example 19: char_accuracy

def char_accuracy(predictions, targets, rej_char, streaming=False):
  """Computes character level accuracy.

  Both predictions and targets should have the same shape
  [batch_size x seq_length].

  Args:
    predictions: predicted characters ids.
    targets: ground truth character ids.
    rej_char: the character id used to mark an empty element (end of sequence).
    streaming: if True, uses the streaming mean from the slim.metric module.

  Returns:
    an update op for execution and a value tensor whose value on evaluation
    returns the total character accuracy.
  """
  with tf.variable_scope('CharAccuracy'):
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())

    targets = tf.to_int32(targets)
    const_rej_char = tf.constant(rej_char, shape=targets.get_shape())
    weights = tf.to_float(tf.not_equal(targets, const_rej_char))
    correct_chars = tf.to_float(tf.equal(predictions, targets))
    accuracy_per_example = tf.div(
        tf.reduce_sum(tf.multiply(correct_chars, weights), 1),
        tf.reduce_sum(weights, 1))
    if streaming:
      return tf.contrib.metrics.streaming_mean(accuracy_per_example)
    else:
      return tf.reduce_mean(accuracy_per_example)
Author: 812864539 | Project: models | Lines: 30 | Source: metrics.py
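
Here tf.not_equal builds per-character weights so that positions holding rej_char contribute to neither the numerator nor the denominator of the accuracy. A toy run of the weighting (TensorFlow 2.x eager execution, made-up ids with rej_char = 0):

import tensorflow as tf

targets     = tf.constant([[3, 8, 0, 0]])
predictions = tf.constant([[3, 5, 0, 0]])

weights       = tf.cast(tf.not_equal(targets, 0), tf.float32)  # [[1. 1. 0. 0.]]
correct_chars = tf.cast(tf.equal(predictions, targets), tf.float32)
accuracy_per_example = (tf.reduce_sum(correct_chars * weights, 1)
                        / tf.reduce_sum(weights, 1))
print(accuracy_per_example.numpy())  # [0.5]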


Example 20: loss

    def loss(self, logits, labels, regularization):
        """Adds to the inference model the layers required to generate loss."""
        with tf.name_scope('loss'):
            with tf.name_scope('var_loss'):
                labels = tf.cast(labels, tf.float32)
                shape = labels.get_shape()

                same_class = tf.boolean_mask(logits, tf.equal(labels, tf.ones(shape)))
                diff_class = tf.boolean_mask(logits, tf.not_equal(labels, tf.ones(shape)))
                same_mean, same_var = tf.nn.moments(same_class, [0])
                diff_mean, diff_var = tf.nn.moments(diff_class, [0])
                var_loss = same_var + diff_var

            with tf.name_scope('mean_loss'):
                mean_loss = self.lamda * tf.where(tf.greater(self.mu - (same_mean - diff_mean), 0),
                                                  self.mu - (same_mean - diff_mean), 0)

            with tf.name_scope('regularization'):
                regularization *= tf.add_n(self.regularizers)

            loss = var_loss + mean_loss + regularization

            # Summaries for TensorBoard.
            tf.summary.scalar('loss/total', loss)
            with tf.name_scope('averages'):
                averages = tf.train.ExponentialMovingAverage(0.9)
                op_averages = averages.apply([var_loss, mean_loss, regularization, loss])
                tf.summary.scalar('loss/avg/var_loss', averages.average(var_loss))
                tf.summary.scalar('loss/avg/mean_loss', averages.average(mean_loss))
                tf.summary.scalar('loss/avg/regularization', averages.average(regularization))
                tf.summary.scalar('loss/avg/total', averages.average(loss))
                with tf.control_dependencies([op_averages]):
                    loss_average = tf.identity(averages.average(loss), name='control')
            return loss, loss_average
Author: parisots | Project: gcn_metric_learning | Lines: 34 | Source: models_siamese.py



Note: the tensorflow.not_equal examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are subject to each project's License. Do not repost without permission.

