
Python tensorflow.reduce_mean Function Code Examples


This article collects typical usage examples of the tensorflow.reduce_mean function in Python. If you have been wondering what reduce_mean does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the reduce_mean function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code examples.
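Before the project-level examples, here is a minimal, self-contained sketch of what tf.reduce_mean computes (written against the TF 1.x API that all of the examples below use; the tensor values are purely illustrative):

    import tensorflow as tf

    x = tf.constant([[1., 2.],
                     [3., 4.]])

    mean_all  = tf.reduce_mean(x)           # scalar 2.5: mean over all elements
    mean_ax0  = tf.reduce_mean(x, axis=0)   # [2., 3.]: mean down each column
    mean_ax1  = tf.reduce_mean(x, axis=1)   # [1.5, 3.5]: mean across each row
    mean_keep = tf.reduce_mean(x, axis=1, keep_dims=True)  # shape (2, 1): rank preserved

    with tf.Session() as sess:
        print(sess.run([mean_all, mean_ax0, mean_ax1, mean_keep]))

The keep_dims=True form (spelled keepdims in later releases) is what several examples below rely on when the reduced tensor must broadcast back against the original, e.g. for normalization.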

Example 1: __init__

    def __init__(self, config):
        self.config = config

        self.input = tf.placeholder('int32', [self.config.batch_size, config.max_seq_len], name='input')
        self.labels = tf.placeholder('int64', [self.config.batch_size], name='labels')
        self.labels_one_hot = tf.one_hot(indices=self.labels,
                                         depth=config.output_dim,
                                         on_value=1.0,
                                         off_value=0.0,
                                         axis=-1)

        self.gru = GRUCell(config.hidden_state_dim)

        embeddings_we = tf.get_variable('word_embeddings', initializer=tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0))
        self.emb = embed_input = tf.nn.embedding_lookup(embeddings_we, self.input)
        inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, config.max_seq_len, embed_input)]

        outputs, last_slu_state = tf.nn.rnn(
            cell=self.gru,
            inputs=inputs,
            dtype=tf.float32,)

        w_project = tf.get_variable('project2labels', initializer=tf.random_uniform([config.hidden_state_dim, config.output_dim], -1.0, 1.0))
        self.logits = logits_bo = tf.matmul(last_slu_state, w_project)
        tf.histogram_summary('logits', logits_bo)
        self.probabilities = tf.nn.softmax(logits_bo)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_bo, self.labels_one_hot))
        self.predict = tf.nn.softmax(logits_bo)

        # TensorBoard
        self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.predict, 1), self.labels), 'float32'), name='accuracy')
        tf.scalar_summary('CCE loss', self.loss)
        tf.scalar_summary('Accuracy', self.accuracy)
        self.tb_info = tf.merge_all_summaries()
Author: vojtsek | Project: sds-tracker | Lines: 34 | Source: fat_model.py


Example 2: fprop_noscope

 def fprop_noscope(self, x):
     mean = tf.reduce_mean(x, (1, 2), keep_dims=True)
     x = x - mean
     std = tf.sqrt(1e-7 +
                   tf.reduce_mean(tf.square(x), (1, 2), keep_dims=True))
     x = x / std
     return x * self.gamma + self.beta
Author: limin24kobe | Project: cleverhans | Lines: 7 | Source: model.py
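Here reduce_mean with keep_dims=True computes per-example statistics over the spatial axes (1, 2) (assuming an NHWC layout), so the method standardizes each input before the learned affine transform, in the style of layer/instance normalization:

$$y = \gamma \cdot \frac{x - \mu}{\sqrt{\epsilon + \mathbb{E}\big[(x - \mu)^2\big]}} + \beta, \qquad \mu = \mathbb{E}_{(1,2)}[x],\ \epsilon = 10^{-7}$$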


Example 3: get_rebar_gradient

  def get_rebar_gradient(self):
    """Get the rebar gradient."""
    hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
    if self.hparams.quadratic:
      gumbel_cv, _ = self._create_gumbel_control_variate_quadratic(logQHard)
    else:
      gumbel_cv, _ = self._create_gumbel_control_variate(logQHard)

    f_grads = self.optimizer_class.compute_gradients(tf.reduce_mean(-nvil_gradient))

    eta = {}
    h_grads, eta_statistics = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
        eta)

    model_grads = U.add_grads_and_vars(f_grads, h_grads)
    total_grads = model_grads

    # Construct the variance objective
    variance_objective = tf.reduce_mean(tf.square(U.vectorize(model_grads, set_none_to_zero=True)))

    debug = { 'ELBO': hardELBO,
             'etas': eta_statistics,
             'variance_objective': variance_objective,
             }
    return total_grads, debug, variance_objective
Author: ALISCIFP | Project: models | Lines: 26 | Source: rebar.py


Example 4: standard_reg

def standard_reg():
    reg = tf.constant(0.0, dtype=tf.float32)
    reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW1']))
    #reg = reg + standard_w_weight_reg * tf.reduce_mean(tf.square(net_params['sDW2']))    
    reg = reg + regressor_w_weight_reg * tf.reduce_mean(tf.square(net_params['sRW']))

    return reg
Author: rbharath | Project: deepchem | Lines: 7 | Source: bondvolution.py


Example 5: func_for_scan

        def func_for_scan(prev_output, current_element):

            # Sample decoder weights  __, [1], [1]
            W, log_pW, log_qW = decoder.sample_weights()

            # Sample z   [P,B,Z], [P,B], [P,B]
            z, log_pz, log_qz = self.sample_z(x, encoder, decoder, W)
            # z: [PB,Z]
            z = tf.reshape(z, [self.n_z_particles*self.batch_size, self.z_size])

            # Decode [PB,X]
            y = decoder.feedforward(W, z)
            # y: [P,B,X]
            y = tf.reshape(y, [self.n_z_particles, self.batch_size, self.x_size])

            # Likelihood p(x|z)  [P,B]
            log_px = log_bern(x,y)

            #Store for later
            # log_pW_list.append(tf.reduce_mean(log_pW))
            # log_qW_list.append(tf.reduce_mean(log_qW))
            # log_pz_list.append(tf.reduce_mean(log_pz))
            # log_qz_list.append(tf.reduce_mean(log_qz))
            # log_px_list.append(tf.reduce_mean(log_px))

            to_output = []
            to_output.append(tf.reduce_mean(log_px))
            to_output.append(tf.reduce_mean(log_pz))
            to_output.append(tf.reduce_mean(log_qz))   
            to_output.append(tf.reduce_mean(log_pW))
            to_output.append(tf.reduce_mean(log_qW))
                    
            return tf.stack(to_output)
Author: chriscremer | Project: Other_Code | Lines: 33 | Source: BVAE.py


Example 6: __init__

    def __init__(self, nA,
                 learning_rate,decay,grad_clip,entropy_beta,
                 state_shape=[84,84,4],
                 master=None, device_name='/gpu:0', scope_name='master'):
        with tf.device(device_name) :
            self.state = tf.placeholder(tf.float32,[None]+state_shape)
            block, self.scope  = ActorCritic._build_shared_block(self.state,scope_name)
            self.policy, self.log_softmax_policy = ActorCritic._build_policy(block,nA,scope_name)
            self.value = ActorCritic._build_value(block,scope_name)

            self.train_vars = sorted(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name), key=lambda v:v.name)
            if( master is not None ) :
                self.sync_op= self._sync_op(master)
                self.action = tf.placeholder(tf.int32,[None,])
                self.target_value = tf.placeholder(tf.float32,[None,])

                advantage = self.target_value - self.value
                entropy = tf.reduce_sum(-1. * self.policy * self.log_softmax_policy,axis=1)
                log_p_s_a = tf.reduce_sum(self.log_softmax_policy * tf.one_hot(self.action,nA),axis=1)

                self.policy_loss = tf.reduce_mean(tf.stop_gradient(advantage)*log_p_s_a)
                self.entropy_loss = tf.reduce_mean(entropy)
                self.value_loss = tf.reduce_mean(advantage**2)

                loss = -self.policy_loss - entropy_beta* self.entropy_loss + self.value_loss
                self.gradients = tf.gradients(loss,self.train_vars)
                clipped_gs = [tf.clip_by_average_norm(g,grad_clip) for g in self.gradients]
                self.train_op = master.optimizer.apply_gradients(zip(clipped_gs,master.train_vars))
            else :
                #self.optimizer = tf.train.AdamOptimizer(learning_rate,beta1=BETA)
                self.optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=decay,use_locking=True)
Author: JoyDoSun | Project: tf-a3c-gpu | Lines: 31 | Source: network.py


Example 7: eval_summary

  def eval_summary(self, ground_truth, prediction):
    """
      Compute evaluation metrics (for EVAL mode).

    Args:
      ground_truth: Ground truth, shape: (?, #priors, 4 + #classes).
      prediction: Dictionary of predicted tensors, shape: {'locs'  : (?, #priors, 4), \
                                                           'confs' : (?, #priors, #classes), \
                                                           'logits': (?, #priors, #classes)}.
    Returns:
      Loss stub, shape: (1,).
    """
    localization_loss = self._localization_loss(ground_truth[:, :, :4],
                                                prediction['locs'])  # shape: (batch_size, num_priors)
    classification_loss = self._classification_loss(ground_truth[:, :, 4:],
                                                    prediction['logits'])  # shape: (batch_size, num_priors)
    positives = tf.reduce_max(ground_truth[:, :, 5:], axis=-1)  # shape: (batch_size, num_priors)
    num_positives = tf.reduce_sum(positives)  # shape: (1,)
    loc_loss = tf.reduce_sum(localization_loss * positives, axis=-1)  # shape: (batch_size,)
    classification_loss = tf.reduce_sum(classification_loss, axis=-1)  # shape: (batch_size,)

    evaluation_tensors = {
      'total_classification_loss':  tf.reduce_mean(classification_loss),
      'total_localization_loss': tf.reduce_mean(loc_loss),
    }

    self.__add_evaluation(evaluation_tensors)

    total_loss = tf.reduce_mean(classification_loss + self.loc_weight * loc_loss) / tf.maximum(1.0, num_positives)
    return total_loss
Author: undeadinu | Project: training_toolbox_tensorflow | Lines: 30 | Source: loss.py
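Writing the final reduction out, with $p_{b,i} \in \{0,1\}$ the positives mask, $L^{cls}$ and $L^{loc}$ the per-prior losses, and $\alpha$ the loc_weight:

$$L = \frac{1}{\max\!\left(1, \sum_{b,i} p_{b,i}\right)} \cdot \operatorname{mean}_b\!\left( \sum_i L^{cls}_{b,i} + \alpha \sum_i p_{b,i}\, L^{loc}_{b,i} \right)$$

so tf.reduce_mean averages over the batch while the positive count normalizes the overall scale of the loss.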


Example 8: get_train

def get_train(train_ph_dict,var_dict,var_ph_dict):
    mid0 = tf.one_hot(train_ph_dict['choice_0'], 9, axis=-1, dtype=tf.float32)
    mid0 = mid0 * get_q(train_ph_dict['state_0'],var_dict)
    mid0 = tf.reduce_sum(mid0, reduction_indices=[1])

    mid1 = get_q(train_ph_dict['state_1'],var_ph_dict)
    mid1 = tf.reduce_max(mid1, reduction_indices=[1])  
    mid1 = mid1 * train_ph_dict['cont']
    mid1 = mid1 * tf.constant(TRAIN_BETA)

    l2r = tf.constant(0.0)
    cell_count = tf.constant(0.0)
    for v in var_dict.values():
        l2r = l2r + get_l2(v)
        cell_count = cell_count + tf.to_float(tf.size(v))
    l2r = l2r / cell_count
    l2r = l2r / tf.constant(ELEMENT_L2_FACTOR*ELEMENT_L2_FACTOR)
    l2r = l2r * tf.constant(L2_WEIGHT)
    
    mid = mid0-mid1-train_ph_dict['reward_1']
#    mid = mid * mid
    mid = tf.abs(mid)
    mid = tf.reduce_mean(mid)
    score_diff = mid
    mid = mid + l2r
    mid = mid + ( tf.abs( tf.reduce_mean(var_dict['b5']) ) * tf.constant(L2_WEIGHT) )

    loss = mid

    mid = tf.train.GradientDescentOptimizer(0.5).minimize(mid,var_list=var_dict.values())
    train = mid
    
    return train, loss, score_diff
Author: luzi82 | Project: codelog.tensorflow.tictactoe | Lines: 33 | Source: deeplearn2.py


Example 9: _potential_scale_reduction_single_state

def _potential_scale_reduction_single_state(state, independent_chain_ndims):
  """potential_scale_reduction for one single state `Tensor`."""
  with tf.name_scope(
      'potential_scale_reduction_single_state',
      values=[state, independent_chain_ndims]):
    # We assume exactly one leading dimension indexes e.g. correlated samples
    # from each Markov chain.
    state = tf.convert_to_tensor(state, name='state')
    sample_ndims = 1

    sample_axis = tf.range(0, sample_ndims)
    chain_axis = tf.range(sample_ndims,
                          sample_ndims + independent_chain_ndims)
    sample_and_chain_axis = tf.range(
        0, sample_ndims + independent_chain_ndims)

    n = _axis_size(state, sample_axis)
    m = _axis_size(state, chain_axis)

    # In the language of Brooks and Gelman (1998),
    # B / n is the between chain variance, the variance of the chain means.
    # W is the within sequence variance, the mean of the chain variances.
    b_div_n = _reduce_variance(
        tf.reduce_mean(state, sample_axis, keepdims=True),
        sample_and_chain_axis,
        biased=False)
    w = tf.reduce_mean(
        _reduce_variance(state, sample_axis, keepdims=True, biased=True),
        sample_and_chain_axis)

    # sigma^2_+ is an estimate of the true variance, which would be unbiased if
    # each chain was drawn from the target.  c.f. "law of total variance."
    sigma_2_plus = w + b_div_n

    return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
Author: asudomoeva | Project: probability | Lines: 35 | Source: diagnostic.py
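The return expression is the potential scale reduction statistic of Brooks and Gelman (1998). With $m$ chains of $n$ samples each, $W$ the mean within-chain variance, and $B/n$ the variance of the chain means (both built from the nested reduce_mean calls above):

$$\hat{\sigma}^2_+ = W + \frac{B}{n}, \qquad \hat{R} = \frac{m+1}{m} \cdot \frac{\hat{\sigma}^2_+}{W} - \frac{n-1}{mn}$$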


Example 10: init_opt

    def init_opt(self):
        is_recurrent = int(self.policy.recurrent)
        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = tensor_utils.new_tensor(
            'advantage',
            ndim=1 + is_recurrent,
            dtype=tf.float32,
        )
        dist = self.policy.distribution

        old_dist_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name='old_%s' % k)
            for k, shape in dist.dist_info_specs
            }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]

        state_info_vars = {
            k: tf.placeholder(tf.float32, shape=[None] * (1 + is_recurrent) + list(shape), name=k)
            for k, shape in self.policy.state_info_specs
            }
        state_info_vars_list = [state_info_vars[k] for k in self.policy.state_info_keys]

        if is_recurrent:
            valid_var = tf.placeholder(tf.float32, shape=[None, None], name="valid")
        else:
            valid_var = None

        dist_info_vars = self.policy.dist_info_sym(obs_var, state_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)
        lr = dist.likelihood_ratio_sym(action_var, old_dist_info_vars, dist_info_vars)
        if is_recurrent:
            mean_kl = tf.reduce_sum(kl * valid_var) / tf.reduce_sum(valid_var)
            surr_loss = - tf.reduce_sum(lr * advantage_var * valid_var) / tf.reduce_sum(valid_var)
        else:
            mean_kl = tf.reduce_mean(kl)
            surr_loss = - tf.reduce_mean(lr * advantage_var)

        input_list = [
                         obs_var,
                         action_var,
                         advantage_var,
                     ] + state_info_vars_list + old_dist_info_vars_list
        if is_recurrent:
            input_list.append(valid_var)

        self.optimizer.update_opt(
            loss=surr_loss,
            target=self.policy,
            leq_constraint=(mean_kl, self.step_size),
            inputs=input_list,
            constraint_name="mean_kl"
        )
        return dict()
Author: QuantCollective | Project: maml_rl | Lines: 60 | Source: npo.py
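Note that the recurrent branch deliberately avoids tf.reduce_mean: padded timesteps must not contribute, so it computes the masked mean $\bar{x} = \sum_t x_t v_t / \sum_t v_t$ by hand, which coincides with tf.reduce_mean exactly when every $v_t = 1$.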


Example 11: build_graph

    def build_graph(self, image_pos):
        image_pos = image_pos / 128.0 - 1

        z = tf.random_normal([self.batch, self.zdim], name='z_train')
        z = tf.placeholder_with_default(z, [None, self.zdim], name='z')

        with argscope([Conv2D, Conv2DTranspose, FullyConnected],
                      kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
            with tf.variable_scope('gen'):
                image_gen = self.generator(z)
            tf.summary.image('generated-samples', image_gen, max_outputs=30)

            alpha = tf.random_uniform(shape=[self.batch, 1, 1, 1],
                                      minval=0., maxval=1., name='alpha')
            interp = image_pos + alpha * (image_gen - image_pos)

            with tf.variable_scope('discrim'):
                vecpos = self.discriminator(image_pos)
                vecneg = self.discriminator(image_gen)
                vec_interp = self.discriminator(interp)

        # the Wasserstein-GAN losses
        self.d_loss = tf.reduce_mean(vecneg - vecpos, name='d_loss')
        self.g_loss = tf.negative(tf.reduce_mean(vecneg), name='g_loss')

        # the gradient penalty loss
        gradients = tf.gradients(vec_interp, [interp])[0]
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()
Author: quanlzheng | Project: tensorpack | Lines: 35 | Source: Improved-WGAN.py


Example 12: testSampleConsistentStats

  def testSampleConsistentStats(self):
    loc = np.float32([[-1., 1], [1, -1]])
    scale = np.float32([1., 0.5])
    n_samp = 1e4
    with self.test_session() as sess:
      ind = tfd.Independent(
          distribution=tfd.MultivariateNormalDiag(
              loc=loc, scale_identity_multiplier=scale),
          reinterpreted_batch_ndims=1)

      x = ind.sample(int(n_samp), seed=42)
      sample_mean = tf.reduce_mean(x, axis=0)
      sample_var = tf.reduce_mean(tf.squared_difference(x, sample_mean), axis=0)
      sample_std = tf.sqrt(sample_var)
      sample_entropy = -tf.reduce_mean(ind.log_prob(x), axis=0)

      [
          sample_mean_, sample_var_, sample_std_, sample_entropy_,
          actual_mean_, actual_var_, actual_std_, actual_entropy_,
          actual_mode_,
      ] = sess.run([
          sample_mean, sample_var, sample_std, sample_entropy,
          ind.mean(), ind.variance(), ind.stddev(), ind.entropy(), ind.mode(),
      ])

      self.assertAllClose(sample_mean_, actual_mean_, rtol=0.02, atol=0.)
      self.assertAllClose(sample_var_, actual_var_, rtol=0.04, atol=0.)
      self.assertAllClose(sample_std_, actual_std_, rtol=0.02, atol=0.)
      self.assertAllClose(sample_entropy_, actual_entropy_, rtol=0.01, atol=0.)
      self.assertAllClose(loc, actual_mode_, rtol=1e-6, atol=0.)
Author: lewisKit | Project: probability | Lines: 30 | Source: independent_test.py


Example 13: batchnormalize

def batchnormalize(X, eps=1e-8, g=None, b=None):
    if X.get_shape().ndims == 4:
        mean = tf.reduce_mean(X, [0,1,2])
        std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )
        X = (X-mean) / tf.sqrt(std+eps)

        if g is not None and b is not None:
            g = tf.reshape(g, [1,1,1,-1])
            b = tf.reshape(b, [1,1,1,-1])
            X = X*g + b

    elif X.get_shape().ndims == 2:
        mean = tf.reduce_mean(X, 0)
        std = tf.reduce_mean(tf.square(X-mean), 0)
        X = (X-mean) / tf.sqrt(std+eps)#std

        if g is not None and b is not None:
            g = tf.reshape(g, [1,-1])
            b = tf.reshape(b, [1,-1])
            X = X*g + b

    else:
        raise NotImplementedError

    return X
Author: yihui-he | Project: GAN-MNIST | Lines: 25 | Source: model.py


Example 14: create_graph

    def create_graph(self):
        with self.__graph.as_default():
            self.__featurePlaceHolder = tf.placeholder(dtype=tf.int32, shape=[None, self.__window_size * 2])
            self.__labelPlaceHolder = tf.placeholder(dtype=tf.int32, shape=[None, 1])

            onehot_lookup_tables = tf.Variable(
                initial_value=tf.truncated_normal(shape=[self.__vocabulary_size, self.__embedding_size])
            )

            embedding = tf.nn.embedding_lookup(params=onehot_lookup_tables, ids = self.__featurePlaceHolder)

            projection_out = tf.reduce_mean(embedding, axis=1)

            softmax_weight = tf.Variable(initial_value=tf.truncated_normal(
                shape=[self.__vocabulary_size, self.__embedding_size]
            ))
            softmax_biases = tf.Variable(initial_value=tf.zeros([self.__vocabulary_size]))

            sampled_loss_per_batch = tf.nn.sampled_softmax_loss(
                weights=softmax_weight,
                biases=softmax_biases,
                inputs=projection_out,
                labels=self.__labelPlaceHolder,
                num_sampled=self.__num_sampled,
                num_classes=self.__vocabulary_size
            )

            self.__loss = tf.reduce_mean(sampled_loss_per_batch)
            self.__optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.__loss)

            norm = tf.sqrt(tf.reduce_sum(tf.square(onehot_lookup_tables), 1, keep_dims=True))
            self.__normalized_embedding = onehot_lookup_tables / norm
Author: caoyujiALgLM | Project: machine-learn | Lines: 32 | Source: cbow.py


Example 15: _summarize_input

  def _summarize_input(self, groundtruth_boxes_list, match_list):
    """Creates tensorflow summaries for the input boxes and anchors.

    This function creates four summaries corresponding to the average
    number (over images in a batch) of (1) groundtruth boxes, (2) anchors
    marked as positive, (3) anchors marked as negative, and (4) anchors marked
    as ignored.

    Args:
      groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
        containing corners of the groundtruth boxes.
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.
    """
    num_boxes_per_image = tf.stack(
        [tf.shape(x)[0] for x in groundtruth_boxes_list])
    pos_anchors_per_image = tf.stack(
        [match.num_matched_columns() for match in match_list])
    neg_anchors_per_image = tf.stack(
        [match.num_unmatched_columns() for match in match_list])
    ignored_anchors_per_image = tf.stack(
        [match.num_ignored_columns() for match in match_list])
    tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                      tf.reduce_mean(tf.to_float(num_boxes_per_image)))
    tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
Author: Peterwangcn | Project: object_detector_app | Lines: 32 | Source: ssd_meta_arch.py


Example 16: build_graph

    def build_graph(self, image, label):
        assert tf.test.is_gpu_available()

        MEAN_IMAGE = tf.constant([0.4914, 0.4822, 0.4465], dtype=tf.float32)
        STD_IMAGE = tf.constant([0.2023, 0.1994, 0.2010], dtype=tf.float32)
        image = ((image / 255.0) - MEAN_IMAGE) / STD_IMAGE
        image = tf.transpose(image, [0, 3, 1, 2])

        pytorch_default_init = tf.variance_scaling_initializer(scale=1.0 / 3, mode='fan_in', distribution='uniform')
        with argscope([Conv2D, BatchNorm, GlobalAvgPooling], data_format='channels_first'), \
                argscope(Conv2D, kernel_initializer=pytorch_default_init):
            net = Conv2D('conv0', image, 64, kernel_size=3, strides=1, use_bias=False)
            for i, blocks_in_module in enumerate(MODULE_SIZES):
                for j in range(blocks_in_module):
                    stride = 2 if j == 0 and i > 0 else 1
                    with tf.variable_scope("res%d.%d" % (i, j)):
                        net = preactivation_block(net, FILTER_SIZES[i], stride)
            net = GlobalAvgPooling('gap', net)
            logits = FullyConnected('linear', net, CLASS_NUM,
                                    kernel_initializer=tf.random_normal_initializer(stddev=1e-3))

        ce_cost = tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits)
        ce_cost = tf.reduce_mean(ce_cost, name='cross_entropy_loss')

        single_label = tf.to_int32(tf.argmax(label, axis=1))
        wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, single_label, 1)), name='wrong_vector')
        # monitor training error
        add_moving_summary(tf.reduce_mean(wrong, name='train_error'), ce_cost)
        add_param_summary(('.*/W', ['histogram']))

        # weight decay on all W matrixes. including convolutional layers
        wd_cost = tf.multiply(WEIGHT_DECAY, regularize_cost('.*', tf.nn.l2_loss), name='wd_cost')

        return tf.add_n([ce_cost, wd_cost], name='cost')
Author: quanlzheng | Project: tensorpack | Lines: 34 | Source: cifar10-preact18-mixup.py


Example 17: calc_reward

def calc_reward(outputs):
  outputs = outputs[-1]  # look at ONLY THE END of the sequence
  outputs = tf.reshape(outputs, (batch_size, cell_out_size))
  h_a_out = weight_variable((cell_out_size, n_classes))

  p_y = tf.nn.softmax(tf.matmul(outputs, h_a_out))
  max_p_y = tf.arg_max(p_y, 1)
  correct_y = tf.cast(labels_placeholder, tf.int64)

  R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32)  # reward per example

  reward = tf.reduce_mean(R)  # overall reward

  p_loc = gaussian_pdf(mean_locs, sampled_locs)
  p_loc = tf.reshape(p_loc, (batch_size, glimpses * 2))

  R = tf.reshape(R, (batch_size, 1))
  J = tf.concat(1, [tf.log(p_y + 1e-5) * onehot_labels_placeholder, tf.log(
      p_loc + 1e-5) * R])
  J = tf.reduce_sum(J, 1)
  J = tf.reduce_mean(J, 0)
  cost = -J

  optimizer = tf.train.AdamOptimizer(lr)
  train_op = optimizer.minimize(cost)

  return cost, reward, max_p_y, correct_y, train_op
Author: ffmpbgrnn | Project: tensorflow_mnist_ram | Lines: 27 | Source: ram.py


Example 18: soft_triplet_loss

def soft_triplet_loss(anchor, positive, negative, extra=True, scope="soft_triplet_loss"):
    r"""Loss for triplet networks as described in the paper:
    `Deep Metric Learning using Triplet Network
    <https://arxiv.org/abs/1412.6622>`_ by Hoffer et al.

    It is a softmax loss using :math:`(anchor-positive)^2` and
    :math:`(anchor-negative)^2` as logits.

    Args:
        anchor (tf.Tensor): anchor feature vectors of shape [Batch, N].
        positive (tf.Tensor): features of positive match of the same shape.
        negative (tf.Tensor): features of negative match of the same shape.
        extra (bool): also return distances for pos and neg.

    Returns:
        tf.Tensor: triplet-loss as scalar (and optionally average_pos_dist, average_neg_dist)
    """

    eps = 1e-10
    with tf.name_scope(scope):
        d_pos = tf.sqrt(tf.reduce_sum(tf.square(anchor - positive), 1) + eps)
        d_neg = tf.sqrt(tf.reduce_sum(tf.square(anchor - negative), 1) + eps)

        logits = tf.stack([d_pos, d_neg], axis=1)
        ones = tf.ones_like(tf.squeeze(d_pos), dtype="int32")

        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=ones))

        if extra:
            pos_dist = tf.reduce_mean(d_pos, name='pos-dist')
            neg_dist = tf.reduce_mean(d_neg, name='neg-dist')
            return loss, pos_dist, neg_dist
        else:
            return loss
Author: tobyma | Project: tensorpack | Lines: 34 | Source: mnist-embeddings.py
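A quick usage sketch, assuming soft_triplet_loss above is in scope; the embedding net, batch size, and feature width are hypothetical placeholders for illustration:

    import tensorflow as tf

    # Hypothetical setup: a tiny linear embedding over 784-d inputs (placeholder sizes).
    inputs = {name: tf.placeholder(tf.float32, [32, 784])
              for name in ('anchor', 'positive', 'negative')}
    w = tf.get_variable('embed_w', [784, 128])
    embed = {name: tf.matmul(x, w) for name, x in inputs.items()}

    loss, pos_dist, neg_dist = soft_triplet_loss(
        embed['anchor'], embed['positive'], embed['negative'])
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

Because extra defaults to True, the call also returns the average positive and negative distances, which are convenient to monitor during training.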


Example 19: cnn_setup

def cnn_setup(x, y, keep_prob, lr, stddev):
    first_hidden = 32
    second_hidden = 64
    fc_hidden = 1024
    W_conv1 = weight([5, 5, 1, first_hidden], stddev)
    B_conv1 = bias([first_hidden])
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + B_conv1)
    h_pool1 = max_pool_2x2(h_conv1)
    W_conv2 = weight([5, 5, first_hidden, second_hidden], stddev)
    b_conv2 = bias([second_hidden])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)
    W_fc1 = weight([7 * 7 * second_hidden, fc_hidden], stddev)
    b_fc1 = bias([fc_hidden])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * second_hidden])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    W_fc2 = weight([fc_hidden, 10], stddev)
    b_fc2 = bias([10])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y * tf.log(y_conv), reduction_indices=[1]))
    correct_pred = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1))
    return (tf.train.AdamOptimizer(lr).minimize(cross_entropy),
            tf.reduce_mean(tf.cast(correct_pred, tf.float32)), cross_entropy)
Author: robertnishihara | Project: ray | Lines: 26 | Source: objective.py
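The two nested reductions in cross_entropy spell out the standard categorical cross-entropy, with tf.reduce_sum summing over the class axis and tf.reduce_mean averaging over the batch:

$$\mathrm{CE} = \frac{1}{B} \sum_{b=1}^{B} \left( -\sum_{i} y_{b,i} \log \hat{y}_{b,i} \right)$$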


Example 20: get_losses

                def get_losses(obj_mask):
                  """Get motion constraint loss."""
                  # Find height of segment.
                  coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
                      obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
                  y_max = tf.reduce_max(coords[:, 0])
                  y_min = tf.reduce_min(coords[:, 0])
                  seg_height = y_max - y_min
                  f_y = self.intrinsic_mat[i, 0, 1, 1]
                  approx_depth = ((f_y * self.global_scale_var) /
                                  tf.to_float(seg_height))
                  reference_pred = tf.boolean_mask(
                      depth_pred, tf.greater(
                          tf.reshape(obj_mask[:, :, 0],
                                     (self.img_height, self.img_width, 1)),
                          tf.constant(0.5, dtype=tf.float32)))

                  # Establish loss on approx_depth, a scalar, and
                  # reference_pred, our dense prediction. Normalize both to
                  # prevent degenerative depth shrinking.
                  global_mean_depth_pred = tf.reduce_mean(depth_pred)
                  reference_pred /= global_mean_depth_pred
                  approx_depth /= global_mean_depth_pred
                  spatial_err = tf.abs(reference_pred - approx_depth)
                  mean_spatial_err = tf.reduce_mean(spatial_err)
                  return mean_spatial_err
Author: pcm17 | Project: models | Lines: 26 | Source: model.py



Note: The tensorflow.reduce_mean examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.

