Python moving_averages.assign_moving_average Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.training.moving_averages.assign_moving_average. If you are wondering what assign_moving_average does and how to use it, the curated code examples below should help.



Below are 20 code examples of assign_moving_average, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
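
Before diving into the examples, here is a minimal, self-contained sketch of the call itself (TF 1.x graph mode; the variable and placeholder names are illustrative, not taken from any example below). With zero_debias=False, each run performs variable = decay * variable + (1 - decay) * value:

import tensorflow as tf
from tensorflow.python.training import moving_averages

ema = tf.Variable(0.0, trainable=False, name="ema")  # running estimate
observation = tf.placeholder(tf.float32, shape=[])   # new value per step

# Returns an op that updates `ema` in place and yields its new value.
update_ema = moving_averages.assign_moving_average(
    ema, observation, decay=0.99, zero_debias=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for v in [1.0, 2.0, 3.0]:
        print(sess.run(update_ema, feed_dict={observation: v}))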

Example 1: _delay_updates

 def _delay_updates():
   """Internal function that delay updates moving_vars if is_training."""
   update_moving_mean = moving_averages.assign_moving_average(
       moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
   update_moving_variance = moving_averages.assign_moving_average(
       moving_variance, variance, decay, zero_debias=False)
   return update_moving_mean, update_moving_variance
Author: mkabra, Project: poseTF, Lines: 7, Source: batch_norm.py


Example 2: bn

def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]

    if c["use_bias"]:
        bias = _get_variable("bias", params_shape, initializer=tf.zeros_initializer)
        return x + bias

    axis = list(range(len(x_shape) - 1))

    beta = _get_variable("beta", params_shape, initializer=tf.zeros_initializer)
    gamma = _get_variable("gamma", params_shape, initializer=tf.ones_initializer)

    moving_mean = _get_variable("moving_mean", params_shape, initializer=tf.zeros_initializer, trainable=False)
    moving_variance = _get_variable("moving_variance", params_shape, initializer=tf.ones_initializer, trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        c["is_training"], lambda: (mean, variance), lambda: (moving_mean, moving_variance)
    )

    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    # x.set_shape(inputs.get_shape()) ??

    return x
Author: yaowenwu, Project: tensorflow-resnet, Lines: 31, Source: resnet.py


Example 3: __call__

    def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
                 in_dim=None, phase=Phase.train):
        shape = input_layer.shape
        shp = in_dim or shape[-1]
        with tf.variable_scope(name) as scope:
            self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
            self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)

            self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
            self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))

            if phase == Phase.train:
                mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
                mean.set_shape((shp,))
                variance.set_shape((shp,))

                update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)

                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    normalized_x = tf.nn.batch_norm_with_global_normalization(
                        input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                        scale_after_normalization=True)
            else:
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.mean, self.variance,
                    self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
            return input_layer.with_tensor(normalized_x, parameters=self.vars)
Author: Soledad89, Project: StackGAN, Lines: 29, Source: custom_ops.py


Example 4: mean_var_with_update

		def mean_var_with_update():
			mean, variance = tf.nn.moments(x, list(range(len(x.shape) - 1)), name='moments')
			with tf.control_dependencies([
				assign_moving_average(moving_mean, mean, decay),
				assign_moving_average(moving_var, variance, decay)
			]):
				return tf.identity(mean), tf.identity(variance)
Author: USTCzxm, Project: U-net, Lines: 7, Source: TestSomeFunction.py
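
A closure like mean_var_with_update is only half the pattern: it is normally passed to tf.cond so that batch statistics (plus the EMA updates) are used while training, and the stored moving statistics at inference. A self-contained sketch of the usual wiring, assuming an is_training placeholder and a feature dimension of 4 (both illustrative):

import tensorflow as tf
from tensorflow.python.training.moving_averages import assign_moving_average

x = tf.placeholder(tf.float32, [None, 4])
is_training = tf.placeholder(tf.bool, [])
decay = 0.99

moving_mean = tf.Variable(tf.zeros([4]), trainable=False)
moving_var = tf.Variable(tf.ones([4]), trainable=False)

def mean_var_with_update():
    mean, variance = tf.nn.moments(x, [0], name='moments')
    with tf.control_dependencies([
        assign_moving_average(moving_mean, mean, decay),
        assign_moving_average(moving_var, variance, decay)
    ]):
        return tf.identity(mean), tf.identity(variance)

# Training: use batch statistics and refresh the EMAs as a side effect.
# Inference: read the stored moving statistics instead.
mean, variance = tf.cond(is_training, mean_var_with_update,
                         lambda: (moving_mean, moving_var))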


Example 5: train_phase

		def train_phase():
			mean, variance = tf.nn.moments(inputs, axis)
			update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay)
			update_moving_variance = moving_averages.assign_moving_average(moving_variance, 
									variance, decay)
			with tf.control_dependencies([update_moving_mean, update_moving_variance]):
				return tf.identity(mean), tf.identity(variance)
Author: polltooh, Project: CNN_LSTM, Lines: 7, Source: model_func.py


Example 6: batch_norm

def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                               initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                                initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(x, axes=reduce_dims)
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=decay)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=decay)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
Author: kaka7, Project: DeepLearning_tutorials, Lines: 28, Source: ResNet50.py
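
Because the update ops above are only added to tf.GraphKeys.UPDATE_OPS, they do nothing unless the training loop actually runs them. A hedged usage sketch (the loss and optimizer here are illustrative, and it assumes the snippet's imports and its create_var helper are in scope):

x = tf.placeholder(tf.float32, [None, 8])
h = batch_norm(x, is_training=True, scope="bn")
loss = tf.reduce_mean(tf.square(h))  # stand-in objective

# Without this dependency the moving statistics would never be refreshed.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)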


Example 7: bacthnorm

def bacthnorm(inputs, scope, epsilon=1e-05, momentum=0.99, is_training=True):
    inputs_shape = inputs.get_shape().as_list()  # input shape
    params_shape = inputs_shape[-1:]  # number of channels (last dimension)
    axis = list(range(len(inputs_shape) - 1))

    with tf.variable_scope(scope):
        beta = create_variable("beta", params_shape,
                               initializer=tf.zeros_initializer())
        gamma = create_variable("gamma", params_shape,
                                initializer=tf.ones_initializer())
        # moving mean, not trainable; used for inference
        moving_mean = create_variable("moving_mean", params_shape,
                            initializer=tf.zeros_initializer(), trainable=False)
        # moving variance, not trainable
        moving_variance = create_variable("moving_variance", params_shape,
                            initializer=tf.ones_initializer(), trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(inputs, axes=axis)  # batch mean and variance
        # moving-average update, mixing in the previous value: x_t = a * x_{t-1} + (1 - a) * x_now
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=momentum)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=momentum)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_mean)
        tf.add_to_collection(UPDATE_OPS_COLLECTION, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(inputs, mean, variance, beta, gamma, epsilon)
Author: dyz-zju, Project: MVision, Lines: 28, Source: MobileNet_tf.py


Example 8: moving_average_update

def moving_average_update(variable, value, momentum):
  try:
    return moving_averages.assign_moving_average(
        variable, value, momentum, zero_debias=False)
  except TypeError:
    # Older TensorFlow versions do not accept the zero_debias argument.
    return moving_averages.assign_moving_average(
        variable, value, momentum)
Author: bowenliu16, Project: deepchem, Lines: 7, Source: model_ops.py


Example 9: update_mean_var

        def update_mean_var():
            mean, variance = tf.nn.moments(x=incoming, axes=axis)
            update_moving_mean = moving_averages.assign_moving_average(
                variable=moving_mean, value=mean, decay=self.decay, zero_debias=False)
            update_moving_variance = moving_averages.assign_moving_average(
                variable=moving_variance, value=variance, decay=self.decay, zero_debias=False)

            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                return tf.identity(mean), tf.identity(variance)
Author: AlexMikhalev, Project: polyaxon, Lines: 9, Source: normalizations.py


Example 10: __init__

    def __init__(self, value,
                 decay,
                 weight,
                 truediv=True,
                 collections=None,
                 name=None):

        """Compute the weighted moving average of `value`.
        Conceptually, the weighted moving average is:
          `moving_average(value * weight) / moving_average(weight)`,
        where a moving average updates by the rule
          `new_value = decay * old_value + (1 - decay) * update`
        Internally, this Op keeps moving average variables of both `value * weight`
        and `weight`.
        Args:
          value: A numeric `Tensor`.
          decay: A float `Tensor` or float value.  The moving average decay.
          weight:  `Tensor` that keeps the current value of a weight.
            Shape should be able to multiply `value`.
          truediv:  Boolean, if `True`, dividing by `moving_average(weight)` is
            floating point division.  If `False`, use division implied by dtypes.
          collections:  List of graph collections keys to add the internal variables
            `value * weight` and `weight` to.  Defaults to `[GraphKeys.VARIABLES]`.
          name: Optional name of the returned operation.
            Defaults to "WeightedMovingAvg".
        Returns:
          An Operation that updates and returns the weighted moving average.
        """
        # Unlike assign_moving_average, the weighted moving average doesn't modify
        # user-visible variables. It is the ratio of two internal variables, which are
        # moving averages of the updates.  Thus, the signature of this function is
        # quite different from assign_moving_average.
        if collections is None:
            collections = [ops.GraphKeys.VARIABLES]
        with variable_scope.variable_op_scope(
                [value, weight, decay], name, "WeightedMovingAvg") as scope:
            value_x_weight_var = variable_scope.get_variable(
                "value_x_weight",
                initializer=init_ops.zeros_initializer(value.get_shape(),
                                                       dtype=value.dtype),
                trainable=False,
                collections=collections)
            weight_var = variable_scope.get_variable(
                "weight",
                initializer=init_ops.zeros_initializer(weight.get_shape(),
                                                       dtype=weight.dtype),
                trainable=False,
                collections=collections)
            numerator = assign_moving_average(value_x_weight_var, value * weight, decay)
            denominator = assign_moving_average(weight_var, weight, decay)

            if truediv:
                div = math_ops.truediv
            else:
                div = math_ops.div
            self.average_with_update = div(numerator, denominator+1e-8, name=scope.name)
            self.average = div(value_x_weight_var, weight_var)
Author: NoahDStein, Project: NeuralNetSandbox, Lines: 57, Source: better_weighted_moving_average.py
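
The docstring's formula is easy to check by hand. A plain-Python sketch of two updates with decay = 0.9 (values and weights chosen arbitrarily):

decay = 0.9
num = den = 0.0  # moving averages of value * weight and of weight
for value, weight in [(2.0, 1.0), (4.0, 3.0)]:
    num = decay * num + (1 - decay) * value * weight
    den = decay * den + (1 - decay) * weight
    print(num / den)  # 2.0 after step one, ~3.54 after step two

Note that after the first step the ratio is exactly the first value: the zero initialization cancels in the ratio, which is the same debiasing trick used in Examples 14 and 18 below.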


Example 11: _force_updates

 def _force_updates():
   """Internal function forces updates moving_vars if is_training."""
   update_moving_mean = moving_averages.assign_moving_average(
       moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
   update_moving_variance = moving_averages.assign_moving_average(
       moving_variance, variance, decay, zero_debias=False)
   with ops.control_dependencies([update_moving_mean,
                                  update_moving_variance]):
     return array_ops.identity(mean), array_ops.identity(variance)
Author: mkabra, Project: poseTF, Lines: 9, Source: batch_norm.py


Example 12: _batch_norm_without_layers

 def _batch_norm_without_layers(self, input_layer, decay, use_scale,
                                epsilon):
     """Batch normalization on `input_layer` without tf.layers."""
     shape = input_layer.shape
     num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
     beta = self.get_variable(
         'beta', [num_channels],
         tf.float32,
         tf.float32,
         initializer=tf.zeros_initializer())
     if use_scale:
         gamma = self.get_variable(
             'gamma', [num_channels],
             tf.float32,
             tf.float32,
             initializer=tf.ones_initializer())
     else:
         gamma = tf.constant(1.0, tf.float32, [num_channels])
     moving_mean = tf.get_variable(
         'moving_mean', [num_channels],
         tf.float32,
         initializer=tf.zeros_initializer(),
         trainable=False)
     moving_variance = tf.get_variable(
         'moving_variance', [num_channels],
         tf.float32,
         initializer=tf.ones_initializer(),
         trainable=False)
     if self.phase_train:
         bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
             input_layer,
             gamma,
             beta,
             epsilon=epsilon,
             data_format=self.data_format,
             is_training=True)
         mean_update = moving_averages.assign_moving_average(
             moving_mean, batch_mean, decay=decay, zero_debias=False)
         variance_update = moving_averages.assign_moving_average(
             moving_variance,
             batch_variance,
             decay=decay,
             zero_debias=False)
         tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
         tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
     else:
         bn, _, _ = tf.nn.fused_batch_norm(
             input_layer,
             gamma,
             beta,
             mean=moving_mean,
             variance=moving_variance,
             epsilon=epsilon,
             data_format=self.data_format,
             is_training=False)
     return bn
Author: jamescasbon, Project: ray, Lines: 56, Source: convnet_builder.py


Example 13: _update_mean_var

 def _update_mean_var():
   """Internal function that updates mean and variance during training."""
   axis = [0, 1, 2] if convnet else [0]
   mean, var = nn.moments(tensor_in, axis)
   update_moving_mean = moving_averages.assign_moving_average(
       moving_mean, mean, decay)
   update_moving_var = moving_averages.assign_moving_average(
       moving_var, var, decay)
   with ops.control_dependencies([update_moving_mean, update_moving_var]):
     return array_ops_.identity(mean), array_ops_.identity(var)
Author: Assassin0028, Project: tensorflow, Lines: 10, Source: batch_norm_ops.py


Example 14: _update_renorm_variable

 def _update_renorm_variable(var, weight, value):
   """Updates a moving average and weight, returns the unbiased value."""
   # Update the variables without zero debiasing. The debiasing will be
   # accomplished by dividing the exponential moving average by the weight.
   # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
   # giving value.
   new_var = moving_averages.assign_moving_average(
       var, value, decay, zero_debias=False)
   new_weight = moving_averages.assign_moving_average(
       weight, 1., decay, zero_debias=False)
   return new_var / new_weight
Author: liaha, Project: tensorflow, Lines: 12, Source: normalization.py
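
The comment's claim can be verified numerically in plain Python (the numbers here are arbitrary):

decay, value = 0.99, 5.0
var = weight = 0.0                            # both EMAs start at zero
var = decay * var + (1 - decay) * value       # (1 - decay) * value = 0.05
weight = decay * weight + (1 - decay) * 1.0   # (1 - decay)         = 0.01
print(var / weight)                           # 5.0, the unbiased value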


Example 15: _batch_norm

  def _batch_norm(self, name, x):
    with tf.variable_scope(name):
      # number of input channels
      params_shape = [x.get_shape()[-1]]
      # offset
      beta = tf.get_variable('beta', 
                             params_shape, 
                             tf.float32,
                             initializer=tf.constant_initializer(0.0, tf.float32))
      # scale
      gamma = tf.get_variable('gamma', 
                              params_shape, 
                              tf.float32,
                              initializer=tf.constant_initializer(1.0, tf.float32))

      if self.mode == 'train':
        # compute per-channel batch mean and variance
        mean, variance = tf.nn.moments(x, [0, 1, 2], name='moments')
        # create the moving mean/variance used at test time
        moving_mean = tf.get_variable('moving_mean', 
                                      params_shape, tf.float32,
                                      initializer=tf.constant_initializer(0.0, tf.float32),
                                      trainable=False)
        moving_variance = tf.get_variable('moving_variance', 
                                          params_shape, tf.float32,
                                          initializer=tf.constant_initializer(1.0, tf.float32),
                                          trainable=False)
        # append update ops (moving averages) for the batch statistics:
        # moving_mean = moving_mean * decay + mean * (1 - decay)
        # moving_variance = moving_variance * decay + variance * (1 - decay)
        self._extra_train_ops.append(moving_averages.assign_moving_average(
                                                        moving_mean, mean, 0.9))
        self._extra_train_ops.append(moving_averages.assign_moving_average(
                                                        moving_variance, variance, 0.9))
      else:
        # read the moving mean/variance accumulated during training
        mean = tf.get_variable('moving_mean', 
                               params_shape, tf.float32,
                               initializer=tf.constant_initializer(0.0, tf.float32),
                               trainable=False)
        variance = tf.get_variable('moving_variance', 
                                   params_shape, tf.float32,
                                   initializer=tf.constant_initializer(1.0, tf.float32),
                                   trainable=False)
        # add histogram summaries
        tf.summary.histogram(mean.op.name, mean)
        tf.summary.histogram(variance.op.name, variance)

      # BN layer: y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
      y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 0.001)
      y.set_shape(x.get_shape())
      return y
Author: Npccc, Project: Study, Lines: 52, Source: resnet_model.py


Example 16: testAssignMovingAverageNewNamingMultipleCalls

 def testAssignMovingAverageNewNamingMultipleCalls(self):
   with variable_scope.variable_scope("scope1") as vs1:
     with variable_scope.variable_scope("scope2"):
       var = variables.Variable(1.0, name="Var")
       moving_averages.assign_moving_average(var, 0.0, 0.99)
       moving_averages.assign_moving_average(var, 0.0, 0.99)
   expected_names = ["scope1/scope2/Var:0",
                     "scope1/scope2/scope1/scope2/Var/biased:0",
                     "scope1/scope2/scope1/scope2/Var/local_step:0",
                     "scope1/scope2/scope1/scope2/Var/biased_1:0",
                     "scope1/scope2/scope1/scope2/Var/local_step_1:0"]
   actual_names = [v.name for v in vs1.global_variables()]
   self.assertSetEqual(set(expected_names), set(actual_names))
Author: JonathanRaiman, Project: tensorflow, Lines: 13, Source: moving_averages_test.py


Example 17: update_bn_ema

def update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, decay):
    # TODO is there a way to use zero_debias in multi-GPU?
    update_op1 = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay, zero_debias=False,
        name='mean_ema_op')
    update_op2 = moving_averages.assign_moving_average(
        moving_var, batch_var, decay, zero_debias=False,
        name='var_ema_op')
    add_model_variable(moving_mean)
    add_model_variable(moving_var)

    # seems faster than delayed update, but might behave otherwise in distributed settings.
    with tf.control_dependencies([update_op1, update_op2]):
        return tf.identity(xn, name='output')
Author: j50888, Project: tensorpack, Lines: 14, Source: batch_norm.py


Example 18: _do_update

 def _do_update():
   # Update the variables without zero debiasing. The debiasing will be
   # accomplished by dividing the exponential moving average by the weight.
   # For example, after a single update, the moving average would be
    # (1-decay) * value, and the weight will be 1-decay, with their ratio
    # giving the value.
    # Make sure the weight is not updated before the r and d computation.
   with ops.control_dependencies([value]):
     weight_value = array_ops.constant(1., dtype=weight.dtype)
   new_var = moving_averages.assign_moving_average(
       var, value, self.renorm_momentum, zero_debias=False)
   new_weight = moving_averages.assign_moving_average(
       weight, weight_value, self.renorm_momentum, zero_debias=False)
   return new_var / new_weight
Author: dansbecker, Project: tensorflow, Lines: 14, Source: normalization.py


Example 19: __init__

    def __init__(self, value, decay,
                 truediv=True,
                 collections=None,
                 reduction_indices=None,
                 name=None):
        self.value = value
        self.reduction_indices = reduction_indices or [0]

        eps = 1e-8
        if truediv:
            div = math_ops.truediv
        else:
            div = math_ops.div
        if collections is None:
            collections = [ops.GraphKeys.VARIABLES]

        value_shape = value.get_shape().as_list()
        shape = []
        for dim in range(len(value_shape)):
            if dim in self.reduction_indices:
                shape.append(1)
            else:
                shape.append(value_shape[dim])

        with variable_scope.variable_op_scope(
                [value, decay], name, "MomentTracker") as scope:

            mean_x_weight_var = variable_scope.get_variable("mean_x_weight", trainable=False, collections=collections,
                initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

            variance_x_weight_var = variable_scope.get_variable("variance_x_weight", trainable=False,
                collections=collections, initializer=init_ops.zeros_initializer(shape, dtype=value.dtype))

            weight_var = variable_scope.get_variable("weight", trainable=False, collections=collections,
                initializer=init_ops.zeros_initializer([1], dtype=tf.float32))

            self.tracked_mean = div(mean_x_weight_var, weight_var + eps)
            self.tracked_variance = div(variance_x_weight_var, weight_var + eps)

            self.batch_mean, self.batch_variance = tf.nn.moments(self.value, axes=self.reduction_indices,
                                                                 shift=self.tracked_mean, keep_dims=True)

            mean_numerator = assign_moving_average(mean_x_weight_var, self.batch_mean, decay)
            variance_numerator = assign_moving_average(variance_x_weight_var, self.batch_variance, decay)
            denominator = assign_moving_average(weight_var, 1.0, decay)

            self.update_mean = div(mean_numerator, denominator + eps, name=scope.name)
            self.update_variance = div(variance_numerator, denominator + eps, name=scope.name)
Author: NoahDStein, Project: NeuralNetSandbox, Lines: 48, Source: moment_tracker.py


Example 20: weighted_resample

def weighted_resample(inputs, weights, overall_rate, scope=None,
                      mean_decay=0.999, warmup=10, seed=None):
  """Performs an approximate weighted resampling of `inputs`.

  This method chooses elements from `inputs` where each item's rate of
  selection is proportional to its value in `weights`, and the average
  rate of selection across all inputs (and many invocations!) is
  `overall_rate`.

  Args:
    inputs: A list of tensors whose first dimension is `batch_size`.
    weights: A `[batch_size]`-shaped tensor with each batch member's weight.
    overall_rate: Desired overall rate of resampling.
    scope: Scope to use for the op.
    mean_decay: How quickly to decay the running estimate of the mean weight.
    warmup: Until the resulting tensor has been evaluated `warmup`
      times, the resampling method uses the true mean over all calls
      as its weight estimate, rather than a decayed mean.
    seed: Random seed.

  Returns:
    A list of tensors exactly like `inputs`, but with an unknown (and
      possibly zero) first dimension.
    A tensor containing the effective resampling rate used for each output.

  """
  # Algorithm: Just compute rates as weights/mean_weight *
  # overall_rate. This way the average weight corresponds to the
  # overall rate, and a weight twice the average has twice the rate,
  # etc.
  with ops.name_scope(scope, 'weighted_resample', inputs) as opscope:
    # First: Maintain a running estimated mean weight, with decay
    # adjusted (by also maintaining an invocation count) during the
    # warmup period so that at the beginning, there aren't too many
    # zeros mixed in, throwing the average off.

    with variable_scope.variable_scope(scope, 'estimate_mean', inputs):
      count_so_far = variable_scope.get_local_variable(
          'resample_count', initializer=0)

      estimated_mean = variable_scope.get_local_variable(
          'estimated_mean', initializer=0.0)

      count = count_so_far.assign_add(1)
      real_decay = math_ops.minimum(
          math_ops.truediv((count - 1), math_ops.minimum(count, warmup)),
          mean_decay)

      batch_mean = math_ops.reduce_mean(weights)
      mean = moving_averages.assign_moving_average(
          estimated_mean, batch_mean, real_decay, zero_debias=False)

    # Then, normalize the weights into rates using the mean weight and
    # overall target rate:
    rates = weights * overall_rate / mean

    results = resample_at_rate([rates] + inputs, rates,
                               scope=opscope, seed=seed, back_prop=False)

    return (results[1:], results[0])
Author: ComeOnGetMe, Project: tensorflow, Lines: 60, Source: resample.py
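
The warmup trick is worth unpacking: while count <= warmup, real_decay = (count - 1) / count, which makes the EMA an exact running mean of all observations so far; once count exceeds warmup, it is capped at mean_decay. A quick plain-Python check (warmup = 3 and the weights are chosen for illustration):

mean_decay, warmup = 0.999, 3
est = 0.0
for count, w in enumerate([4.0, 2.0, 6.0, 100.0], start=1):
    real_decay = min((count - 1) / min(count, warmup), mean_decay)
    est = real_decay * est + (1 - real_decay) * w
    print(count, est)  # 4.0, 3.0, 4.0, then only a tiny step toward 100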



Note: The tensorflow.python.training.moving_averages.assign_moving_average examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

