Python backend.get_value Function Code Examples


This article collects typical usage examples of the get_value function from tensorflow.python.keras.backend in Python. If you have been asking yourself what exactly get_value does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 16 code examples of the get_value function, sorted by popularity by default.
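
Before the excerpts, a minimal sketch of the function's contract (written against the public tensorflow.keras.backend alias; the values here are illustrative): get_value reads the current contents of a backend variable or tensor back as a NumPy value, and set_value is its write counterpart.

import numpy as np
from tensorflow.keras import backend as K

v = K.variable(np.array([1.0, 2.0, 3.0]))   # a backend variable
print(K.get_value(v))                       # -> array([1., 2., 3.], dtype=float32)

K.set_value(v, np.array([4.0, 5.0, 6.0]))   # write a new value in place
print(K.get_value(v))                       # -> array([4., 5., 6.], dtype=float32)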

Example 1: on_epoch_end

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = K.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Reduce LR on plateau conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))

    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0

      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = float(K.get_value(self.model.optimizer.lr))
          if old_lr > self.min_lr:
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            K.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0
Contributor: xman | Project: tensorflow | Lines: 30 | Source: callbacks.py
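
A hedged usage sketch of the read-scale-write pattern Example 1 is built on, outside the callback machinery. The model, factor, and min_lr below are illustrative placeholders, and it assumes a TF 2.x-style optimizer that still exposes an .lr attribute:

import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')

old_lr = float(K.get_value(model.optimizer.lr))  # read the LR as a Python float
new_lr = max(old_lr * 0.5, 1e-6)                 # factor=0.5, min_lr=1e-6
K.set_value(model.optimizer.lr, new_lr)          # write it back
print(old_lr, float(K.get_value(model.optimizer.lr)))  # 0.1 0.05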


Example 2: get_config

  def get_config(self):
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Contributor: sonnyhu | Project: tensorflow | Lines: 8 | Source: optimizers.py
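
The same float(K.get_value(...)) idiom, sketched in isolation: it turns backend variables into JSON-friendly Python floats when assembling a config dict. The variable names are illustrative, not the Adagrad internals:

from tensorflow.keras import backend as K

lr = K.variable(0.01)
decay = K.variable(0.0)
config = {'lr': float(K.get_value(lr)), 'decay': float(K.get_value(decay))}
print(config)  # {'lr': ~0.01, 'decay': 0.0} (float32 rounding applies)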


Example 3: show_batch_normalization_layer

def show_batch_normalization_layer(layer):
    """Serialize batch normalization layer to dict"""
    moving_mean = K.get_value(layer.moving_mean)
    moving_variance = K.get_value(layer.moving_variance)
    result = {}
    result['moving_mean'] = encode_floats(moving_mean)
    result['moving_variance'] = encode_floats(moving_variance)
    if layer.center:
        beta = K.get_value(layer.beta)
        result['beta'] = encode_floats(beta)
    if layer.scale:
        gamma = K.get_value(layer.gamma)
        result['gamma'] = encode_floats(gamma)
    return result
Contributor: Telecommunication-Telemedia-Assessment | Project: V-BMS360 | Lines: 14 | Source: convert_model.py
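
A hedged sketch of pulling batch-normalization statistics out of a layer with K.get_value, as the serializer above does. encode_floats in the original is project-specific, so this sketch keeps the raw NumPy arrays instead:

import tensorflow as tf
from tensorflow.keras import backend as K

layer = tf.keras.layers.BatchNormalization()
layer.build((None, 8))                           # create the layer's variables

print(K.get_value(layer.moving_mean).shape)      # (8,), initialized to zeros
print(K.get_value(layer.moving_variance).shape)  # (8,), initialized to ones
if layer.center:
    print(K.get_value(layer.beta))               # per-feature offsets
if layer.scale:
    print(K.get_value(layer.gamma))              # per-feature scales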


Example 4: on_epoch_begin

  def on_epoch_begin(self, epoch, logs=None):
    # TODO(yashkatariya): Change the property checking when the learning
    # rate attribute is unified across all TF Optimizers.
    if isinstance(self.model.optimizer, optimizers.TFOptimizer):
      if not hasattr(self.model.optimizer.optimizer, '_lr') and not hasattr(
          self.model.optimizer.optimizer, '_learning_rate'):
        raise ValueError(
            'TF Optimizer must have a "_lr" or "_learning_rate" attribute.')
      else:
        opt = self.model.optimizer.optimizer
        if hasattr(opt, '_lr'):
          opt_lr = Variable(opt._lr)  # pylint: disable=protected-access
        elif hasattr(opt, '_learning_rate'):
          opt_lr = Variable(opt._learning_rate)  # pylint: disable=protected-access
    else:
      if not hasattr(self.model.optimizer, 'lr'):
        raise ValueError('Optimizer must have a "lr" attribute.')
      else:
        opt = self.model.optimizer
        opt_lr = opt.lr

    try:  # new API
      lr = float(K.get_value(opt_lr))
      lr = self.schedule(epoch, lr)
    except TypeError:  # Support for old API for backward compatibility
      lr = self.schedule(epoch)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    K.set_value(opt_lr, lr)
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, lr))
Contributor: LongJun123456 | Project: tensorflow | Lines: 33 | Source: callbacks.py


Example 5: testOptimizerWithCallbacks

  def testOptimizerWithCallbacks(self):
    np.random.seed(1331)
    input_np = np.random.random((10, 3))
    output_np = np.random.random((10, 4))
    a = input_layer.Input(shape=(3,), name='input_a')
    model = sequential.Sequential()
    model.add(core.Dense(4, name='dense'))
    model.add(core.Dropout(0.5, name='dropout'))
    model(a)
    optimizer = gradient_descent.SGD(learning_rate=0.1)
    model.compile(optimizer, loss='mse', metrics=['mae'])
    # This does not reduce the LR after the first epoch (due to low delta).
    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss', factor=0.1, min_delta=0, patience=1, cooldown=5)
    ]
    model.fit(
        input_np,
        output_np,
        batch_size=10,
        validation_data=(input_np, output_np),
        callbacks=cbks,
        epochs=2,
        verbose=0)
    self.assertAllClose(
        float(backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)

    # This should reduce the LR after the first epoch (due to high delta).
    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.1,
            min_delta=10,
            patience=1,
            cooldown=5)
    ]
    model.fit(
        input_np,
        output_np,
        batch_size=10,
        validation_data=(input_np, output_np),
        callbacks=cbks,
        epochs=2,
        verbose=2)
    self.assertAllClose(
        float(backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
Contributor: aritratony | Project: tensorflow | Lines: 46 | Source: optimizer_v2_test.py
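
The second half of that test, re-sketched against the public API (model and data are placeholders): with min_delta=10 the validation loss cannot improve "enough", so after patience=1 epoch the callback multiplies the LR by factor=0.1, which K.get_value lets us confirm:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
model.compile(tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')

x, y = np.random.random((10, 3)), np.random.random((10, 4))
cb = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                          min_delta=10, patience=1)
model.fit(x, y, validation_data=(x, y), epochs=2, verbose=0, callbacks=[cb])
print(float(K.get_value(model.optimizer.lr)))  # ~0.01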


Example 6: _serialize_hyperparameter

  def _serialize_hyperparameter(self, hyperparameter_name):
    """Serialize a hyperparameter that can be a float, callable, or Tensor."""
    value = self._get_hyper(hyperparameter_name)
    if callable(value):
      return value()
    if isinstance(value, (ops.Tensor, tf_variables.Variable)):
      return backend.get_value(value)
    return value
Contributor: aeverall | Project: tensorflow | Lines: 8 | Source: optimizer_v2.py


Example 7: _serialize_hyperparameter

  def _serialize_hyperparameter(self, hyperparameter_name):
    """Serialize a hyperparameter that can be a float, callable, or Tensor."""
    value = self._hyper[hyperparameter_name]
    if isinstance(value, learning_rate_schedule.LearningRateSchedule):
      return learning_rate_schedule.serialize(value)
    if callable(value):
      return value()
    if tensor_util.is_tensor(value):
      return backend.get_value(value)
    return value
Contributor: aritratony | Project: tensorflow | Lines: 10 | Source: optimizer_v2.py


Example 8: _serialize_hyperparameter

  def _serialize_hyperparameter(self, hyperparameter_name):
    """Serialize a hyperparameter that can be a float, callable, or Tensor."""
    value = self._hyper[hyperparameter_name]
    if isinstance(value, learning_rate_schedule.LearningRateSchedule):
      return learning_rate_schedule.serialize(value)
    if callable(value):
      return value()
    if isinstance(value, (ops.Tensor, tf_variables.Variable,
                          distributed_values.TPUMirroredVariable)):
      return backend.get_value(value)
    return value
Contributor: terrytangyuan | Project: tensorflow | Lines: 11 | Source: optimizer_v2.py
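
Examples 6-8 are three revisions of the same internal helper; a hedged, self-contained approximation using only public names shows the shared idea: a hyperparameter may be a schedule, a callable, a tensor/variable, or a plain float, and K.get_value handles the tensor case:

import tensorflow as tf
from tensorflow.keras import backend as K

def serialize_hyperparameter(value):
    if isinstance(value, tf.keras.optimizers.schedules.LearningRateSchedule):
        return tf.keras.optimizers.schedules.serialize(value)
    if callable(value):
        return value()
    if tf.is_tensor(value) or isinstance(value, tf.Variable):
        return K.get_value(value)
    return value

print(serialize_hyperparameter(tf.Variable(0.01)))  # -> 0.01 (as a NumPy float)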


Example 9: test_save_weights_with_autocast_vars

  def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
    with strategy_fn().scope():
      with policy.policy_scope('infer_float32_vars'):
        x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
        layer = AddLayer(assert_type=dtypes.float16)
        y = layer(x)
        y = math_ops.cast(y, dtypes.float32)
        model = models.Model(inputs=x, outputs=y)

    model.set_weights([np.array(100.)])
    x = np.ones((2, 1), dtype=np.float16)
    self.assertAllClose(backend.get_value(model(x)), x + 100.)
    suffix = '.h5' if h5 else ''
    weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
    model.save_weights(weights_file)

    model.set_weights([np.array(200.)])
    self.assertAllClose(backend.get_value(model(x)), x + 200.)
    model.load_weights(weights_file)
    self.assertAllClose(backend.get_value(model(x)), x + 100.)
    self.assertEqual(model.get_weights(), [np.array(100.)])
Contributor: aritratony | Project: tensorflow | Lines: 21 | Source: keras_test.py
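
A hedged, mixed-precision-free sketch of the save/load round trip asserted above: weights changed after save_weights are rolled back by load_weights, with K.get_value turning the model output into NumPy for inspection. Paths and values are illustrative:

import os, tempfile
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential(
    [tf.keras.layers.Dense(1, use_bias=False, input_shape=(1,))])
model.set_weights([np.array([[100.]])])
ckpt = os.path.join(tempfile.mkdtemp(), 'weights')
model.save_weights(ckpt)                  # TF checkpoint format

model.set_weights([np.array([[200.]])])
model.load_weights(ckpt)                  # roll back to the saved weights
x = np.ones((2, 1), dtype=np.float32)
print(K.get_value(model(x)))              # [[100.], [100.]]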


Example 10: on_epoch_begin

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:  # new API
      lr = float(K.get_value(self.model.optimizer.lr))
      lr = self.schedule(epoch, lr)
    except TypeError:  # Support for old API for backward compatibility
      lr = self.schedule(epoch)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    K.set_value(self.model.optimizer.lr, lr)
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, lr))
Contributor: meteorcloudy | Project: tensorflow | Lines: 15 | Source: callbacks.py
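
A hedged sketch of the new-style schedule(epoch, lr) API this callback probes for: K.get_value supplies the current LR as the second argument, and the float the schedule returns is written back with K.set_value. The decay rule is an illustrative choice:

import tensorflow as tf

def schedule(epoch, lr):
    # illustrative rule: halve the learning rate every 10 epochs
    return lr * 0.5 if epoch > 0 and epoch % 10 == 0 else lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(schedule, verbose=1)
# then: model.fit(x, y, callbacks=[lr_callback], ...)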


Example 11: convert_all_kernels_in_model

def convert_all_kernels_in_model(model):
  """Converts all convolution kernels in a model from Theano to TensorFlow.

  Also works from TensorFlow to Theano.

  Arguments:
      model: target model for the conversion.
  """
  # Note: SeparableConvolution not included
  # since only supported by TF.
  conv_classes = {
      'Conv1D',
      'Conv2D',
      'Conv3D',
      'Conv2DTranspose',
  }
  to_assign = []
  for layer in model.layers:
    if layer.__class__.__name__ in conv_classes:
      original_kernel = K.get_value(layer.kernel)
      converted_kernel = convert_kernel(original_kernel)
      to_assign.append((layer.kernel, converted_kernel))
  K.batch_set_value(to_assign)
Contributor: Wajih-O | Project: tensorflow | Lines: 23 | Source: layer_utils.py
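
A hedged sketch of the K.get_value / K.batch_set_value round trip above on a toy Conv2D model. convert_kernel flips the spatial axes of a convolution kernel; the slice below stands in for it on a 2-D kernel:

import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential(
    [tf.keras.layers.Conv2D(2, 3, input_shape=(8, 8, 1), name='conv')])
kernel = model.get_layer('conv').kernel

original = K.get_value(kernel)              # NumPy copy, shape (3, 3, 1, 2)
converted = original[::-1, ::-1, :, :]      # stand-in for convert_kernel
K.batch_set_value([(kernel, converted)])    # apply all updates in one call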


Example 12: apply_mask

  def apply_mask(self, prediction_result):
    """Removes prediction output that corresponds to padded input."""
    padding_mask = K.get_value(self.padding_mask)
    assert len(padding_mask.shape) == 1

    if len(self.output_shape) == 1:
      prediction = np.take(prediction_result,
                           np.nonzero(
                               padding_mask[:len(prediction_result)]),
                           axis=0)
      if prediction.shape[0] == 1:
        prediction = np.squeeze(prediction, axis=0)
      return prediction

    else:
      predictions = []
      for i in range(len(self.output_shape)):
        prediction = prediction_result[i]
        prediction = np.take(prediction, np.nonzero(
            padding_mask[:len(prediction)]), axis=0)
        predictions.append(np.squeeze(prediction))

      return predictions
Contributor: adit-chandra | Project: tensorflow | Lines: 23 | Source: partial_batch_padding_handler.py
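
The masking trick above, isolated as a hedged NumPy sketch: np.nonzero on a 0/1 padding mask yields the indices of real samples, and np.take keeps only those rows (with one extra axis to squeeze away):

import numpy as np

predictions = np.arange(10).reshape(5, 2)   # 5 rows of model output
padding_mask = np.array([1, 1, 0, 1, 0])    # 1 = real sample, 0 = padding

kept = np.take(predictions, np.nonzero(padding_mask[:len(predictions)]), axis=0)
kept = np.squeeze(kept, axis=0)             # drop the axis np.take adds
print(kept)                                 # rows 0, 1 and 3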


Example 13: test_save_weights_with_dynamic_loss_scaling

  def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
    with context.eager_mode():
      strategy = strategy_fn()
      if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
          not context.executing_eagerly()):
        # TODO(b/121381184): Enable running the test in this case.
        return

      # Create and run model.
      with strategy.scope():
        x = layers.Input(shape=(2,), batch_size=2, dtype=dtypes.float32)
        y = AddLayer(assert_type=dtypes.float32)(x)
        model = models.Model(inputs=x, outputs=y)

        loss_scale = loss_scale_module.DynamicLossScale(
            initial_loss_scale=1., increment_period=2., multiplier=2.)
        opt = gradient_descent.SGD(1.)
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
        model.compile(optimizer=opt, loss='mse')
      # Run for 3 steps (6 examples with a batch size of 2)
      model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
      self.assertEqual(backend.get_value(loss_scale()), 2)
      self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)

      # Save model weights.
      save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
      model.save_weights(save_prefix)

      # Run model again for 1 step (2 examples with a batch size of 2)
      model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
      self.assertEqual(backend.get_value(loss_scale()), 4)
      self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)

      # Load model weights and ensure loss scale weights are restored.
      model.load_weights(save_prefix)
      self.assertEqual(backend.get_value(loss_scale()), 2)
      self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
Contributor: aritratony | Project: tensorflow | Lines: 37 | Source: keras_test.py


Example 14: test_dynamic_loss_scaling

  def test_dynamic_loss_scaling(self, strategy_fn, cloning=True):
    strategy = strategy_fn()
    initial_loss_scale = 2.
    batch_size = 4
    expected_gradient = backend.variable([initial_loss_scale / batch_size],
                                         dtype=dtypes.float16)
    # If this variable is set to True, the model below will have NaN gradients
    have_nan_gradients = backend.variable(False, dtype=dtypes.bool)
    with strategy.scope():
      with policy.policy_scope(policy.Policy('infer_float32_vars')):
        x = layers.Input(shape=(1,), batch_size=batch_size,
                         dtype=dtypes.float16)
        layer = AddLayer(assert_type=dtypes.float16)
        y = layer(x)
        identity_with_nan_grads = (
            mp_test_util.create_identity_with_nan_gradients_fn(
                have_nan_gradients))
        y = core.Lambda(identity_with_nan_grads)(y)
        identity_with_grad_check_fn = (
            mp_test_util.create_identity_with_grad_check_fn(
                expected_dtype=dtypes.float16,
                expected_gradient=expected_gradient))
        y = core.Lambda(identity_with_grad_check_fn)(y)
        y = math_ops.cast(y, dtypes.float32)
        model = models.Model(inputs=x, outputs=y)

        def loss_fn(y_true, y_pred):
          del y_true
          return math_ops.reduce_mean(y_pred)

        opt = gradient_descent.SGD(1.)
        loss_scale = loss_scale_module.DynamicLossScale(
            initial_loss_scale=initial_loss_scale, increment_period=2)
        opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
        model.compile(opt, loss=loss_fn, cloning=cloning)

    self.assertEqual(backend.eval(layer.v), 1)
    x = np.ones((batch_size, 1))
    y = np.ones((batch_size, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
    model.fit(dataset)
    # The variables starts with 1 and has a gradient of 1, so will go down by 1
    # each step.
    self.assertEqual(backend.eval(layer.v), 0)

    model.fit(dataset)
    self.assertEqual(backend.eval(layer.v), -1)

    # There have been two steps without NaNs, so the loss scale will double
    backend.set_value(expected_gradient,
                      backend.get_value(expected_gradient * 2))
    model.fit(dataset)
    self.assertEqual(backend.eval(layer.v), -2)

    # Next test with NaN gradients.
    backend.set_value(have_nan_gradients, True)
    model.fit(dataset)
    # Variable should not be updated
    self.assertEqual(backend.eval(layer.v), -2)

    # Test with finite gradients again
    backend.set_value(have_nan_gradients, False)
    # The loss scale will be halved due to the NaNs, so the gradient will also
    # be halved
    backend.set_value(expected_gradient,
                      backend.get_value(expected_gradient / 2))
    model.fit(dataset)
    self.assertEqual(backend.eval(layer.v), -3)
Contributor: aritratony | Project: tensorflow | Lines: 68 | Source: keras_test.py


Example 15: iterator_predict_loop

def iterator_predict_loop(model, inputs, steps, verbose=0):
  """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  if not isinstance(inputs.output_shapes,
                    (list, tuple)) or len(inputs.output_shapes) > 3:
    raise ValueError(
        'Please provide data as a list or tuple of 1, 2, or 3 elements '
        ' - `(input)`, or `(input, target)`, or `(input, target,'
        'sample_weights)`. Received %s. We do not use the `target` or'
        '`sample_weights` value here.' % inputs.output_shapes)
  outs = []
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; interrupting prediction. '
          'Make sure that your dataset can generate at least `steps` batches '
          '(in this case, %d batches). You may need to use the repeat() '
          'function when building your dataset.', steps)
      break

    # expects a tuple, where first element of tuple represents inputs
    x = next_element[0]

    # Validate and standardize data.
    x, _, _ = model._standardize_user_data(x)
    x = training_utils.cast_if_floating_dtype(x)

    if isinstance(x, list) and len(x) == 1:
      x = x[0]

    if model._expects_training_arg:
      batch_outs = model.call(x, training=False)
    else:
      batch_outs = model.call(x)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]

    # We collect the results from every step and then concatenate them once
    # in the end. This is an expensive process. We are doing this because we
    # do not know the number of samples beforehand.
    if step_index == 0:
      for _ in batch_outs:
        outs.append([])
    for i, batch_out in enumerate(batch_outs):
      outs[i].append(backend.get_value(batch_out))

    if verbose == 1:
      progbar.update(step_index + 1)
  for i, out in enumerate(outs):
    outs[i] = np.concatenate(tuple(out), axis=0)
  if len(outs) == 1:
    return outs[0]
  return outs
Contributor: ThunderQi | Project: tensorflow | Lines: 74 | Source: training_eager.py
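
The collect-then-concatenate core of this loop, re-sketched with a plain tf.data dataset in eager mode (model and shapes are placeholders): each batch output is converted to NumPy with backend.get_value and the pieces are stitched together once at the end, since the sample count is unknown up front:

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])
dataset = tf.data.Dataset.from_tensor_slices(
    np.ones((8, 3), dtype=np.float32)).batch(4)

outs = []
for batch in dataset:
    outs.append(K.get_value(model(batch)))  # one NumPy array per batch
result = np.concatenate(outs, axis=0)
print(result.shape)                         # (8, 2)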


Example 16: iterator_predict_loop

def iterator_predict_loop(model, inputs, steps, verbose=0):
  """Predict function for eager execution when input is dataset iterator.

  Arguments:
      model: Instance of `Model`.
      inputs: Input dataset iterator.
      steps: Total number of steps (batches of samples) before declaring
          `_predict_loop` finished.
      verbose: Verbosity mode.

  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).

  Raises:
      ValueError: In case of mismatch between given number of inputs and
        expectations of the model.
  """
  assert isinstance(inputs, iterator_ops.EagerIterator)
  outs = []
  if verbose == 1:
    progbar = generic_utils.Progbar(target=steps)
  for step_index in range(steps):
    # Get data from the iterator.
    try:
      next_element = inputs.get_next()
    except errors.OutOfRangeError:
      logging.warning(
          'Your dataset iterator ran out of data; '
          'interrupting prediction. Make sure that your '
          'dataset can generate at least `steps` '
          'batches (in this case, %d batches).', steps)
      break

    if not isinstance(next_element, (list, tuple)) or len(next_element) != 2:
      raise ValueError(
          'Please provide data as a list or tuple of 2 elements '
          ' - input and target pair. Received %s. We do not use the '
          '`target` value here.' % next_element)
    x, _ = next_element

    # Validate and standardize data.
    x, _, _ = model._standardize_user_data(x)

    if model._expects_training_arg:
      batch_outs = model.call(x[0] if len(x) == 1 else x, training=False)
    else:
      batch_outs = model.call(x[0] if len(x) == 1 else x)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]

    # We collect the results from every step and then concatenate them once
    # in the end. This is an expensive process. We are doing this because we
    # do not know the number of samples beforehand.
    if step_index == 0:
      for _ in batch_outs:
        outs.append([])
    for i, batch_out in enumerate(batch_outs):
      outs[i].append(backend.get_value(batch_out))

    if verbose == 1:
      progbar.update(step_index + 1)
  for i, out in enumerate(outs):
    outs[i] = np.concatenate(tuple(out), axis=0)
  if len(outs) == 1:
    return outs[0]
  return outs
Contributor: didukhle | Project: tensorflow | Lines: 67 | Source: training_eager.py



Note: The tensorflow.python.keras.backend.get_value examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub/MSDocs and similar platforms. The snippets were selected from open-source projects; copyright remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.

