Python backend.batch_get_value Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.keras.backend.batch_get_value. If you have been wondering what batch_get_value does and how to use it in practice, the curated code examples below should help.



Twelve code examples of the batch_get_value function are shown below, ordered by popularity.
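Before the examples, a minimal sketch of what `batch_get_value` does may help: it evaluates a list of variables or tensors and returns their values as NumPy arrays in a single fetch (one `Session.run` in graph mode, a plain `.numpy()` loop under eager execution). The sketch assumes a TF 1.x-era `tf.keras` where this private module path exists; the variable names are illustrative.

import numpy as np
from tensorflow.python.keras import backend as K

# Two variables whose current values we want as NumPy arrays.
a = K.variable(np.zeros((2, 2)), name='a')
b = K.variable(np.ones((3,)), name='b')

# One fetch for both, instead of two separate K.get_value() calls.
values = K.batch_get_value([a, b])
print([v.shape for v in values])  # [(2, 2), (3,)]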

Example 1: set_weights

  def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.

    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).

    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shapes must match
            those of the optimizer's weights (i.e. it
            should match the output of `get_weights`).

    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    if len(params) != len(weights):
      raise ValueError(
          'Length of the specified weight list (' + str(len(weights)) +
          ') does not match the number of weights '
          'of the optimizer (' + str(len(params)) + ')')
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError(
            'Optimizer weight shape ' + str(pv.shape) + ' not compatible with '
            'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples)
Author: sonnyhu | Project: tensorflow | Lines: 31 | Source: optimizers.py
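A hedged usage sketch for the method above: train one step so the optimizer acquires state, then round-trip that state through `get_weights`/`set_weights` (which wrap `batch_get_value`/`batch_set_value`). The model and optimizer choices here are illustrative assumptions, not part of the original example.

import numpy as np
from tensorflow.python.keras import layers, models, optimizers

# A tiny model; one training step creates the optimizer's state variables.
model = models.Sequential([layers.Dense(1, input_shape=(4,))])
opt = optimizers.SGD(lr=0.01, momentum=0.9)  # momentum adds optimizer state
model.compile(optimizer=opt, loss='mse')
model.train_on_batch(np.zeros((2, 4)), np.zeros((2, 1)))

state = opt.get_weights()  # one batch_get_value fetch under the hood
opt.set_weights(state)     # shape-checked, then written via batch_set_value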


Example 2: save_weights_to_hdf5_group

def save_weights_to_hdf5_group(f, layers):
  """Saves the weights of a list of layers to a HDF5 group.

  Arguments:
      f: HDF5 group.
      layers: List of layer instances.
  """
  from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  save_attributes_to_hdf5_group(
      f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
  f.attrs['backend'] = K.backend().encode('utf8')
  f.attrs['keras_version'] = str(keras_version).encode('utf8')

  for layer in layers:
    g = f.create_group(layer.name)
    weights = _legacy_weights(layer)
    weight_values = K.batch_get_value(weights)
    weight_names = [w.name.encode('utf8') for w in weights]
    save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
    for name, val in zip(weight_names, weight_values):
      param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
      if not val.shape:
        # scalar
        param_dset[()] = val
      else:
        param_dset[:] = val
Author: aritratony | Project: tensorflow | Lines: 27 | Source: hdf5_format.py
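A usage sketch for the function above, assuming it and the helpers it calls (`save_attributes_to_hdf5_group`, `_legacy_weights`) are in scope as in hdf5_format.py; the file name is illustrative.

import h5py
from tensorflow.python.keras import layers, models

model = models.Sequential([layers.Dense(3, input_shape=(5,))])

with h5py.File('demo_weights.h5', 'w') as f:
  save_weights_to_hdf5_group(f, model.layers)
  print(list(f.keys()))  # one sub-group per layer name
  print(list(f.attrs))   # 'layer_names', 'backend', 'keras_version'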


Example 3: get_weights

  def get_weights(self):
    """Returns the current value of the weights of the optimizer.

    Returns:
        A list of numpy arrays.
    """
    return K.batch_get_value(self.weights)
Author: sonnyhu | Project: tensorflow | Lines: 7 | Source: optimizers.py


Example 4: save_weights_to_hdf5_group

def save_weights_to_hdf5_group(f, layers):
  """Saves the weights of a list of layers to a HDF5 group.

  Arguments:
      f: HDF5 group.
      layers: List of layer instances.
  """
  from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  save_attributes_to_hdf5_group(
      f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
  f.attrs['backend'] = K.backend().encode('utf8')
  f.attrs['keras_version'] = str(keras_version).encode('utf8')

  for layer in layers:
    g = f.create_group(layer.name)
    symbolic_weights = layer.weights
    weight_values = K.batch_get_value(symbolic_weights)
    weight_names = []
    for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
      if hasattr(w, 'name') and w.name:
        name = str(w.name)
      else:
        name = 'param_' + str(i)
      weight_names.append(name.encode('utf8'))
    save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
    for name, val in zip(weight_names, weight_values):
      param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
      if not val.shape:
        # scalar
        param_dset[()] = val
      else:
        param_dset[:] = val
Author: Huoxubeiyin | Project: tensorflow | Lines: 33 | Source: saving.py


Example 5: set_weights

  def set_weights(self, weights):
    params = self.weights
    if len(params) != len(weights):
      raise ValueError(
          "You called `set_weights(weights)` on optimizer " + self._name +
          " with a weight list of length " + str(len(weights)) +
          ", but the optimizer was expecting " + str(len(params)) +
          " weights. Provided weights: " + str(weights)[:50] + "...")
    if not params:
      return
    weight_value_tuples = []
    param_values = backend.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError("Optimizer weight shape " + str(pv.shape) +
                         " not compatible with "
                         "provided weight shape " + str(w.shape))
      weight_value_tuples.append((p, w))
    backend.batch_set_value(weight_value_tuples)
Author: terrytangyuan | Project: tensorflow | Lines: 19 | Source: optimizer_v2.py


Example 6: save_weights_to_hdf5_group

def save_weights_to_hdf5_group(f, layers):
  """Saves the weights of a list of layers to a HDF5 group.

  Arguments:
      f: HDF5 group.
      layers: List of layer instances.
  """
  from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  save_attributes_to_hdf5_group(
      f, 'layer_names', [layer.name.encode('utf8') for layer in layers])
  f.attrs['backend'] = K.backend().encode('utf8')
  f.attrs['keras_version'] = str(keras_version).encode('utf8')

  # On TPUs, modifying the graph between session.runs() triggers some expensive
  # recompilation overhead. To avoid this, we build up the full set of tensors
  # to save before fetching weights, thus only modifying the graph once.
  layer_weights_dict = {}
  for layer in layers:
    layer_weights_dict[layer.name] = [ops.convert_to_tensor(w)
                                      for w in layer.weights]

  for layer in layers:
    g = f.create_group(layer.name)
    symbolic_weights = layer_weights_dict[layer.name]
    weight_values = K.batch_get_value(symbolic_weights)
    weight_names = []
    for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
      if hasattr(w, 'name') and w.name:
        name = str(w.name)
      else:
        name = 'param_' + str(i)
      weight_names.append(name.encode('utf8'))
    save_attributes_to_hdf5_group(g, 'weight_names', weight_names)
    for name, val in zip(weight_names, weight_values):
      param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
      if not val.shape:
        # scalar
        param_dset[()] = val
      else:
        param_dset[:] = val
Author: rmlarsen | Project: tensorflow | Lines: 41 | Source: hdf5_format.py


Example 7: save_optimizer_weights_to_hdf5_group

def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
  """Saves optimizer weights of a optimizer to a HDF5 group.

  Arguments:
      hdf5_group: HDF5 group.
      optimizer: optimizer instance.
  """

  symbolic_weights = getattr(optimizer, 'weights')
  if symbolic_weights:
    weights_group = hdf5_group.create_group('optimizer_weights')
    weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]
    save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)
    weight_values = K.batch_get_value(symbolic_weights)
    for name, val in zip(weight_names, weight_values):
      param_dset = weights_group.create_dataset(
          name, val.shape, dtype=val.dtype)
      if not val.shape:
        # scalar
        param_dset[()] = val
      else:
        param_dset[:] = val
Author: rmlarsen | Project: tensorflow | Lines: 22 | Source: hdf5_format.py
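The example stores only the forward direction; a hedged read-back sketch follows. The function name is hypothetical, and it assumes the name list fit into a single `weight_names` attribute, ignoring the chunking that `save_attributes_to_hdf5_group` applies to very long name lists.

import numpy as np

def load_optimizer_weights_from_hdf5_group(hdf5_group):
  # Recover the arrays in the order they were written.
  weights_group = hdf5_group['optimizer_weights']
  names = [n.decode('utf8') for n in weights_group.attrs['weight_names']]
  # h5py resolves '/' in dataset names as nested groups, mirroring the writer.
  return [np.asarray(weights_group[name]) for name in names]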


Example 8: get_weights

  def get_weights(self):
    params = self.weights
    return backend.batch_get_value(params)
Author: terrytangyuan | Project: tensorflow | Lines: 3 | Source: optimizer_v2.py


Example 9: save_model

def save_model(model, filepath, overwrite=True, include_optimizer=True):
  """Saves a model to a HDF5 file.

  The saved model contains:
      - the model's configuration (topology)
      - the model's weights
      - the model's optimizer's state (if any)

  Thus the saved model can be reinstantiated in
  the exact same state, without any of the code
  used for model definition or training.

  Arguments:
      model: Keras model instance to be saved.
      filepath: One of the following:
          - String, path where to save the model
          - `h5py.File` object where to save the model
      overwrite: Whether we should overwrite any existing
          model at the target location, or instead
          ask the user with a manual prompt.
      include_optimizer: If True, save the optimizer's state as well.

  Raises:
      ImportError: if h5py is not available.
  """

  if h5py is None:
    raise ImportError('`save_model` requires h5py.')

  from tensorflow.python.keras import __version__ as keras_version  # pylint: disable=g-import-not-at-top

  if not isinstance(filepath, h5py.File):
    # If file exists and should not be overwritten.
    if not overwrite and os.path.isfile(filepath):
      proceed = ask_to_proceed_with_overwrite(filepath)
      if not proceed:
        return

    f = h5py.File(filepath, mode='w')
    opened_new_file = True
  else:
    f = filepath
    opened_new_file = False

  try:
    f.attrs['keras_version'] = str(keras_version).encode('utf8')
    f.attrs['backend'] = K.backend().encode('utf8')
    f.attrs['model_config'] = json.dumps(
        {
            'class_name': model.__class__.__name__,
            'config': model.get_config()
        },
        default=serialization.get_json_type).encode('utf8')

    model_weights_group = f.create_group('model_weights')
    model_layers = model.layers
    save_weights_to_hdf5_group(model_weights_group, model_layers)

    if include_optimizer and model.optimizer:
      if isinstance(model.optimizer, optimizers.TFOptimizer):
        logging.warning(
            'TensorFlow optimizers do not '
            'make it possible to access '
            'optimizer attributes or optimizer state '
            'after instantiation. '
            'As a result, we cannot save the optimizer '
            'as part of the model save file. '
            'You will have to compile your model again after loading it. '
            'Prefer using a Keras optimizer instead '
            '(see keras.io/optimizers).')
      else:
        f.attrs['training_config'] = json.dumps(
            {
                'optimizer_config': {
                    'class_name': model.optimizer.__class__.__name__,
                    'config': model.optimizer.get_config()
                },
                'loss': model.loss,
                'metrics': model.metrics,
                'sample_weight_mode': model.sample_weight_mode,
                'loss_weights': model.loss_weights,
            },
            default=serialization.get_json_type).encode('utf8')

        # Save optimizer weights.
        symbolic_weights = getattr(model.optimizer, 'weights')
        if symbolic_weights:
          optimizer_weights_group = f.create_group('optimizer_weights')
          weight_values = K.batch_get_value(symbolic_weights)
          weight_names = []
          for w, val in zip(symbolic_weights, weight_values):
            name = str(w.name)
            weight_names.append(name.encode('utf8'))
          optimizer_weights_group.attrs['weight_names'] = weight_names
          for name, val in zip(weight_names, weight_values):
            param_dset = optimizer_weights_group.create_dataset(
                name, val.shape, dtype=val.dtype)
            if not val.shape:
              # scalar
              param_dset[()] = val
#......... remaining code omitted .........
Author: Huoxubeiyin | Project: tensorflow | Lines: 101 | Source: saving.py
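Typical call pattern for `save_model` (the file name is illustrative); the saved file can be restored with the matching `load_model` defined alongside it in the same module.

from tensorflow.python.keras import layers, models

model = models.Sequential([layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')

save_model(model, 'full_model.h5')        # topology + weights + optimizer state
# restored = load_model('full_model.h5')  # counterpart to save_model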


Example 10: experimental_tpu_predict_loop


#......... preceding code omitted .........
      dataset = dataset.prefetch(prefetch_buffer)

  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset, current_strategy)

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  out_labels = model.output_names
  step_fn = _make_step_fn(model, ModeKeys.PREDICT, current_strategy, out_labels)

  # Add initial dummy values for outputs.
  initial_loop_values = {}
  batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
  for name, tensor in zip(model.output_names, model.outputs):
    # TODO(priyag): This is a workaround as we do not know the batch dimension
    # of the model's output at this point.
    shape = tensor_shape.TensorShape(tensor.shape.dims)
    shape.dims = [batch_dimension] + shape.dims[1:]
    initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)

  # TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  predict_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=mode)
  callbacks._call_begin_hook(mode)

  # Since we do not know how many samples we will see, we cannot pre-allocate
  # the returned Numpy arrays. Instead, we store one array per batch seen
  # and concatenate them upon returning.
  unconcatenated_outs = [[] for _ in model.outputs]
  if steps is not None:
    target_steps = steps
  else:
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.batch_get_value([predict_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, '
                       '{} batches).'.format(steps))
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)

      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      break

    # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
    for i, label in enumerate(model.output_names):
      unconcatenated_outs[i].extend(batch_outs[label])
    batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
    callbacks._call_batch_hook(mode, 'end', current_step, batch_logs)
    if verbose >= 1:
      progbar.update(current_step + 1)
    current_step += 1

  callbacks._call_end_hook(mode)

  scope.__exit__(None, None, None)

  if len(unconcatenated_outs) == 1:
    prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
  else:
    prediction_result = [
        np.concatenate(unconcatenated_outs[i], axis=0)
        for i in range(len(unconcatenated_outs))
    ]

  if padding_handler:
    prediction_result = padding_handler.apply_mask(prediction_result)

  return prediction_result
Author: kylin9872 | Project: tensorflow | Lines: 101 | Source: training_distributed.py
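The key trick in this loop is the single fetch `K.batch_get_value([predict_op, output_tensors])`: in graph mode `batch_get_value` boils down to one `Session.run`, so it can evaluate an op and a dict of tensors together. A minimal sketch of that pattern, assuming graph mode (as in the TPU loops here) and illustrative names:

import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K

tf.compat.v1.disable_eager_execution()  # assumption: graph mode, as on TPU

v = K.variable(np.zeros((2,)), name='v')
step_op = tf.compat.v1.assign_add(v, tf.ones((2,)))
output_tensors = {'v': v}

# One Session.run evaluates the op and the dict of tensors together.
_, fetched = K.batch_get_value([step_op, output_tensors])
print(fetched['v'])  # read order relative to the op is not guaranteed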


Example 11: experimental_tpu_test_loop

def experimental_tpu_test_loop(model,
                               dataset,
                               verbose=0,
                               steps=None,
                               callbacks=None):
  """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset for input data.
      verbose: Integer, Verbosity mode 0 or 1.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
      callbacks: List of callbacks to be called during training

  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the outputs.
  """
  mode = ModeKeys.TEST
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset,
                                                     current_strategy)
  steps = training_utils.infer_steps_for_dataset(dataset, steps,
                                                 steps_name='steps')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  out_labels = model.metrics_names
  step_fn = _make_step_fn(model, ModeKeys.TEST, current_strategy, out_labels)

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  # TODO(priyag): Use steps_per_run when we use new metrics as they will
  # allow handling metric computation at each step using variables.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  test_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=ModeKeys.TEST)
  callbacks._call_begin_hook(mode)

  outs = [0.] * len(model.metrics_names)
  if steps is not None:
    target_steps = steps
  else:
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.batch_get_value([test_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        warning_msg = ('Make sure that your dataset can generate at least '
                       '`steps` batches (in this case, '
                       '{} batches).'.format(steps))
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)

      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      target_steps = current_step
      break
    for i, label in enumerate(model.metrics_names):
      if i == 0:
        # Loss is a stateless metric; accumulate it manually across batches.
        outs[i] += batch_outs[label]
      else:
        # For all stateful metrics, the aggregation is handled by mirrored vars.
#......... remaining code omitted .........
Author: kylin9872 | Project: tensorflow | Lines: 101 | Source: training_distributed.py


Example 12: experimental_tpu_fit_loop


#......... preceding code omitted .........
  do_validation = bool(validation_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)

  # Calculate the steps each time on the device.
  if use_steps:
    steps_to_run = ([current_strategy.extended.steps_per_run] *
                    (steps_per_epoch //
                     current_strategy.extended.steps_per_run))
    if steps_per_epoch % current_strategy.extended.steps_per_run:
      steps_to_run.append(
          steps_per_epoch % current_strategy.extended.steps_per_run)
    target_steps = len(steps_to_run)
  else:
    target_steps = np.inf

  callbacks._call_begin_hook(mode)
  for epoch in range(initial_epoch, epochs):
    distributed_training_utils._reset_metrics(model)
    callbacks.on_epoch_begin(epoch)
    epoch_logs = {}
    step_index = 0
    prev_step_count = None
    current_step = 0
    while current_step < target_steps:
      step_count = steps_to_run[current_step] if use_steps else 1
      batch_logs = {'batch': step_index, 'size': 1, 'num_steps': step_count}
      callbacks._call_batch_hook(mode, 'begin', step_index, batch_logs)
      if prev_step_count is None or step_count != prev_step_count:
        steps_per_run.load(step_count, K.get_session())
        prev_step_count = step_count
      try:
        _, outputs = K.batch_get_value([train_op, output_tensors])
      except errors.OutOfRangeError:
        if use_steps:
          logging.warning('Your dataset iterator ran out of data; '
                          'interrupting training. Make sure that your dataset '
                          'can generate at least `steps_per_epoch * epochs` '
                          'batches (in this case, %d batches).' %
                          (steps_per_epoch * epochs))
        else:
          target_steps = current_step
          logging.info('Dataset iterator ran out of data. Inferring the '
                       'value of `steps_per_epoch` as %s.' % target_steps)
          distributed_training_utils.initialize_iterator(iterator,
                                                         current_strategy)
        break

      batch_logs.update(outputs)
      callbacks._call_batch_hook(mode, 'end', step_index, batch_logs)
      step_index = step_index + step_count
      current_step += 1

      if callbacks.model.stop_training:
        break

    if (do_validation and
        training_utils.should_run_validation(validation_freq, epoch)):
      logging.info('Running validation at fit epoch: %s', epoch)

      if model._compile_distribution:
        # Since we create a new clone from the original model we need to copy
        # the weights back to the original model before we can run validation.
        distributed_training_utils._copy_weights_to_original_model(
            model, ModeKeys.TRAIN)

      val_outs = experimental_tpu_test_loop(  # pylint: disable=undefined-variable
          model,
          val_dataset,
          steps=validation_steps,
          verbose=verbose,
          callbacks=callbacks)
      if not isinstance(val_outs, list):
        val_outs = [val_outs]
      # Same labels assumed.
      for label, val_out in zip(out_labels, val_outs):
        epoch_logs['val_' + label] = val_out

    callbacks.on_epoch_end(epoch, epoch_logs)
    if callbacks.model.stop_training:
      break
  callbacks._call_end_hook(mode)

  if model._compile_distribution:
    # Copy the weights back from the replicated model to the original model.
    distributed_training_utils._copy_weights_to_original_model(
        model, ModeKeys.TRAIN)
  scope.__exit__(None, None, None)
  return model.history
Author: kylin9872 | Project: tensorflow | Lines: 101 | Source: training_distributed.py



Note: The tensorflow.python.keras.backend.batch_get_value examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and redistribution and use must follow each project's license. Please do not republish without permission.

