Python tensorflow.enable_eager_execution Function Code Examples


This article collects typical usage examples of the tensorflow.enable_eager_execution function in Python. If you are wondering what enable_eager_execution does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.



A total of 18 code examples of enable_eager_execution are shown below, sorted by popularity by default.
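Before the examples, here is a minimal sketch (not taken from any of the projects below) of the one constraint every example relies on: in TensorFlow 1.x, tf.enable_eager_execution() must be called exactly once, at program startup, before any graphs, ops, or sessions are created.

import tensorflow as tf

tf.enable_eager_execution()
print(tf.executing_eagerly())  # True once eager mode is on

# With eager execution enabled, ops run immediately and return concrete values,
# so no tf.Session is needed.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.matmul(x, x)
print(y.numpy())  # a NumPy array: [[ 7. 10.] [15. 22.]]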

Example 1: main

def main(_):
  tf.enable_eager_execution()
  # Ground-truth constants.
  true_w = [[-2.0], [4.0], [1.0]]
  true_b = [0.5]
  noise_level = 0.01

  # Training constants.
  batch_size = 64
  learning_rate = 0.1

  print("True w: %s" % true_w)
  print("True b: %s\n" % true_b)

  model = LinearModel()
  dataset = synthetic_dataset(true_w, true_b, noise_level, batch_size, 20)

  device = "gpu:0" if tfe.num_gpus() else "cpu:0"
  print("Using device: %s" % device)
  with tf.device(device):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    fit(model, dataset, optimizer, verbose=True, logdir=FLAGS.logdir)

  print("\nAfter training: w = %s" % model.variables[0].numpy())
  print("\nAfter training: b = %s" % model.variables[1].numpy())
Developer: didukhle, Project: tensorflow, Lines: 25, Source: linear_regression.py


Example 2: main

def main(_):
  # Build the train and eval datasets from the MNIST data. Also return the
  # input shape which is constructed based on the `image_data_format`
  # i.e. channels_first or channels_last.
  tf.enable_eager_execution()

  train_ds, eval_ds, input_shape = get_input_datasets()

  # Instantiate the MirroredStrategy object. If we don't specify `num_gpus` or
  # the `devices` argument then all the GPUs available on the machine are used.
  # TODO(priyag): Use `tf.distribute.MirroredStrategy` once available.
  strategy = mirrored_strategy.MirroredStrategy(['/gpu:0', '/cpu:0'])

  # Create and compile the model under Distribution strategy scope.
  # `fit`, `evaluate` and `predict` will be distributed based on the strategy
  # model was compiled with.
  with strategy.scope():
    model = get_model(input_shape)
    optimizer = rmsprop.RMSProp(learning_rate=0.001)
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])

  # Train the model with the train dataset.
  model.fit(x=train_ds, epochs=20, steps_per_epoch=468)

  # Evaluate the model with the eval dataset.
  score = model.evaluate(eval_ds, steps=10, verbose=0)
  print('Test loss:', score[0])
  print('Test accuracy:', score[1])
Developer: Albert-Z-Guo, Project: tensorflow, Lines: 30, Source: keras_mnist.py


Example 3: main

def main(_):
  tf.enable_eager_execution()

  if not FLAGS.data_path:
    raise ValueError("Must specify --data-path")
  corpus = Datasets(FLAGS.data_path)
  train_data = _divide_into_batches(corpus.train, FLAGS.batch_size)
  eval_data = _divide_into_batches(corpus.valid, 10)

  have_gpu = tfe.num_gpus() > 0
  use_cudnn_rnn = not FLAGS.no_use_cudnn_rnn and have_gpu

  with tf.device("/device:GPU:0" if have_gpu else None):
    # Make learning_rate a Variable so it can be included in the checkpoint
    # and we can resume training with the last saved learning_rate.
    learning_rate = tfe.Variable(20.0, name="learning_rate")
    model = PTBModel(corpus.vocab_size(), FLAGS.embedding_dim,
                     FLAGS.hidden_dim, FLAGS.num_layers, FLAGS.dropout,
                     use_cudnn_rnn)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    checkpoint = tfe.Checkpoint(
        learning_rate=learning_rate, model=model,
        # GradientDescentOptimizer has no state to checkpoint, but noting it
        # here lets us swap in an optimizer that does.
        optimizer=optimizer)
    # Restore existing variables now (learning_rate), and restore new variables
    # on creation if a checkpoint exists.
    checkpoint.restore(tf.train.latest_checkpoint(FLAGS.logdir))
    sys.stderr.write("learning_rate=%f\n" % learning_rate.numpy())

    best_loss = None
    for _ in range(FLAGS.epoch):
      train(model, optimizer, train_data, FLAGS.seq_len, FLAGS.clip)
      eval_loss = evaluate(model, eval_data)
      if not best_loss or eval_loss < best_loss:
        if FLAGS.logdir:
          checkpoint.save(os.path.join(FLAGS.logdir, "ckpt"))
        best_loss = eval_loss
      else:
        learning_rate.assign(learning_rate / 4.0)
        sys.stderr.write("eval_loss did not reduce in this epoch, "
                         "changing learning rate to %f for the next epoch\n" %
                         learning_rate.numpy())
Developer: didukhle, Project: tensorflow, Lines: 43, Source: rnn_ptb.py


Example 4: main

def main(_):
  """Eager execution workflow with RevNet trained on CIFAR-10."""
  tf.enable_eager_execution()

  config = get_config(config_name=FLAGS.config, dataset=FLAGS.dataset)
  ds_train, ds_train_one_shot, ds_validation, ds_test = get_datasets(
      data_dir=FLAGS.data_dir, config=config)
  model = revnet.RevNet(config=config)
  global_step = tf.train.get_or_create_global_step()  # Ensure correct summary
  global_step.assign(1)
  learning_rate = tf.train.piecewise_constant(
      global_step, config.lr_decay_steps, config.lr_list)
  optimizer = tf.train.MomentumOptimizer(
      learning_rate, momentum=config.momentum)
  checkpointer = tf.train.Checkpoint(
      optimizer=optimizer, model=model, optimizer_step=global_step)

  if FLAGS.use_defun:
    model.call = tfe.defun(model.call)

  if FLAGS.train_dir:
    summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
    if FLAGS.restore:
      latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
      checkpointer.restore(latest_path)
      print("Restored latest checkpoint at path:\"{}\" "
            "with global_step: {}".format(latest_path, global_step.numpy()))
      sys.stdout.flush()

  for x, y in ds_train:
    train_one_iter(model, x, y, optimizer, global_step=global_step)

    if global_step.numpy() % config.log_every == 0:
      it_test = ds_test.make_one_shot_iterator()
      acc_test, loss_test = evaluate(model, it_test)

      if FLAGS.validate:
        it_train = ds_train_one_shot.make_one_shot_iterator()
        it_validation = ds_validation.make_one_shot_iterator()
        acc_train, loss_train = evaluate(model, it_train)
        acc_validation, loss_validation = evaluate(model, it_validation)
        print("Iter {}, "
              "training set accuracy {:.4f}, loss {:.4f}; "
              "validation set accuracy {:.4f}, loss {:.4f}; "
              "test accuracy {:.4f}, loss {:.4f}".format(
                  global_step.numpy(), acc_train, loss_train, acc_validation,
                  loss_validation, acc_test, loss_test))
      else:
        print("Iter {}, test accuracy {:.4f}, loss {:.4f}".format(
            global_step.numpy(), acc_test, loss_test))
      sys.stdout.flush()

      if FLAGS.train_dir:
        with summary_writer.as_default():
          with tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.scalar("Test accuracy", acc_test)
            tf.contrib.summary.scalar("Test loss", loss_test)
            if FLAGS.validate:
              tf.contrib.summary.scalar("Training accuracy", acc_train)
              tf.contrib.summary.scalar("Training loss", loss_train)
              tf.contrib.summary.scalar("Validation accuracy", acc_validation)
              tf.contrib.summary.scalar("Validation loss", loss_validation)

    if global_step.numpy() % config.save_every == 0 and FLAGS.train_dir:
      saved_path = checkpointer.save(
          file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
      print("Saved checkpoint at path: \"{}\" "
            "with global_step: {}".format(saved_path, global_step.numpy()))
      sys.stdout.flush()
Developer: ChristinaEricka, Project: tensorflow, Lines: 69, Source: main.py


Example 5: run_keras_model_benchmark

def run_keras_model_benchmark(_):
  """Run the benchmark on keras model."""
  # Ensure a valid model name was supplied via command line argument
  if FLAGS.model not in MODELS.keys():
    raise AssertionError("The --model command line argument should "
                         "be a key in the `MODELS` dictionary.")

  # Check if eager execution is enabled
  if FLAGS.eager:
    tf.logging.info("Eager execution is enabled...")
    tf.enable_eager_execution()

  # Load the model
  tf.logging.info("Benchmark on {} model...".format(FLAGS.model))
  keras_model = MODELS[FLAGS.model]
  model = keras_model(weights=None)

  # Get dataset
  dataset_name = "ImageNet"
  if FLAGS.use_synthetic_data:
    tf.logging.info("Using synthetic dataset...")
    dataset_name += "_Synthetic"
    train_dataset = dataset.generate_synthetic_input_dataset(
        FLAGS.model, FLAGS.batch_size)
    val_dataset = dataset.generate_synthetic_input_dataset(
        FLAGS.model, FLAGS.batch_size)
  else:
    raise ValueError("Only synthetic dataset is supported!")

  num_gpus = flags_core.get_num_gpus(FLAGS)

  distribution = None
  # Use distribution strategy
  if FLAGS.dist_strat:
    distribution = distribution_utils.get_distribution_strategy(
        num_gpus=num_gpus)
  elif num_gpus > 1:
    # Run with multi_gpu_model
    # If eager execution is enabled, only one GPU is utilized even if multiple
    # GPUs are provided.
    if FLAGS.eager:
      tf.logging.warning(
          "{} GPUs are provided, but only one GPU is utilized as "
          "eager execution is enabled.".format(num_gpus))
    model = tf.keras.utils.multi_gpu_model(model, gpus=num_gpus)

  # The Adam optimizer and some other optimizers don't work well with
  # distribution strategy (b/113076709).
  # Use GradientDescentOptimizer here.
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
  model.compile(loss="categorical_crossentropy",
                optimizer=optimizer,
                metrics=["accuracy"],
                distribute=distribution)

  # Create benchmark logger for benchmark logging
  run_params = {
      "batch_size": FLAGS.batch_size,
      "synthetic_data": FLAGS.use_synthetic_data,
      "train_epochs": FLAGS.train_epochs,
      "num_train_images": FLAGS.num_train_images,
      "num_eval_images": FLAGS.num_eval_images,
  }

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info(
      model_name=FLAGS.model,
      dataset_name=dataset_name,
      run_params=run_params,
      test_id=FLAGS.benchmark_test_id)

  # Create callbacks that log metric values about the training and evaluation
  callbacks = model_callbacks.get_model_callbacks(
      FLAGS.callbacks,
      batch_size=FLAGS.batch_size,
      metric_logger=benchmark_logger)
  # Train and evaluate the model
  history = model.fit(
      train_dataset,
      epochs=FLAGS.train_epochs,
      callbacks=callbacks,
      validation_data=val_dataset,
      steps_per_epoch=int(np.ceil(FLAGS.num_train_images / FLAGS.batch_size)),
      validation_steps=int(np.ceil(FLAGS.num_eval_images / FLAGS.batch_size))
  )

  tf.logging.info("Logging the evaluation results...")
  for epoch in range(FLAGS.train_epochs):
    eval_results = {
        "accuracy": history.history["val_acc"][epoch],
        "loss": history.history["val_loss"][epoch],
        tf.GraphKeys.GLOBAL_STEP: (epoch + 1) * np.ceil(
            FLAGS.num_eval_images/FLAGS.batch_size)
    }
    benchmark_logger.log_evaluation_result(eval_results)

  # Clear the session explicitly to avoid session delete error
  tf.keras.backend.clear_session()
Developer: 812864539, Project: models, Lines: 98, Source: benchmark_main.py


Example 6: run

def run(flags_obj):
  """Run ResNet Cifar-10 training and eval loop using native Keras APIs.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.

  Returns:
    Dictionary of training and eval stats.
  """
  if flags_obj.enable_eager:
    tf.enable_eager_execution()

  dtype = flags_core.get_tf_dtype(flags_obj)
  if dtype == 'fp16':
    raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                     'value(fp32).')

  data_format = flags_obj.data_format
  if data_format is None:
    data_format = ('channels_first'
                   if tf.test.is_built_with_cuda() else 'channels_last')
  tf.keras.backend.set_image_data_format(data_format)

  if flags_obj.use_synthetic_data:
    input_fn = keras_common.get_synth_input_fn(
        height=cifar_main.HEIGHT,
        width=cifar_main.WIDTH,
        num_channels=cifar_main.NUM_CHANNELS,
        num_classes=cifar_main.NUM_CLASSES,
        dtype=flags_core.get_tf_dtype(flags_obj))
  else:
    input_fn = cifar_main.input_fn

  train_input_dataset = input_fn(
      is_training=True,
      data_dir=flags_obj.data_dir,
      batch_size=flags_obj.batch_size,
      num_epochs=flags_obj.train_epochs,
      parse_record_fn=parse_record_keras)

  eval_input_dataset = input_fn(
      is_training=False,
      data_dir=flags_obj.data_dir,
      batch_size=flags_obj.batch_size,
      num_epochs=flags_obj.train_epochs,
      parse_record_fn=parse_record_keras)

  strategy = distribution_utils.get_distribution_strategy(
      num_gpus=flags_obj.num_gpus,
      turn_off_distribution_strategy=flags_obj.turn_off_distribution_strategy)

  strategy_scope = keras_common.get_strategy_scope(strategy)

  with strategy_scope:
    optimizer = keras_common.get_optimizer()
    model = resnet_cifar_model.resnet56(classes=cifar_main.NUM_CLASSES)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['categorical_accuracy'])

  time_callback, tensorboard_callback, lr_callback = keras_common.get_callbacks(
      learning_rate_schedule, cifar_main.NUM_IMAGES['train'])

  train_steps = cifar_main.NUM_IMAGES['train'] // flags_obj.batch_size
  train_epochs = flags_obj.train_epochs

  if flags_obj.train_steps:
    train_steps = min(flags_obj.train_steps, train_steps)
    train_epochs = 1

  num_eval_steps = (cifar_main.NUM_IMAGES['validation'] //
                    flags_obj.batch_size)

  validation_data = eval_input_dataset
  if flags_obj.skip_eval:
    tf.keras.backend.set_learning_phase(1)
    num_eval_steps = None
    validation_data = None

  history = model.fit(train_input_dataset,
                      epochs=train_epochs,
                      steps_per_epoch=train_steps,
                      callbacks=[
                          time_callback,
                          lr_callback,
                          tensorboard_callback
                      ],
                      validation_steps=num_eval_steps,
                      validation_data=validation_data,
                      verbose=1)
  eval_output = None
  if not flags_obj.skip_eval:
    eval_output = model.evaluate(eval_input_dataset,
                                 steps=num_eval_steps,
                                 verbose=1)
  stats = keras_common.build_stats(history, eval_output, time_callback)
#......... part of the code is omitted here .........
Developer: pooyadavoodi, Project: models, Lines: 101, Source: keras_cifar_main.py


Example 7: run_mnist_eager

def run_mnist_eager(flags_obj):
  """Run MNIST training and eval loop in eager mode.

  Args:
    flags_obj: An object containing parsed flag values.
  """
  tf.enable_eager_execution()
  model_helpers.apply_clean(flags.FLAGS)

  # Automatically determine device and data_format
  (device, data_format) = ('/gpu:0', 'channels_first')
  if flags_obj.no_gpu or not tf.test.is_gpu_available():
    (device, data_format) = ('/cpu:0', 'channels_last')
  # If data_format is defined in FLAGS, overwrite automatically set value.
  if flags_obj.data_format is not None:
    data_format = flags_obj.data_format
  print('Using device %s, and data format %s.' % (device, data_format))

  # Load the datasets
  train_ds = mnist_dataset.train(flags_obj.data_dir).shuffle(60000).batch(
      flags_obj.batch_size)
  test_ds = mnist_dataset.test(flags_obj.data_dir).batch(
      flags_obj.batch_size)

  # Create the model and optimizer
  model = mnist.create_model(data_format)
  optimizer = tf.train.MomentumOptimizer(flags_obj.lr, flags_obj.momentum)

  # Create file writers for writing TensorBoard summaries.
  if flags_obj.output_dir:
    # Create directories to which summaries will be written
    # tensorboard --logdir=<output_dir>
    # can then be used to see the recorded summaries.
    train_dir = os.path.join(flags_obj.output_dir, 'train')
    test_dir = os.path.join(flags_obj.output_dir, 'eval')
    tf.gfile.MakeDirs(flags_obj.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')

  # Create and restore checkpoint (if one exists on the path)
  checkpoint_prefix = os.path.join(flags_obj.model_dir, 'ckpt')
  step_counter = tf.train.get_or_create_global_step()
  checkpoint = tf.train.Checkpoint(
      model=model, optimizer=optimizer, step_counter=step_counter)
  # Restore variables on creation if a checkpoint exists.
  checkpoint.restore(tf.train.latest_checkpoint(flags_obj.model_dir))

  # Train and evaluate for a set number of epochs.
  with tf.device(device):
    for _ in range(flags_obj.train_epochs):
      start = time.time()
      with summary_writer.as_default():
        train(model, optimizer, train_ds, step_counter,
              flags_obj.log_interval)
      end = time.time()
      print('\nTrain time for epoch #%d (%d total steps): %f' %
            (checkpoint.save_counter.numpy() + 1,
             step_counter.numpy(),
             end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      checkpoint.save(checkpoint_prefix)
Developer: 812864539, Project: models, Lines: 67, Source: mnist_eager.py


Example 8: str

import os
os.environ['OMP_NUM_THREADS'] = str(NUM_THREADS)
import tensorflow as tf
import copy
import numpy as np
import time
import pickle
import ncon as ncon
import misc_mera
from sys import stdout


config = tf.ConfigProto()
config.intra_op_parallelism_threads = NUM_THREADS
config.inter_op_parallelism_threads = NUM_THREADS
tf.enable_eager_execution(config=config)
tf.enable_v2_behavior()

@tf.contrib.eager.defun
def ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,
                             refsym):
    
    """
    ascending super operator for a modified binary MERA
    ascends 'hamAB' and 'hamBA' up one layer
    Parameters:
    -------------------------
    hamAB, hamBA:    tf.Tensor
                     local Hamiltonian terms
    w_isometry:      tf.Tensor
    v_isometry:      tf.Tensor
Developer: zoltanegyed, Project: TensorNetwork, Lines: 31, Source: modified_binary_mera.py


Example 9: main

def main(_):
  """Eager execution workflow with RevNet trained on CIFAR-10."""
  if FLAGS.data_dir is None:
    raise ValueError("No supplied data directory")

  if not os.path.exists(FLAGS.data_dir):
    raise ValueError("Data directory {} does not exist".format(FLAGS.data_dir))

  tf.enable_eager_execution()
  config = config_.get_hparams_cifar_38()

  if FLAGS.validate:
    # 40k Training set
    ds_train = cifar_input.get_ds_from_tfrecords(
        data_dir=FLAGS.data_dir,
        split="train",
        data_aug=True,
        batch_size=config.batch_size,
        epochs=config.epochs,
        shuffle=config.shuffle,
        data_format=config.data_format,
        dtype=config.dtype,
        prefetch=config.batch_size)
    # 10k Training set
    ds_validation = cifar_input.get_ds_from_tfrecords(
        data_dir=FLAGS.data_dir,
        split="validation",
        data_aug=False,
        batch_size=config.eval_batch_size,
        epochs=1,
        shuffle=False,
        data_format=config.data_format,
        dtype=config.dtype,
        prefetch=config.eval_batch_size)
  else:
    # 50k Training set
    ds_train = cifar_input.get_ds_from_tfrecords(
        data_dir=FLAGS.data_dir,
        split="train_all",
        data_aug=True,
        batch_size=config.batch_size,
        epochs=config.epochs,
        shuffle=config.shuffle,
        data_format=config.data_format,
        dtype=config.dtype,
        prefetch=config.batch_size)

  # Always compute loss and accuracy on whole training and test set
  ds_train_one_shot = cifar_input.get_ds_from_tfrecords(
      data_dir=FLAGS.data_dir,
      split="train_all",
      data_aug=False,
      batch_size=config.eval_batch_size,
      epochs=1,
      shuffle=False,
      data_format=config.data_format,
      dtype=config.dtype,
      prefetch=config.eval_batch_size)

  ds_test = cifar_input.get_ds_from_tfrecords(
      data_dir=FLAGS.data_dir,
      split="test",
      data_aug=False,
      batch_size=config.eval_batch_size,
      epochs=1,
      shuffle=False,
      data_format=config.data_format,
      dtype=config.dtype,
      prefetch=config.eval_batch_size)

  model = revnet.RevNet(config=config)
  global_step = tfe.Variable(1, trainable=False)
  learning_rate = tf.train.piecewise_constant(
      global_step, config.lr_decay_steps, config.lr_list)
  optimizer = tf.train.MomentumOptimizer(
      learning_rate, momentum=config.momentum)
  checkpointer = tf.train.Checkpoint(
      optimizer=optimizer, model=model, optimizer_step=global_step)

  if FLAGS.train_dir:
    summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
    if FLAGS.restore:
      latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
      checkpointer.restore(latest_path)
      print("Restored latest checkpoint at path:\"{}\" "
            "with global_step: {}".format(latest_path, global_step.numpy()))
      sys.stdout.flush()

  warmup(model, config)

  for x, y in ds_train:
    loss = train_one_iter(model, x, y, optimizer, global_step=global_step)

    if global_step.numpy() % config.log_every == 0:
      it_train = ds_train_one_shot.make_one_shot_iterator()
      acc_train, loss_train = evaluate(model, it_train)
      it_test = ds_test.make_one_shot_iterator()
      acc_test, loss_test = evaluate(model, it_test)
      if FLAGS.validate:
        it_validation = ds_validation.make_one_shot_iterator()
#......... part of the code is omitted here .........
Developer: Huoxubeiyin, Project: tensorflow, Lines: 101, Source: main.py


Example 10: main

def main(_):
  tf.enable_eager_execution()
  global_step = tf.train.get_or_create_global_step()
  global_step.assign(1)

  energy_fn, mean, covar = {
      "scg": l2hmc.get_scg_energy_fn(),
      "rw": l2hmc.get_rw_energy_fn()
  }[FLAGS.energy_fn]

  x_dim = 2
  train_iters = 5000
  eval_iters = 2000
  eps = 0.1
  n_steps = 10  # Chain length
  n_samples = 200
  record_loss_every = 100

  dynamics = l2hmc.Dynamics(
      x_dim=x_dim, minus_loglikelihood_fn=energy_fn, n_steps=n_steps, eps=eps)
  learning_rate = tf.train.exponential_decay(
      1e-3, global_step, 1000, 0.96, staircase=True)
  optimizer = tf.train.AdamOptimizer(learning_rate)
  checkpointer = tf.train.Checkpoint(
      optimizer=optimizer, dynamics=dynamics, global_step=global_step)

  if FLAGS.train_dir:
    summary_writer = tf.contrib.summary.create_file_writer(FLAGS.train_dir)
    if FLAGS.restore:
      latest_path = tf.train.latest_checkpoint(FLAGS.train_dir)
      checkpointer.restore(latest_path)
      print("Restored latest checkpoint at path:\"{}\" ".format(latest_path))
      sys.stdout.flush()

  if not FLAGS.restore:
    # Training
    if FLAGS.use_defun:
      # Use `tfe.defun` to boost performance when there are lots of small ops
      loss_fn = tfe.defun(l2hmc.compute_loss)
    else:
      loss_fn = l2hmc.compute_loss

    samples = tf.random_normal(shape=[n_samples, x_dim])
    for i in range(1, train_iters + 1):
      loss, samples, accept_prob = train_one_iter(
          dynamics,
          samples,
          optimizer,
          loss_fn=loss_fn,
          global_step=global_step)

      if i % record_loss_every == 0:
        print("Iteration {}, loss {:.4f}, x_accept_prob {:.4f}".format(
            i, loss.numpy(),
            accept_prob.numpy().mean()))
        if FLAGS.train_dir:
          with summary_writer.as_default():
            with tf.contrib.summary.always_record_summaries():
              tf.contrib.summary.scalar("Training loss", loss, step=global_step)
    print("Training complete.")
    sys.stdout.flush()

    if FLAGS.train_dir:
      saved_path = checkpointer.save(
          file_prefix=os.path.join(FLAGS.train_dir, "ckpt"))
      print("Saved checkpoint at path: \"{}\" ".format(saved_path))
      sys.stdout.flush()

  # Evaluation
  if FLAGS.use_defun:
    # Use tfe.defun to boost performance when there are lots of small ops
    apply_transition = tfe.defun(dynamics.apply_transition)
  else:
    apply_transition = dynamics.apply_transition

  samples = tf.random_normal(shape=[n_samples, x_dim])
  samples_history = []
  for i in range(eval_iters):
    samples_history.append(samples.numpy())
    _, _, _, samples = apply_transition(samples)
  samples_history = np.array(samples_history)
  print("Sampling complete.")
  sys.stdout.flush()

  # Mean and covariance of target distribution
  mean = mean.numpy()
  covar = covar.numpy()
  ac_spectrum = compute_ac_spectrum(samples_history, mean, covar)
  print("First 25 entries of the auto-correlation spectrum: {}".format(
      ac_spectrum[:25]))
  ess = compute_ess(ac_spectrum)
  print("Effective sample size per Metropolis-Hastings step: {}".format(ess))
  sys.stdout.flush()

  if FLAGS.train_dir:
    # Plot autocorrelation spectrum in tensorboard
    plot_step = tfe.Variable(1, trainable=False, dtype=tf.int64)

    for ac in ac_spectrum:
      with summary_writer.as_default():
#......... part of the code is omitted here .........
Developer: AnishShah, Project: tensorflow, Lines: 101, Source: main.py


Example 11: run

def run(flags_obj):
  """Run ResNet ImageNet training and eval loop using native Keras APIs.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.
  """
  if flags_obj.enable_eager:
    tf.enable_eager_execution()

  dtype = flags_core.get_tf_dtype(flags_obj)
  if dtype == 'fp16':
    raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                     'value(fp32).')

  data_format = flags_obj.data_format
  if data_format is None:
    data_format = ('channels_first'
                   if tf.test.is_built_with_cuda() else 'channels_last')
  tf.keras.backend.set_image_data_format(data_format)

  # pylint: disable=protected-access
  if flags_obj.use_synthetic_data:
    input_fn = keras_common.get_synth_input_fn(
        height=imagenet_main.DEFAULT_IMAGE_SIZE,
        width=imagenet_main.DEFAULT_IMAGE_SIZE,
        num_channels=imagenet_main.NUM_CHANNELS,
        num_classes=imagenet_main.NUM_CLASSES,
        dtype=flags_core.get_tf_dtype(flags_obj))
  else:
    input_fn = imagenet_main.input_fn

  train_input_dataset = input_fn(is_training=True,
                                 data_dir=flags_obj.data_dir,
                                 batch_size=flags_obj.batch_size,
                                 num_epochs=flags_obj.train_epochs,
                                 parse_record_fn=parse_record_keras)

  eval_input_dataset = input_fn(is_training=False,
                                data_dir=flags_obj.data_dir,
                                batch_size=flags_obj.batch_size,
                                num_epochs=flags_obj.train_epochs,
                                parse_record_fn=parse_record_keras)

  strategy = distribution_utils.get_distribution_strategy(
      num_gpus=flags_obj.num_gpus,
      turn_off_distribution_strategy=flags_obj.turn_off_distribution_strategy)

  strategy_scope = keras_common.get_strategy_scope(strategy)

  with strategy_scope:
    optimizer = keras_common.get_optimizer()
    model = resnet_model.resnet50(num_classes=imagenet_main.NUM_CLASSES)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['sparse_categorical_accuracy'])

  time_callback, tensorboard_callback, lr_callback = keras_common.get_callbacks(
      learning_rate_schedule, imagenet_main.NUM_IMAGES['train'])

  train_steps = imagenet_main.NUM_IMAGES['train'] // flags_obj.batch_size
  train_epochs = flags_obj.train_epochs

  if flags_obj.train_steps:
    train_steps = min(flags_obj.train_steps, train_steps)
    train_epochs = 1

  num_eval_steps = (imagenet_main.NUM_IMAGES['validation'] //
                    flags_obj.batch_size)

  validation_data = eval_input_dataset
  if flags_obj.skip_eval:
    # Only build the training graph. This reduces memory usage introduced by
    # control flow ops in layers that have different implementations for
    # training and inference (e.g., batch norm).
    tf.keras.backend.set_learning_phase(1)
    num_eval_steps = None
    validation_data = None

  history = model.fit(train_input_dataset,
                      epochs=train_epochs,
                      steps_per_epoch=train_steps,
                      callbacks=[
                          time_callback,
                          lr_callback,
                          tensorboard_callback
                      ],
                      validation_steps=num_eval_steps,
                      validation_data=validation_data,
                      verbose=1)

  eval_output = None
  if not flags_obj.skip_eval:
    eval_output = model.evaluate(eval_input_dataset,
                                 steps=num_eval_steps,
                                 verbose=1)
  stats = keras_common.build_stats(history, eval_output, time_callback)
#......... part of the code is omitted here .........
Developer: pooyadavoodi, Project: models, Lines: 101, Source: keras_imagenet_main.py


Example 12: initialize

def initialize():
  tf.enable_eager_execution()
Developer: arendsee, Project: machine-learning-notebook, Lines: 2, Source: eager.py


Example 13: setUp

  def setUp(self):
    tf.enable_eager_execution()
Developer: adit-chandra, Project: tensorflow, Lines: 2, Source: test_file_v1_12.py


Example 14: main

def main(argv):
  del argv  # unused

  tf.enable_eager_execution()
  tf.set_random_seed(FLAGS.seed)
  timestamp = datetime.strftime(datetime.today(), "%y%m%d_%H%M%S")
  FLAGS.logdir = FLAGS.logdir.format(timestamp=timestamp)
  FLAGS.model_dir = FLAGS.model_dir.format(timestamp=timestamp)
  if not tf.gfile.Exists(FLAGS.model_dir):
    tf.gfile.MakeDirs(FLAGS.model_dir)

  sprites_data = sprites_dataset.SpritesDataset(fake_data=FLAGS.fake_data)

  model = DisentangledSequentialVAE(
      latent_size_static=FLAGS.latent_size_static,
      latent_size_dynamic=FLAGS.latent_size_dynamic,
      hidden_size=FLAGS.hidden_size, channels=sprites_data.channels,
      latent_posterior=FLAGS.latent_posterior)

  global_step = tf.train.get_or_create_global_step()
  optimizer = tf.train.AdamOptimizer(
      tf.train.cosine_decay(FLAGS.learning_rate, global_step, FLAGS.max_steps))

  checkpoint = tf.train.Checkpoint(model=model, global_step=global_step,
                                   optimizer=optimizer)
  checkpoint_manager = tf.contrib.checkpoint.CheckpointManager(
      checkpoint, directory=FLAGS.model_dir, max_to_keep=5)
  checkpoint.restore(checkpoint_manager.latest_checkpoint)

  writer = tf.contrib.summary.create_file_writer(FLAGS.logdir)
  writer.set_as_default()

  dataset = sprites_data.train.map(lambda *x: x[0]).shuffle(1000).repeat()
  dataset = dataset.batch(FLAGS.batch_size).take(FLAGS.max_steps)
  for inputs in dataset.prefetch(buffer_size=None):
    with tf.contrib.summary.record_summaries_every_n_global_steps(
        FLAGS.log_steps, global_step=global_step):
      if FLAGS.enable_debug_logging:
        tf.contrib.summary.histogram("image", inputs)

      with tf.GradientTape() as tape:
        features = model.compressor(inputs)  # (batch, timesteps, hidden)
        static_sample, static_posterior = model.sample_static_posterior(
            features, FLAGS.num_samples)  # (samples, batch, latent)
        dynamic_sample, dynamic_posterior = model.sample_dynamic_posterior(
            features, FLAGS.num_samples, static_sample)  # (sampl, N, T, latent)
        likelihood = model.decoder((dynamic_sample, static_sample))

        reconstruction = tf.reduce_mean(  # integrate samples
            likelihood.mean()[:FLAGS.num_reconstruction_samples], axis=0)
        visualize_reconstruction(inputs, reconstruction,
                                 name="train_reconstruction")

        static_prior = model.static_prior()
        _, dynamic_prior = model.sample_dynamic_prior(
            FLAGS.num_samples, FLAGS.batch_size, sprites_data.length)

        if FLAGS.enable_debug_logging:
          summarize_dist_params(static_prior, "static_prior")
          summarize_dist_params(static_posterior, "static_posterior")
          summarize_dist_params(dynamic_prior, "dynamic_prior")
          summarize_dist_params(dynamic_posterior, "dynamic_posterior")
          summarize_dist_params(likelihood, "likelihood")

        static_prior_log_prob = static_prior.log_prob(static_sample)
        static_posterior_log_prob = static_posterior.log_prob(static_sample)
        dynamic_prior_log_prob = tf.reduce_sum(
            dynamic_prior.log_prob(dynamic_sample), axis=-1)  # sum time
        dynamic_posterior_log_prob = tf.reduce_sum(
            dynamic_posterior.log_prob(dynamic_sample), axis=-1)  # sum time
        likelihood_log_prob = tf.reduce_sum(
            likelihood.log_prob(inputs), axis=-1)  # sum time

        if FLAGS.enable_debug_logging:
          with tf.name_scope("log_probs"):
            summarize_mean_in_nats_and_bits(
                static_prior_log_prob, FLAGS.latent_size_static, "static_prior")
            summarize_mean_in_nats_and_bits(
                static_posterior_log_prob, FLAGS.latent_size_static,
                "static_posterior")
            summarize_mean_in_nats_and_bits(
                dynamic_prior_log_prob, FLAGS.latent_size_dynamic *
                sprites_data.length, "dynamic_prior")
            summarize_mean_in_nats_and_bits(
                dynamic_posterior_log_prob, FLAGS.latent_size_dynamic *
                sprites_data.length, "dynamic_posterior")
            summarize_mean_in_nats_and_bits(
                likelihood_log_prob, sprites_data.frame_size ** 2 *
                sprites_data.channels * sprites_data.length, "likelihood")

        elbo = tf.reduce_mean(static_prior_log_prob -
                              static_posterior_log_prob +
                              dynamic_prior_log_prob -
                              dynamic_posterior_log_prob +
                              likelihood_log_prob)
        loss = -elbo
        tf.contrib.summary.scalar("elbo", elbo)

      grads = tape.gradient(loss, model.variables)
      grads, global_norm = tf.clip_by_global_norm(grads, FLAGS.clip_norm)
#......... part of the code is omitted here .........
Developer: asudomoeva, Project: probability, Lines: 101, Source: disentangled_vae.py


Example 15:

import tensorflow as tf
import tensorflow.feature_column as fc
tf.enable_eager_execution()  # run ops immediately so the program can be inspected as it executes
Developer: BronsonLotty, Project: SelfProjects, Lines: 3, Source: tensorflow_estimators.py
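
Example 15 only turns eager mode on so that the rest of that estimators script can inspect its input pipeline interactively. A minimal sketch of that pattern follows; the dataset here is a stand-in, since the original script's data pipeline is not shown in the excerpt.

import tensorflow as tf

tf.enable_eager_execution()

# Stand-in dataset; the original script builds its datasets elsewhere.
ds = tf.data.Dataset.from_tensor_slices({"age": [23, 31, 47], "label": [0, 1, 1]})

# With eager execution on, a tf.data.Dataset is a Python iterable, so batches
# can be printed directly while debugging the input pipeline.
for batch in ds.batch(2):
    print({name: tensor.numpy() for name, tensor in batch.items()})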


Example 16: main

def main(unused_argv):
  """Run a CNN model on MNIST data to demonstrate DistributedStrategies."""

  tf.enable_eager_execution()

  num_gpus = FLAGS.num_gpus
  if num_gpus is None:
    devices = None
  elif num_gpus == 0:
    devices = ["/device:CPU:0"]
  else:
    devices = ["/device:GPU:{}".format(i) for i in range(num_gpus)]
  strategy = tf.distribute.MirroredStrategy(devices)

  with strategy.scope():
    train_ds, test_ds = mnist_datasets()
    train_ds = train_ds.shuffle(NUM_TRAIN_IMAGES).batch(FLAGS.batch_size)
    test_ds = test_ds.batch(FLAGS.batch_size)

    model = create_model()
    optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
    training_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32)
    training_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        "training_accuracy", dtype=tf.float32)
    test_loss = tf.keras.metrics.Mean("test_loss", dtype=tf.float32)
    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
        "test_accuracy", dtype=tf.float32)

    def train_step(inputs):
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        loss = compute_loss(logits, labels)
      grads = tape.gradient(loss, model.variables)
      optimizer.apply_gradients(zip(grads, model.variables))
      training_loss.update_state(loss)
      training_accuracy.update_state(labels, logits)

    def test_step(inputs):
      images, labels = inputs
      logits = model(images, training=False)
      loss = compute_loss(logits, labels)
      test_loss.update_state(loss)
      test_accuracy.update_state(labels, logits)

    train_iterator = strategy.make_dataset_iterator(train_ds)
    test_iterator = strategy.make_dataset_iterator(test_ds)
    for epoch in range(0, FLAGS.num_epochs):
      # Train
      print("Starting epoch {}".format(epoch))
      train_iterator.initialize()
      for _ in range(NUM_TRAIN_IMAGES // FLAGS.batch_size):
        strategy.experimental_run(train_step, train_iterator)
      print("Training loss: {:0.4f}, accuracy: {:0.2f}%".format(
          training_loss.result(), training_accuracy.result() * 100))
      training_loss.reset_states()
      training_accuracy.reset_states()

      # Test
      test_iterator.initialize()
      for _ in range(NUM_TEST_IMAGES // FLAGS.batch_size):
        strategy.experimental_run(test_step, test_iterator)
      print("Test loss: {:0.4f}, accuracy: {:0.2f}%".format(
          test_loss.result(), test_accuracy.result() * 100))
      test_loss.reset_states()
      test_accuracy.reset_states()
Developer: AndreasGocht, Project: tensorflow, Lines: 66, Source: mnist_eager_multigpu.py


Example 17: range

      [time_steps for _ in range(batch_size)], dtype=tf.int64)
  labels = tf.random_normal([batch_size, LABEL_DIMENSION])
  return tf.data.Dataset.from_tensors((labels, chars, sequence_length))


class RNNColorbotTest(tf.test.TestCase):

  def testTrainOneEpoch(self):
    model = rnn_colorbot.RNNColorbot(
        rnn_cell_sizes=[256, 128, 64],
        label_dimension=LABEL_DIMENSION,
        keep_prob=1.0)
    optimizer = tf.train.AdamOptimizer(learning_rate=.01)
    dataset = random_dataset()
    with test_util.use_gpu():
      rnn_colorbot.train_one_epoch(model, optimizer, dataset)

  def testTest(self):
    model = rnn_colorbot.RNNColorbot(
        rnn_cell_sizes=[256],
        label_dimension=LABEL_DIMENSION,
        keep_prob=1.0)
    dataset = random_dataset()
    with test_util.use_gpu():
      rnn_colorbot.test(model, dataset)


if __name__ == "__main__":
  tf.enable_eager_execution()
  tf.test.main()
Developer: adit-chandra, Project: tensorflow, Lines: 30, Source: rnn_colorbot_test.py


Example 18: main

def main(_):
  """Eager execution workflow with RevNet trained on CIFAR-10."""
  if FLAGS.data_dir is None:
    raise ValueError("No supplied data directory")

  if not os.path.exists(FLAGS.data_dir):
    raise ValueError("Data directory {} does not exist".format(FLAGS.data_dir))

  tf.enable_eager_execution()
  config = config_.get_hparams_cifar_38()
  model = revnet.RevNet(config=config)

  ds_train = cifar_input.get_ds_from_tfrecords(
      data_dir=FLAGS.data_dir,
      split="train",
      data_aug=True,
      batch_size=config.batch_size,
      epochs=config.epochs,
      shuffle=config.shuffle,
      data_format=config.data_format,
      dtype=config.dtype,
      prefetch=config.prefetch)

  ds_validation = cifar_input.get_ds_from_tfrecords(
      data_dir=FLAGS.data_dir,
      split="validation",
      data_aug=False,
      batch_size=config.eval_batch_size,
      epochs=1,
      data_format=config.data_format,
#......... the remainder of this code example is omitted .........
