
Python tensorflow.variables_initializer Function Code Examples


This article collects typical usage examples of the tensorflow.variables_initializer function in Python. If you are wondering what variables_initializer does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the variables_initializer function are shown below, sorted by popularity by default. Upvoting the examples you find useful helps surface better Python code examples.
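
Before the project examples, here is a minimal sketch of the basic pattern (my own illustrative snippet, assuming TensorFlow 1.x graph mode; the variable names are made up). Unlike tf.global_variables_initializer, which initializes every global variable, tf.variables_initializer takes an explicit var_list and returns an op that initializes only those variables:

    import tensorflow as tf

    # Two variables; suppose we only want to (re-)initialize one of them.
    w = tf.Variable(tf.zeros([3]), name="w")
    b = tf.Variable(0.0, name="b")

    # Op that initializes only `b` (illustrative name).
    init_b_only = tf.variables_initializer([b], name="init_b_only")

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize everything once
        sess.run(init_b_only)                        # later: reset just b
        print(sess.run([w, b]))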

Example 1: __model_gradients

    def __model_gradients(variable_scope: tf.VariableScope,
                          transformation_variable_scope: tf.VariableScope,
                          output: tf.Tensor, output_gradient: tf.Tensor):
        trainable_variables = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES,
            transformation_variable_scope.name)

        gradients = tf.gradients(output, trainable_variables, output_gradient)

        for gradient in gradients:
            gradient_accumulator = tf.Variable(tf.zeros(
                gradient.get_shape(), gradient.dtype), name="gradient_accumulator")

            tf.add_to_collection(
                '{}/model_gradients'.format(variable_scope.name),
                gradient)
            tf.add_to_collection(
                '{}/model_gradient_accumulators'.format(variable_scope.name),
                gradient_accumulator)
            tf.add_to_collection(
                '{}/update_model_gradient_accumulators'.format(
                    variable_scope.name),
                tf.assign_add(gradient_accumulator, gradient).op)

        with tf.control_dependencies(tf.get_collection(
                "{}/update_model_gradient_accumulators".format(variable_scope.name))):
            # there is no explicit no-op here; a dummy add serves as the named trigger op
            tf.add(1, 1, "update_model_gradient_accumulators")

        tf.variables_initializer(
            tf.get_collection(
                '{}/model_gradient_accumulators'.format(variable_scope.name)),
            'zero_model_gradient_accumulators')
Developer: thomasste, Project: ugtsa, Lines: 33, Source: model_builder.py


Example 2: __init__

    def __init__(self, model_architecture, policy_architecture, batch_size, n_particles, n_timesteps,
                        model_path_to_load_variables, model_path_to_save_variables,
                        policy_path_to_load_variables, policy_path_to_save_variables,
                        tb_path):


        self.batch_size = batch_size

        #Build Graph
            # - define all the vars
            # - start session
            # - initialize vars or load them
            # - later: save vars

        #Define model 
        print ('Defining model..')
        self.model = model_(model_architecture, batch_size=batch_size, n_particles=n_particles)

        #Define policy
        print ('Defining policy..')
        self.policy = policy_(policy_architecture, model=self.model, batch_size=batch_size, n_particles=n_particles, n_timesteps=n_timesteps)

        #Start session
        self.sess = tf.Session()

        
        #For tensorboard
        # train_writer = tf.summary.FileWriter(tb_path, self.sess.graph)
        writer = tf.summary.FileWriter(tb_path, graph=tf.get_default_graph())



        #Init the optimizer params. I'm not sure if this resets all the other params; need to check by loading params
        self.sess.run(tf.global_variables_initializer())

        #Initialize vars or load them
        #Model
        print ('Initializing model..')
        saver = tf.train.Saver(self.model.params_dict)
        if model_path_to_load_variables == '':
            self.sess.run(tf.variables_initializer(self.model.params_list))
        else:
            saver.restore(self.sess, model_path_to_load_variables)
            print ('loaded model variables ' + model_path_to_load_variables)

        #Policy
        print( 'Initializing policy..')
        saver = tf.train.Saver(self.policy.params_dict)
        if policy_path_to_load_variables == '':
            self.sess.run(tf.variables_initializer(self.policy.params_list))
        else:
            saver.restore(self.sess, policy_path_to_load_variables)
            print ('loaded policy variables ' + policy_path_to_load_variables)



        self.model_path_to_save_variables = model_path_to_save_variables
        self.policy_path_to_save_variables = policy_path_to_save_variables

        print ('Init Complete')
Developer: chriscremer, Project: Other_Code, Lines: 60, Source: model_based_RL.py


Example 3: test_variable

 def test_variable(self):
   with self.test_session() as sess:
     x = tf.Variable(2.0, name="CustomName")
     y = tf.constant(3.0)
     z = x * y
     z_new = ed.copy(z)
     tf.variables_initializer([x]).run()
     self.assertEqual(z_new.eval(), 6.0)
Developer: JoyceYa, Project: edward, Lines: 8, Source: copy_test.py


Example 4: test_swap_tensor_variable

 def test_swap_tensor_variable(self):
   with self.test_session() as sess:
     x = tf.constant(2.0)
     y = tf.constant(3.0)
     z = x * y
     qx = tf.Variable(4.0, name="CustomName")
     z_new = ed.copy(z, {x: qx})
     tf.variables_initializer([qx]).run()
     self.assertEqual(z_new.eval(), 12.0)
Developer: JoyceYa, Project: edward, Lines: 9, Source: copy_test.py


Example 5: test_local_variable

 def test_local_variable(self):
   with self.test_session() as sess:
     self.assertEquals([], tf.local_variables())
     value0 = 42
     tf.contrib.framework.local_variable(value0)
     value1 = 43
     tf.contrib.framework.local_variable(value1)
     variables = tf.local_variables()
     self.assertEquals(2, len(variables))
     self.assertRaises(tf.OpError, sess.run, variables)
     tf.variables_initializer(variables).run()
     self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
Developer: jeffzheng1, Project: tensorflow, Lines: 12, Source: variables_test.py


Example 6: test_scan_gradients

  def test_scan_gradients(self):
    with self.test_session() as sess:
      a = tf.Variable([1.0, 2.0, 3.0])
      op = tf.scan(lambda a, x: a + x, a)
      copy_op = ed.copy(op)
      gradient = tf.gradients(op, [a])[0]
      copy_gradient = tf.gradients(copy_op, [a])[0]

      tf.variables_initializer([a]).run()
      result_copy, result = sess.run([copy_gradient, gradient])
      self.assertAllClose(result, [3.0, 2.0, 1.0])
      self.assertAllClose(result_copy, [3.0, 2.0, 1.0])
Developer: JoyceYa, Project: edward, Lines: 12, Source: copy_test.py


Example 7: run_session

 def run_session(self, *args):
     (sess_device,
      model_params,) = args
     
     graphTf = tf.Graph()
     
     with graphTf.as_default():
         with graphTf.device(sess_device): # Throws an error if GPU is specified but not available.
             self._log.print3("=========== Making the CNN graph... ===============")
             cnn3d = Cnn3d()
             with tf.variable_scope("net"):
                 cnn3d.make_cnn_model( *model_params.get_args_for_arch() ) # Creates the network's graph (without optimizer).
                 
         self._log.print3("=========== Compiling the Testing Function ============")
         self._log.print3("=======================================================\n")
         
         cnn3d.setup_ops_n_feeds_to_test( self._log,
                                          self._params.indices_fms_per_pathtype_per_layer_to_save )
         # Create the saver
         saver_all = tf.train.Saver() # saver_net would suffice
         
     with tf.Session( graph=graphTf, config=tf.ConfigProto(log_device_placement=False, device_count={'CPU':999, 'GPU':99}) ) as sessionTf:
         file_to_load_params_from = self._params.get_path_to_load_model_from()
         if file_to_load_params_from is not None: # Load params
             self._log.print3("=========== Loading parameters from specified saved model ===============")
             chkpt_fname = tf.train.latest_checkpoint( file_to_load_params_from ) if os.path.isdir( file_to_load_params_from ) else file_to_load_params_from
             self._log.print3("Loading parameters from:" + str(chkpt_fname))
             try:
                 saver_all.restore(sessionTf, chkpt_fname)
                 self._log.print3("Parameters were loaded.")
             except Exception as e: handle_exception_tf_restore(self._log, e)
             
         else:
             self._ask_user_if_test_with_random() # Asks user whether to continue with randomly initialized model. It exits if no is given.
             self._log.print3("")
             self._log.print3("=========== Initializing network variables  ===============")
             tf.variables_initializer( var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="net") ).run()
             self._log.print3("Model variables were initialized.")
             
             
         self._log.print3("")
         self._log.print3("======================================================")
         self._log.print3("=========== Testing with the CNN model ===============")
         self._log.print3("======================================================\n")
         
         res_code = inferenceWholeVolumes( *( [sessionTf, cnn3d] + self._params.get_args_for_testing() ) )
     
     self._log.print3("")
     self._log.print3("======================================================")
     self._log.print3("=========== Testing session finished =================")
     self._log.print3("======================================================")
Developer: Kamnitsask, Project: deepmedic, Lines: 51, Source: testSession.py


Example 8: load_prior

def load_prior(config, sess, saver):
     logging.info('Loading prior model parameters from file ' + os.path.abspath(config.prior_model))
     saver.restore(sess, os.path.abspath(config.prior_model))

     # fill prior variables with the loaded values
     prior_variables = tf.get_collection_ref('prior_variables')
     prior_variables_dict = dict([(v.name, v) for v in prior_variables])
     assign_tensors = []
     with tf.variable_scope('prior'):
         for v in tf.trainable_variables():
             prior_name = 'loss/prior/'+v.name
             prior_variable = prior_variables_dict[prior_name]
             assign_tensors.append(prior_variable.assign(v))
     tf.variables_initializer(prior_variables)
     sess.run(assign_tensors)
Developer: rsennrich, Project: nematus, Lines: 15, Source: model_loader.py


Example 9: yolo_non_max_suppression

def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to set of boxes

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (, None), predicted score for each box
    boxes -- tensor of shape (4, None), predicted box coordinates
    classes -- tensor of shape (, None), predicted class for each box

    """

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')     # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor

    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    nms_indices = tf.image.non_max_suppression(boxes,scores,max_boxes_tensor,iou_threshold=iou_threshold)


    # Use K.gather() to select only nms_indices from scores, boxes and classes
    scores = K.gather(scores,nms_indices)
    boxes = K.gather(boxes,nms_indices)
    classes = K.gather(classes,nms_indices)


    return scores, boxes, classes
Developer: DavidWhois, Project: YOLO_model_demo, Lines: 32, Source: Autonomous_driving_application_Car_detection.py


Example 10: adam_variables_initializer

 def adam_variables_initializer(opt, var_list):
     adam_vars = [opt.get_slot(var, name)
                  for name in opt.get_slot_names()
                  for var in var_list]
     if isinstance(opt, tf.train.AdamOptimizer):
         adam_vars.extend(list(opt._get_beta_accumulators()))
     return tf.variables_initializer(adam_vars)
Developer: Humhu, Project: percepto, Lines: 7, Source: test_modal_cartpole.py


Example 11: initializeOrRestore

    def initializeOrRestore(self):

        self.ckptDir = os.path.join(self.checkpoint_dir, self.dataset.name)
        self.ckptPrefix = os.path.join(self.ckptDir, self.name, self.name)
        vgg_ckpt_file = os.path.join(self.ckptDir, 'vgg_16', 'vgg_16.ckpt')
        mt_ckpt_file = layers.latest_checkpoint(os.path.join(self.ckptDir, 'mt'))
        # ckpt_file = layers.latest_checkpoint(os.path.join(self.ckptDir, 'vgg_16', 'vgg_16.ckpt'))
        globalVars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

        if vgg_ckpt_file is not None and tf.train.checkpoint_exists(vgg_ckpt_file):
            varsInCkpt, varsNotInCkpt = layers.scan_checkpoint_for_vars(vgg_ckpt_file, globalVars)
            if len(varsInCkpt) != 0:
                restorationSaver = tf.train.Saver(varsInCkpt)
                self.sess.run(tf.report_uninitialized_variables(var_list=varsInCkpt))
                restorationSaver.restore(self.sess, vgg_ckpt_file)
        else:
            varsNotInCkpt = globalVars

        if mt_ckpt_file is not None and tf.train.checkpoint_exists(mt_ckpt_file):
            varsInCkpt, varsNotInCkpt = layers.scan_checkpoint_for_vars(mt_ckpt_file, varsNotInCkpt)
            varsInCkpt, varsNotInCkpt = layers.replaceVarInListsByName(varsInCkpt, varsNotInCkpt, 'fc6')
            if len(varsInCkpt) != 0:
                restorationSaver = tf.train.Saver(varsInCkpt)
                self.sess.run(tf.report_uninitialized_variables(var_list=varsInCkpt))
                restorationSaver.restore(self.sess, mt_ckpt_file)
        else:
            varsNotInCkpt = globalVars

        self.saver = tf.train.Saver()
        self.sess.run(tf.group(tf.variables_initializer(varsNotInCkpt), tf.local_variables_initializer()))
Developer: Tiyanak, Project: lip-reading, Lines: 30, Source: mt_vgg16.py


Example 12: style_transfer_train

def style_transfer_train(loss, img_var, initial_lr=3.0, decayed_lr=0.1, decay_lr_at=180, max_iter=200, print_every=50):
    # Create and initialize the Adam optimizer
    lr_var = tf.Variable(initial_lr, name="lr")
    # Create train_op that updates the generated image when run
    with tf.variable_scope("optimizer") as opt_scope:
        train_op = tf.train.AdamOptimizer(lr_var).minimize(loss, var_list=[img_var])
    # Initialize the generated image and optimization variables
    opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=opt_scope.name)
    sess.run(tf.variables_initializer([lr_var, img_var] + opt_vars))
    # Create an op that will clamp the image values when run
    clamp_image_op = tf.assign(img_var, tf.clip_by_value(img_var, -1.5, 1.5))

    imgs_in_process = []

    # Hardcoded, hand-crafted optimization loop
    for t in range(max_iter):
        # Take an optimization step to update img_var
        sess.run(train_op)
        if t < decay_lr_at:
            sess.run(clamp_image_op)
        if t == decay_lr_at:
            sess.run(tf.assign(lr_var, decayed_lr))
        if t % print_every == 0:
            print("train step: %d" % t)
            img = sess.run(img_var)
            imgs_in_process.append(img[0])
    print("train step: %d" % t)
    final_img = sess.run(img_var)[0]
    return imgs_in_process, final_img
Developer: haolang9527, Project: MyDeepLearning, Lines: 29, Source: styletransfer.py


Example 13: _get_ece

 def _get_ece(self, ece_op, update_op):
   """Return scalar expected calibration error."""
   with self.test_session() as sess:
     metrics_vars = tf.get_collection(tf.GraphKeys.METRIC_VARIABLES)
     sess.run(tf.variables_initializer(var_list=metrics_vars))
     _ = sess.run(update_op)
   return sess.run(ece_op)
Developer: zhangjiulong, Project: models, Lines: 7, Source: calibration_evaluation_test.py


Example 14: run

  def run(self, variables=None, use_coordinator=True, *args, **kwargs):
    """A simple wrapper to run inference.

    1. Initialize algorithm via `initialize`.
    2. (Optional) Build a TensorFlow summary writer for TensorBoard.
    3. (Optional) Initialize TensorFlow variables.
    4. (Optional) Start queue runners.
    5. Run `update` for `self.n_iter` iterations.
    6. While running, `print_progress`.
    7. Finalize algorithm via `finalize`.
    8. (Optional) Stop queue runners.

    To customize the way inference is run, run these steps
    individually.

    Args:
      variables: list, optional.
        A list of TensorFlow variables to initialize during inference.
        Default is to initialize all variables (this includes
        reinitializing variables that were already initialized). To
        avoid initializing any variables, pass in an empty list.
      use_coordinator: bool, optional.
        Whether to start and stop queue runners during inference using a
        TensorFlow coordinator. For example, queue runners are necessary
        for batch training with file readers.
      *args:
        Passed into `initialize`.
      **kwargs:
        Passed into `initialize`.
    """
    self.initialize(*args, **kwargs)

    if variables is None:
      init = tf.global_variables_initializer()
    else:
      init = tf.variables_initializer(variables)

    # Feed placeholders in case initialization depends on them.
    feed_dict = {}
    for key, value in six.iteritems(self.data):
      if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type:
        feed_dict[key] = value

    init.run(feed_dict)

    if use_coordinator:
      # Start input enqueue threads.
      self.coord = tf.train.Coordinator()
      self.threads = tf.train.start_queue_runners(coord=self.coord)

    for _ in range(self.n_iter):
      info_dict = self.update()
      self.print_progress(info_dict)

    self.finalize()

    if use_coordinator:
      # Ask threads to stop.
      self.coord.request_stop()
      self.coord.join(self.threads)
Developer: ekostem, Project: edward, Lines: 60, Source: inference.py


Example 15: initialize

    def initialize(self, sess):
        # Initial file lists are empty
        np_paths = []
        ss_paths = []

        variables = tf.global_variables()
        # Initialize all variables first
        sess.run(tf.variables_initializer(variables, name='init'))

        if self.pretrained_model is not None:
            if self.pretrained_model.endswith('.ckpt'):
                # Fresh train directly from ImageNet weights
                print('Loading initial model weights from {:s}'.format(self.pretrained_model))

                var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
                # Get the variables to restore, ignoring the variables to fix
                variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)

                restorer = tf.train.Saver(variables_to_restore)
                restorer.restore(sess, self.pretrained_model)
                print('Loaded.')
            else:
                # Restore from checkpoint and meta file
                self.restore_ckpt_from_dir(sess, self.net, self.pretrained_model)
                print('Loaded.')

        last_snapshot_iter = 0
        rate = cfg.TRAIN.LEARNING_RATE
        stepsizes = list(cfg.TRAIN.STEPSIZE)

        return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths
Developer: Sanster, Project: tf_ctpn, Lines: 31, Source: train_val.py


Example 16: _run_initsync

    def _run_initsync(self):
        # tparams = [list(chain(*tp)) for tp in self._tower_params]
        tparams = self._tower_params

        # Check to prevent from unnecessarily re-initializing and
        # synchronizing, i.e. when the model loads the weights.
        for v in chain.from_iterable(tparams):
            if getattr(v, '_keras_initialized', False):
                return

        KB.manual_variable_initialization(True)
        sess = KB.get_session()
        KB.manual_variable_initialization(False)

        # glob_variables = tf.global_variables()
        # sess.run(tf.variables_initializer(glob_variables))

        # Initialize on GPU0 and sync to other GPUs
        init_op = tf.variables_initializer(tparams[0])
        # init_op = tf.variables_initializer(self._tower_params[0])
        # init_op = tf.variables_initializer(self.trainable_weights)
        sess.run(init_op)

        # Important if using model_creator. Not necessary of model instance is
        # reused in which case the model layers are shared between slices
        # and are automatically sync'd.
        sync_op = all_sync_params(tparams, self._gdev_list,
                                  usenccl=self._usenccl)
        sess.run(sync_op)

        for v in chain.from_iterable(tparams):
            v._keras_initialized = True
Developer: NthTensor, Project: keras_experiments, Lines: 32, Source: _multigpu.py


Example 17: _build_eval_specific_graph

  def _build_eval_specific_graph(self, iterator, model_fn, params,
                                 record_files_placeholder, num_eval_steps):
    """Builds the part of the model that is specific to evaluation."""

    def build():
      features = iterator.get_next()
      estimator_spec = model_fn(
          features, None, tf.estimator.ModeKeys.EVAL, params)
      run_model_op = tf.group(*(update_op for _, update_op in
                                estimator_spec.eval_metric_ops.values()))
      eval_metric_tensors = {k: tensor for (k, (tensor, _))
                             in estimator_spec.eval_metric_ops.items()}
      return run_model_op, estimator_spec.loss, eval_metric_tensors

    if self._use_while_loop:
      def body(i):
        run_model_op_single_step, _, _ = build()
        with tf.control_dependencies([run_model_op_single_step]):
          return i + 1

      run_model_op = tf.while_loop(lambda i: i < num_eval_steps, body, [0],
                                   parallel_iterations=1)
      loss = None
      eval_metric_tensors = {
          "HR": self._compute_metric_mean(rconst.HR_METRIC_NAME),
          "NDCG": self._compute_metric_mean(rconst.NDCG_METRIC_NAME),
      }
    else:
      run_model_op, loss, eval_metric_tensors = build()

    metric_initializer = tf.variables_initializer(
        tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
    return self._EvalModelProperties(
        record_files_placeholder, iterator, loss, params["eval_batch_size"],
        run_model_op, eval_metric_tensors, metric_initializer)
Developer: 812864539, Project: models, Lines: 35, Source: model_runner.py


Example 18: test_op_constant

def test_op_constant(dtype, diff, sess):
    ops = (SimNeurons(LIF(tau_rc=1), Signal(np.zeros(10)), None),
           SimNeurons(LIF(tau_rc=2 if diff else 1), Signal(np.zeros(10)),
                      None))

    signals = SignalDict(tf.float32, 1)
    const = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype)
    const1 = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype, ndims=1)
    const3 = signals.op_constant(
        [op.neurons for op in ops], [op.J.shape[0] for op in ops],
        "tau_rc", dtype, ndims=3)

    assert const.dtype.base_dtype == dtype

    sess.run(tf.variables_initializer(tf.get_collection("constants")),
             feed_dict=signals.constant_phs)
    x, x1, x3 = sess.run([const, const1, const3])

    if diff:
        assert np.array_equal(x, [[1]] * 10 + [[2]] * 10)
        assert np.array_equal(x[:, 0], x1)
        assert np.array_equal(x, x3[..., 0])
    else:
        assert np.array_equal(x, 1.0)
        assert np.array_equal(x, x1)
        assert np.array_equal(x, x3)
Developer: nengo, Project: nengo_deeplearning, Lines: 30, Source: test_signals.py


Example 19: make_init_fn

 def make_init_fn(parameters):
   with tf.device(device):
     init_op = tf.variables_initializer(parameters)
   def init_fn(sess):
     tf.logging.info("Initializing model parameters.")
     sess.run(init_op)
   return init_fn
Developer: ALISCIFP, Project: models, Lines: 7, Source: problem_generator.py


Example 20: restore

    def restore(self, sess, do_restore, at_step=-1):
        saver = self.saver
        name = self.net_name
        out_file = saver['out_file'].replace('\\', '/')
        latest_ckpt = tf.train.get_checkpoint_state(
            self.conf.cachedir, saver['ckpt_file'])
        if not latest_ckpt or not do_restore:
            start_at = 0
            sess.run(tf.variables_initializer(
                PoseTools.get_vars(name)),
                feed_dict=self.fd)
            print("Not loading {:s} variables. Initializing them".format(name))
        else:
            if at_step < 0:
                saver['saver'].restore(sess, latest_ckpt.model_checkpoint_path)
                match_obj = re.match(out_file + '-(\d*)', latest_ckpt.model_checkpoint_path)
                start_at = int(match_obj.group(1)) + 1
            else:
                aa = latest_ckpt.all_model_checkpoint_paths
                model_file = ''
                for a in aa:
                    match_obj = re.match(out_file + '-(\d*)', a)
                    step = int(match_obj.group(1))
                    if step >= at_step:
                        model_file = a
                        break
                saver['saver'].restore(sess, model_file)
                match_obj = re.match(out_file + '-(\d*)', model_file)
                start_at = int(match_obj.group(1)) + 1

        if self.dep_nets:
            self.dep_nets.restore_joint(sess, self.name, self.joint, do_restore)

        return start_at
Developer: mkabra, Project: poseTF, Lines: 34, Source: PoseCommon.py



Note: The tensorflow.variables_initializer examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.

