Python tensorflow.assign_add Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.assign_add function in Python. If you have been wondering what assign_add does, how to call it, or what real-world uses look like, the curated code examples below should help.



The following 20 code examples of assign_add are shown, ordered by popularity by default.
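Before diving into the examples, a minimal sketch of the basic semantics may help. tf.assign_add(ref, value) builds an op that, when run, adds value to the given variable in place and yields the updated value. The snippet below is illustrative only, assuming the TensorFlow 1.x graph/session API that all of the examples in this article use:

import tensorflow as tf

counter = tf.Variable(0, name="counter", trainable=False)
increment = tf.assign_add(counter, 1)  # when run: counter += 1, returns the new value

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(increment))  # prints 1, then 2, then 3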

Example 1: loop_body

 def loop_body(i):
   asn1 = tf.assign_add(var_a, 1, name="a_add")
   with tf.control_dependencies([asn1]):
     asn2 = tf.assign_add(var_b, var_a, name="b_add")
   with tf.control_dependencies([asn2]):
     ni = tf.add(i, 1, name="i_add")
     return ni
Developer: hypatiad, Project: tensorflow, Lines: 7, Source: control_flow_ops_py_test.py


Example 2: _apply_stats

    def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
        updateOps = []
        # obtain the stats var list
        for stats_var in statsUpdates:
            stats_new = statsUpdates[stats_var]
            if accumulate:
                # simple superbatch averaging
                update_op = tf.assign_add(
                    stats_var, accumulateCoeff * stats_new, use_locking=True)
            else:
                # exponential running averaging
                update_op = tf.assign(
                    stats_var, stats_var * self._stats_decay, use_locking=True)
                update_op = tf.assign_add(
                    update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
            updateOps.append(update_op)

        with tf.control_dependencies(updateOps):
            stats_step_op = tf.assign_add(self.stats_step, 1)

        if KFAC_DEBUG:
            stats_step_op = (tf.Print(stats_step_op,
                                      [tf.convert_to_tensor('step:'),
                                       self.global_step,
                                       tf.convert_to_tensor('fac step:'),
                                       self.factor_step,
                                       tf.convert_to_tensor('sgd step:'),
                                       self.sgd_step,
                                       tf.convert_to_tensor('Accum:'),
                                       tf.convert_to_tensor(accumulate),
                                       tf.convert_to_tensor('Accum coeff:'),
                                       tf.convert_to_tensor(accumulateCoeff),
                                       tf.convert_to_tensor('stat step:'),
                                       self.stats_step, updateOps[0], updateOps[1]]))
        return [stats_step_op, ]
Developer: IcarusTan, Project: baselines, Lines: 35, Source: kfac.py
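A note on the non-accumulate branch above: tf.assign returns a reference to stats_var holding the decayed value, so the subsequent tf.assign_add updates that same variable after the decay has been applied. Together the chained pair implements the exponential moving average stats_var ← decay · stats_var + (1 − decay) · stats_new.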


Example 3: test_train_skip_train_if_max_step_already_saved

  def test_train_skip_train_if_max_step_already_saved(self):
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = checkpoints.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)
Developer: MostafaGazar, Project: tensorflow, Lines: 26, Source: graph_actions_test.py


Example 4: _eval_metric

def _eval_metric(input_, topk, correct_predictions, examples, phase):
    """Creates the standard tracking varibles if in test and returns accuracy."""
    my_parameters = {}
    if phase in (Phase.test, Phase.infer):
        dtype = tf.float32
        # Create the variables using tf.Variable because we don't want to share.
        count = tf.Variable(
            tf.constant(0, dtype=dtype),
            name="count_%d" % topk,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        correct = tf.Variable(
            tf.constant(0, dtype=dtype),
            name="correct_%d" % topk,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        my_parameters["count"] = count
        my_parameters["correct"] = correct
        with input_.g.device(count.device):
            examples = tf.assign_add(count, examples)
        with input_.g.device(correct.device):
            correct_predictions = tf.assign_add(correct, correct_predictions)
    return correct_predictions, examples, my_parameters
Developer: google, Project: prettytensor, Lines: 25, Source: pretty_tensor_loss_methods.py


Example 5: __init__

    def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt(tf.maximum(tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 1e-2))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
Developer: IcarusTan, Project: baselines, Lines: 29, Source: mpi_running_mean_std.py


Example 6: accumulate_privacy_spending

  def accumulate_privacy_spending(self, eps_delta, unused_sigma,
                                  num_examples):
    """Accumulate the privacy spending.

    Currently only supports approximate privacy. Here we assume Gaussian
    noise on a randomly sampled batch, which gives better composition: 1. the
    per-batch privacy is computed using the privacy amplification via sampling
    bound; 2. the composition is done using the composition with Gaussian
    noise.
    TODO(liqzhang) Add a link to a document that describes the bounds used.

    Args:
      eps_delta: EpsDelta pair which can be tensors.
      unused_sigma: the noise sigma. Unused for this accountant.
      num_examples: the number of examples involved.
    Returns:
      a TensorFlow operation for updating the privacy spending.
    """

    eps, delta = eps_delta
    with tf.control_dependencies(
        [tf.Assert(tf.greater(delta, 0),
                   ["delta needs to be greater than 0"])]):
      amortize_ratio = (tf.cast(num_examples, tf.float32) * 1.0 /
                        self._total_examples)
      # Use privacy amplification via sampling bound.
      # See Lemma 2.2 in http://arxiv.org/pdf/1405.7085v2.pdf
      # TODO(liqzhang) Add a link to a document with formal statement
      # and proof.
      amortize_eps = tf.reshape(tf.log(1.0 + amortize_ratio * (
          tf.exp(eps) - 1.0)), [1])
      amortize_delta = tf.reshape(amortize_ratio * delta, [1])
      return tf.group(*[tf.assign_add(self._eps_squared_sum,
                                      tf.square(amortize_eps)),
                        tf.assign_add(self._delta_sum, amortize_delta)])
Developer: ZhangShiyue, Project: models, Lines: 34, Source: accountant.py


Example 7: test_summary_saver

 def test_summary_saver(self):
   with tf.Graph().as_default() as g, tf.Session() as sess:
     log_dir = 'log/dir'
     summary_writer = testing.FakeSummaryWriter(log_dir, g)
     var = tf.Variable(0.0)
     tensor = tf.assign_add(var, 1.0)
     summary_op = tf.scalar_summary('my_summary', tensor)
     global_step = tf.contrib.framework.get_or_create_global_step()
     train_op = tf.assign_add(global_step, 1)
     hook = tf.train.SummarySaverHook(
         summary_op=summary_op, save_steps=8, summary_writer=summary_writer)
     hook.begin()
     sess.run(tf.initialize_all_variables())
     mon_sess = monitored_session._HookedSession(sess, [hook])
     for i in range(30):
       _ = i
       mon_sess.run(train_op)
     hook.end(sess)
     summary_writer.assert_summaries(
         test_case=self,
         expected_logdir=log_dir,
         expected_graph=g,
         expected_summaries={
             1: {'my_summary': 1.0},
             9: {'my_summary': 2.0},
             17: {'my_summary': 3.0},
             25: {'my_summary': 4.0},
         })
Developer: KalraA, Project: tensorflow, Lines: 28, Source: basic_session_run_hooks_test.py


Example 8: apply

    def apply(self, var_list):
        """Applies the running average to a list of variables
        Creates shadow variables and update op. Returns a grouped update op for
        all the averages in the list."""
        update_ops = []
        with tf.variable_scope('running_average'):
            for var in var_list:
                # add a shadow var that gets initialized to the same value
                # and a count to keep track of how many times it's been updated
                name = var.op.name
                count = tf.get_variable(
                    name+'_count', dtype=tf.float32,
                    initializer=tf.constant_initializer(0.0),
                    shape=[], trainable=False)
                shadow = tf.get_variable(
                    name+'_shadow', dtype=var.dtype,
                    initializer=var.initialized_value(),
                    collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                                 tf.GraphKeys.VARIABLES],
                    trainable=False)
                # now make the update ops
                # increase the count
                count_update = tf.assign_add(count, 1.0)
                with tf.control_dependencies([count_update]):
                    difference = (var - shadow)/count
                    update = tf.assign_add(shadow, difference)
                update_ops.append(update)
                self.shadow_vars[var] = (shadow, count)

        return update_ops
Developer: PFCM, Project: 482-project, Lines: 30, Source: averager.py


Example 9: evaluate_precision_recall

def evaluate_precision_recall(
    input_layer, labels, threshold=0.5, per_example_weights=None, name=PROVIDED, phase=Phase.train
):
    """Computes the precision and recall of the prediction vs the labels.

  Args:
    input_layer: A Pretty Tensor object.
    labels: The target labels to learn as a float tensor.
    threshold: The threshold to use to decide if the prediction is true.
    per_example_weights: A Tensor with a weight per example.
    name: An optional name.
    phase: The phase of this model; non training phases compute a total across
      all examples.
  Returns:
    Precision and Recall.
  """
    _ = name  # Eliminate warning, name used for namescoping by PT.
    selected, sum_retrieved, sum_relevant = _compute_precision_recall(
        input_layer, labels, threshold, per_example_weights
    )

    if phase != Phase.train:
        dtype = tf.float32
        # Create the variables in all cases so that the load logic is easier.
        relevant_count = tf.get_variable(
            "relevant_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        retrieved_count = tf.get_variable(
            "retrieved_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )
        selected_count = tf.get_variable(
            "selected_count",
            [],
            dtype,
            tf.zeros_initializer,
            collections=[bookkeeper.GraphKeys.TEST_VARIABLES],
            trainable=False,
        )

        with input_layer.g.device(selected_count.device):
            selected = tf.assign_add(selected_count, selected)
        with input_layer.g.device(retrieved_count.device):
            sum_retrieved = tf.assign_add(retrieved_count, sum_retrieved)
        with input_layer.g.device(relevant_count.device):
            sum_relevant = tf.assign_add(relevant_count, sum_relevant)

    return (
        tf.select(tf.equal(sum_retrieved, 0), tf.zeros_like(selected), selected / sum_retrieved),
        tf.select(tf.equal(sum_relevant, 0), tf.zeros_like(selected), selected / sum_relevant),
    )
Developer: yaowenwu, Project: prettytensor, Lines: 60, Source: pretty_tensor_loss_methods.py
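The tf.select guards in the return statement (tf.select was later renamed tf.where) return zero rather than dividing by zero when nothing has been retrieved or nothing is relevant yet.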


Example 10: running_mean

def running_mean(cost, tag_name, batch_size=1):
    with tf.name_scope("running_mean_" + tag_name):
        with tf.variable_scope(tag_name):
            cost_sum = tf.get_variable(
              "cost_sum",
              initializer=tf.zeros_initializer,
              dtype=tf.float64,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)
            batches = tf.get_variable(
              "cost_num_batches",
              initializer=tf.zeros_initializer,
              dtype=tf.int32,
              shape=(),
              collections=[tf.GraphKeys.LOCAL_VARIABLES],
              trainable=False)

        cost_add = tf.assign_add(cost_sum, tf.cast(cost, dtype=tf.float64))
        batches_add = tf.assign_add(batches, batch_size)
        update_cost_mean = tf.group(cost_add, batches_add)

        reset_batches = tf.assign(batches, 0)
        reset_cost_sum = tf.assign(cost_sum, 0.0)
        reset_cost_mean = tf.group(reset_batches, reset_cost_sum)

        mean_cost = tf.divide(
          cost_sum,
          tf.cast(batches, dtype=tf.float64))
        train_loss_summary = tf.summary.scalar(tag_name, mean_cost)

    return reset_cost_mean, update_cost_mean, train_loss_summary
Developer: cupslab, Project: neural_network_cracking, Lines: 32, Source: pass_utils.py
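One plausible way to wire the three returned ops into a training loop is sketched below. This is a hypothetical usage: train_op, writer, cost, num_epochs, and batches_per_epoch are assumed to exist. Since cost_sum and cost_num_batches are placed in the LOCAL_VARIABLES collection, they need tf.local_variables_initializer():

reset_op, update_op, summary_op = running_mean(cost, 'train_loss')

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])  # the accumulators are local variables
    for epoch in range(num_epochs):
        sess.run(reset_op)                   # zero the accumulators for this epoch
        for _ in range(batches_per_epoch):
            sess.run([train_op, update_op])  # train and accumulate cost + batch count
        writer.add_summary(sess.run(summary_op), epoch)  # log the epoch's mean cost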


Example 11: loss

def loss(loss_value):
  """Calculates aggregated mean loss."""
  total_loss = tf.Variable(0.0, False)
  loss_count = tf.Variable(0, False)
  total_loss_update = tf.assign_add(total_loss, loss_value)
  loss_count_update = tf.assign_add(loss_count, 1)
  loss_op = total_loss / tf.cast(loss_count, tf.float32)
  return [total_loss_update, loss_count_update], loss_op
Developer: amygdala, Project: tensorflow-workshop, Lines: 8, Source: util.py


Example 12: test_capture_variable

 def test_capture_variable(self):
     monitor = learn.monitors.CaptureVariable(var_name="my_assign_add:0", every_n=8, first_n=2)
     with tf.Graph().as_default() as g, self.test_session(g):
         var = tf.Variable(0.0, name="my_var")
         var.initializer.run()
         tf.assign_add(var, 1.0, name="my_assign_add")
         self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
         self.assertEqual({0: 1.0, 1: 2.0, 2: 3.0, 10: 4.0, 18: 5.0, 26: 6.0, 29: 7.0}, monitor.values)
Developer: paolodedios, Project: tensorflow, Lines: 8, Source: monitors_test.py


Example 13: train_one_epoch

def train_one_epoch(generator, discriminator,
                    generator_optimizer, discriminator_optimizer,
                    dataset, log_interval, noise_dim):
  """Trains `generator` and `discriminator` models on `dataset`.

  Args:
    generator: Generator model.
    discriminator: Discriminator model.
    generator_optimizer: Optimizer to use for generator.
    discriminator_optimizer: Optimizer to use for discriminator.
    dataset: Dataset of images to train on.
    log_interval: How many global steps to wait between logging and collecting
      summaries.
    noise_dim: Dimension of noise vector to use.
  """

  total_generator_loss = 0.0
  total_discriminator_loss = 0.0
  for (batch_index, images) in enumerate(tfe.Iterator(dataset)):
    with tf.device('/cpu:0'):
      tf.assign_add(tf.train.get_global_step(), 1)

    with tf.contrib.summary.record_summaries_every_n_global_steps(log_interval):
      current_batch_size = images.shape[0]
      noise = tf.random_uniform(shape=[current_batch_size, noise_dim],
                                minval=-1., maxval=1., seed=batch_index)

      with tfe.GradientTape(persistent=True) as g:
        generated_images = generator(noise)
        tf.contrib.summary.image('generated_images',
                                 tf.reshape(generated_images, [-1, 28, 28, 1]),
                                 max_images=10)

        discriminator_gen_outputs = discriminator(generated_images)
        discriminator_real_outputs = discriminator(images)
        discriminator_loss_val = discriminator_loss(discriminator_real_outputs,
                                                    discriminator_gen_outputs)
        total_discriminator_loss += discriminator_loss_val

        generator_loss_val = generator_loss(discriminator_gen_outputs)
        total_generator_loss += generator_loss_val

      generator_grad = g.gradient(generator_loss_val, generator.variables)
      discriminator_grad = g.gradient(discriminator_loss_val,
                                      discriminator.variables)

      with tf.variable_scope('generator'):
        generator_optimizer.apply_gradients(zip(generator_grad,
                                                generator.variables))
      with tf.variable_scope('discriminator'):
        discriminator_optimizer.apply_gradients(zip(discriminator_grad,
                                                    discriminator.variables))

      if log_interval and batch_index > 0 and batch_index % log_interval == 0:
        print('Batch #%d\tAverage Generator Loss: %.6f\t'
              'Average Discriminator Loss: %.6f' % (
                  batch_index, total_generator_loss/batch_index,
                  total_discriminator_loss/batch_index))
Developer: ChengYuXiang, Project: tensorflow, Lines: 58, Source: mnist.py


Example 14: accuracy

def accuracy(logits, labels):
  """Calculates aggregated accuracy."""
  is_correct = tf.nn.in_top_k(logits, labels, 1)
  correct = tf.reduce_sum(tf.cast(is_correct, tf.int32))
  incorrect = tf.reduce_sum(tf.cast(tf.logical_not(is_correct), tf.int32))
  correct_count = tf.Variable(0, False)
  incorrect_count = tf.Variable(0, False)
  correct_count_update = tf.assign_add(correct_count, correct)
  incorrect_count_update = tf.assign_add(incorrect_count, incorrect)
  accuracy_op = tf.cast(correct_count, tf.float32) / tf.cast(
      correct_count + incorrect_count, tf.float32)
  return [correct_count_update, incorrect_count_update], accuracy_op
Developer: amygdala, Project: tensorflow-workshop, Lines: 12, Source: util.py


Example 15: setUp

  def setUp(self):
    tf.test.TestCase.setUp(self)

    self.log_dir = 'log/dir'
    self.summary_writer = testing.FakeSummaryWriter(self.log_dir)

    var = tf.Variable(0.0)
    tensor = tf.assign_add(var, 1.0)
    self.summary_op = tf.summary.scalar('my_summary', tensor)

    global_step = tf.contrib.framework.get_or_create_global_step()
    self.train_op = tf.assign_add(global_step, 1)
Developer: brchiu, Project: tensorflow, Lines: 12, Source: basic_session_run_hooks_test.py


Example 16: advance_counters

  def advance_counters(self, total):
    """Returns ops to advance the per-component step and total counters.

    Args:
      total: Total number of actions to increment counters by.

    Returns:
      tf.Group op incrementing 'step' by 1 and 'total' by total.
    """
    update_total = tf.assign_add(self._total, total, use_locking=True)
    update_step = tf.assign_add(self._step, 1, use_locking=True)
    return tf.group(update_total, update_step)
Developer: knathanieltucker, Project: models, Lines: 12, Source: component.py
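Passing use_locking=True makes each read-modify-write atomic, which matters when several threads or replicas may increment the same shared counters concurrently.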


Example 17: session_run_job

    def session_run_job():
      with tf.Session() as sess:
        a = tf.Variable(10, dtype=tf.int32, name='a')
        b = tf.Variable(20, dtype=tf.int32, name='b')
        d = tf.constant(1, dtype=tf.int32, name='d')
        inc_a = tf.assign_add(a, d, name='inc_a')
        inc_b = tf.assign_add(b, d, name='inc_b')
        inc_ab = tf.group([inc_a, inc_b], name="inc_ab")

        sess.run(tf.global_variables_initializer())

        sess = tf_debug.TensorBoardDebugWrapperSession(sess, self._debugger_url)
        session_run_results.append(sess.run(inc_ab))
Developer: jtagscherer, Project: tensorboard, Lines: 13, Source: interactive_debugger_plugin_test.py


Example 18: test_train_loss

 def test_train_loss(self):
   with tf.Graph().as_default() as g, self.test_session(g):
     tf.contrib.framework.create_global_step()
     loss_var = tf.contrib.framework.local_variable(10.0)
     train_op = tf.group(
         tf.assign_add(tf.contrib.framework.get_global_step(), 1),
         tf.assign_add(loss_var, -1.0))
     self._assert_summaries(self._output_dir)
     loss = learn.graph_actions.train(
         g, output_dir=self._output_dir, train_op=train_op,
         loss_op=loss_var.value(), steps=6)
     self.assertEqual(4.0, loss)
     self._assert_summaries(self._output_dir, expected_graphs=[g])
Developer: 285219011, Project: hello-world, Lines: 13, Source: graph_actions_test.py


Example 19: testPlateauOpHook

  def testPlateauOpHook(self):
    global_step = tf.train.create_global_step()
    counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
    indicator = tf.get_variable("indicator", initializer=0, dtype=tf.int32)
    tf.summary.scalar("count", counter)
    incr_global_step = tf.assign_add(global_step, 1)
    incr_counter = tf.assign_add(counter, 1)
    incr_indicator = tf.assign_add(indicator, 1)

    # Run incr_indicator once the counter metric has failed to rise by more
    # than 1 over 20 steps.

    ckpt_dir = self.ckpt_dir("plateauop")
    stop_hook = metrics_hook.PlateauOpHook(
        ckpt_dir,
        "count_1",
        incr_indicator,
        num_plateau_steps=20,
        plateau_delta=1.,
        plateau_decrease=False,
        every_n_steps=10)
    with self.sess(stop_hook, ckpt_dir) as sess:
      for _ in range(20):
        sess.run((incr_global_step, incr_counter))

      # Summary files should now have 2 values in them
      self.flush()

      # Run for more steps so that the hook gets triggered and we verify that we
      # don't stop.
      for _ in range(30):
        sess.run((incr_global_step, incr_counter))

      self.flush()

      # Run without incrementing the counter
      for _ in range(30):
        sess.run(incr_global_step)
      self.flush()

      self.assertTrue(sess.run(indicator) < 1)

      # Metrics should be written such that now the counter has gone >20 steps
      # without being incremented.
      # Check that we run the incr_indicator op several times
      for _ in range(3):
        for _ in range(10):
          sess.run(incr_global_step)
        self.flush()

      self.assertTrue(sess.run(indicator) > 1)
Developer: kltony, Project: tensor2tensor, Lines: 50, Source: metrics_hook_test.py


Example 20: setUp

 def setUp(self):
   self.model_dir = tempfile.mkdtemp()
   self.graph = tf.Graph()
   with self.graph.as_default():
     self.scaffold = monitored_session.Scaffold()
     self.global_step = tf.contrib.framework.get_or_create_global_step()
     self.train_op = tf.assign_add(self.global_step, 1)
Developer: KalraA, Project: tensorflow, Lines: 7, Source: basic_session_run_hooks_test.py



Note: The tensorflow.assign_add examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code platforms. The snippets were selected from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.

