
Python tensorflow.fill Function Code Examples


This article collects typical usage examples of the tensorflow.fill function in Python. If you have been wondering what tf.fill does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.



Twenty code examples of the fill function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
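Before the examples, here is a minimal sketch of the tf.fill API itself, written for TF 1.x graph mode as used throughout this page (variable names are illustrative):

import tensorflow as tf

# tf.fill(dims, value) builds a tensor of shape `dims` with every element
# set to `value`. Unlike tf.constant, `dims` may be a tensor computed at
# run time, which is why many examples below derive it from tf.shape(...).
static_fill = tf.fill([2, 3], 9.0)                  # shape fixed at graph build
dynamic_fill = tf.fill(tf.shape(static_fill), 0.0)  # shape taken from a tensor

with tf.Session() as sess:
    print(sess.run(static_fill))   # [[9. 9. 9.] [9. 9. 9.]]
    print(sess.run(dynamic_fill))  # [[0. 0. 0.] [0. 0. 0.]]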

Example 1: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, dtype=tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # clamp each example's scaled mean into a fixed threshold window
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [utils.batch_size, 1, 1])

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Author: nearlyeveryone, Project: bpm, Lines: 29, Source: bpm_estimator.py
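The snippet above depends on a project-specific utils module; a minimal harness with stand-in values (the constants below are assumptions, not the project's actual settings):

class utils:  # stand-in for the project's utils module
    batch_size = 128
    threshold_scalar = 1.0

inputs = tf.random_uniform([utils.batch_size, 64, 1])  # [batch, time, 1]
beats = thresholding(inputs)  # binary peak/beat mask, same shape as inputs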


Example 2: _create_state

    def _create_state(self, batch_size, dtype, cell_state=None):
        cand_symbols = tf.fill([batch_size, self.max_len],
                               tf.constant(self.start_token, dtype=tf.int32))
        cand_logprobs = tf.ones((batch_size,), dtype=tf.float32) * -float('inf')
        cand_symbols.set_shape([batch_size, self.max_len])

        if cell_state is None:
            cell_state = self.cell.zero_state(batch_size*self.beam_size, dtype=dtype)
        else:
            cell_state = BeamDecoder._tile_along_beam(self.beam_size, cell_state)
        full_size = batch_size * self.beam_size
        first_in_beam_mask = tf.equal(tf.range(full_size) % self.beam_size, 0)

        beam_symbols = tf.fill([full_size, self.max_len],
                               tf.constant(self.start_token, dtype=tf.int32))
        beam_logprobs = tf.where(
            first_in_beam_mask,
            tf.fill([full_size], 0.0),
            tf.fill([full_size], -1e18), # top_k does not play well with -inf
                                         # TODO: dtype-dependent value here
        )

        return (
            cand_symbols,
            cand_logprobs,
            beam_symbols,
            beam_logprobs,
            cell_state
        )
Author: Calvin-L, Project: commandline-helper, Lines: 29, Source: beam_search.py


Example 3: getLoss

def getLoss(trueCosSim, falseCosSim, margin):
    # margin ranking (hinge) loss: max(0, margin - (trueCosSim - falseCosSim))
    zero = tf.fill(tf.shape(trueCosSim), 0.0)
    tfMargin = tf.fill(tf.shape(trueCosSim), margin)
    with tf.name_scope("loss"):
        losses = tf.maximum(zero, tf.subtract(tfMargin, tf.subtract(trueCosSim, falseCosSim)))
        loss = tf.reduce_sum(losses)
    return loss
Author: sjqzhang, Project: QA, Lines: 7, Source: qaLSTMNet.py
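Here the two tf.fill calls only broadcast scalars to the shape of trueCosSim. Since elementwise TF ops broadcast scalars automatically, an equivalent formulation (a sketch, not taken from the original project) is simply:

losses = tf.maximum(0.0, margin - (trueCosSim - falseCosSim))
loss = tf.reduce_sum(losses)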


Example 4: compute_ans

 def compute_ans(op_embedding, comparison):
   op_embedding = tf.expand_dims(op_embedding, 0)
   #dot product of operation embedding with hidden state to the left of the number occurrence
   first = tf.transpose(
       tf.matmul(op_embedding,
                 tf.transpose(
                     tf.reduce_sum(hidden_vectors * tf.tile(
                         tf.expand_dims(
                             tf.transpose(self.batch_ordinal_question), 2),
                         [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
   second = self.batch_question_number_one_mask + tf.transpose(
       tf.matmul(op_embedding,
                 tf.transpose(
                     tf.reduce_sum(hidden_vectors * tf.tile(
                         tf.expand_dims(
                             tf.transpose(self.batch_ordinal_question_one), 2
                         ), [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
   question_number_softmax = tf.nn.softmax(tf.concat(axis=1, values=[first, second]))
   if (self.mode == "test"):
     cond = tf.equal(question_number_softmax,
                     tf.reshape(
                         tf.reduce_max(question_number_softmax, 1),
                         [self.batch_size, 1]))
     question_number_softmax = tf.where(
         cond,
         tf.fill(tf.shape(question_number_softmax), 1.0),
         tf.fill(tf.shape(question_number_softmax), 0.0))
     question_number_softmax = tf.cast(question_number_softmax,
                                       self.data_type)
   ans = tf.reshape(
       tf.reduce_sum(question_number_softmax * tf.concat(
           axis=1, values=[self.batch_question_number, self.batch_question_number_one]),
                     1), [self.batch_size, 1])
   return ans
Author: Hukongtao, Project: models, Lines: 34, Source: model.py


Example 5: language_model

def language_model(input, vocab_size):
  """Form p(x[0], ..., x[timesteps - 1]),

  \prod_{t=0}^{timesteps - 1} p(x[t] | x[:t]),

  To calculate the probability, we call log_prob on
  x = [x[0], ..., x[timesteps - 1]] given
  `input` = [0, x[0], ..., x[timesteps - 2]].

  We implement this separately from the generative model so the
  forward pass, e.g., embedding/dense layers, can be parallelized.

  [batch_size, timesteps] -> [batch_size, timesteps]
  """
  x = tf.one_hot(input, depth=vocab_size, dtype=tf.float32)
  h = tf.fill(tf.stack([tf.shape(x)[0], FLAGS.hidden_size]), 0.0)
  c = tf.fill(tf.stack([tf.shape(x)[0], FLAGS.hidden_size]), 0.0)
  hs = []
  reuse = None
  for t in range(FLAGS.timesteps):
    if t > 0:
      reuse = True
    xt = x[:, t, :]
    h, c = lstm_cell(xt, h, c, name="lstm", reuse=reuse)
    hs.append(h)

  h = tf.stack(hs, 1)
  logits = tf.layers.dense(h, vocab_size, name="dense")
  output = Categorical(logits=logits)
  return output
Author: JoyceYa, Project: edward, Lines: 30, Source: lstm.py


Example 6: testInitRequiredAssignAdd

 def testInitRequiredAssignAdd(self):
   with self.test_session():
     p = tf.Variable(tf.fill([1024, 1024], 1),
                     dtype=tf.int32)
     a = tf.assign_add(p, tf.fill([1024, 1024], 0))
     with self.assertRaisesOpError("use uninitialized"):
       a.op.run()
Author: CdricGmd, Project: tensorflow, Lines: 7, Source: dense_update_ops_test.py


Example 7: testParallelAssignWithLocking

  def testParallelAssignWithLocking(self):
    with self.test_session() as sess:
      zeros_t = tf.fill([1024, 1024], 0.0)
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(zeros_t)
      assigns = [tf.assign(p, tf.multiply(ones_t, float(i)),
                           use_locking=True)
                 for i in range(1, 21)]
      p.initializer.run()

      def run_assign(assign_op):
        sess.run(assign_op)
      threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                 for assign_op in assigns]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      vals = p.eval()

      # Assert every element is the same, and taken from one of the assignments.
      self.assertTrue(vals[0, 0] > 0)
      self.assertTrue(vals[0, 0] <= 20)
      self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
Author: CdricGmd, Project: tensorflow, Lines: 25, Source: dense_update_ops_test.py


Example 8: _variance

  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = tf.where(tf.greater(self.df, 2.),
                     self.df - 2.,
                     tf.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (tf.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           tf.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = tf.where(
        self.df > tf.fill(self.batch_shape_tensor(), 2.),
        var,
        tf.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return tf.where(
          tf.greater(
              self.df,
              tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          tf.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              tf.assert_less(
                  tf.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Author: asudomoeva, Project: probability, Lines: 33, Source: student_t.py


Example 9: get_online_sequences

def get_online_sequences(sequence_length, batch_size,
                         pattern_length=10):
    """Gets tensors which produce new random examples every time
    they are evaluated.

    Args:
        sequence_length: the length of the time-lag the model has to
            remember the sequence for.
        batch_size: how many at once.
        pattern_length: the length of the pattern that has to be
            remembered and regurgitated.

    Returns:
        (data, targets): data is
            `[sequence_length + 2*pattern_length, batch_size, 1]`, targets
            are also `[sequence_length + 2*pattern_length, batch_size, 1]`.
    """
    # first we need a pattern to remember
    pattern = tf.random_uniform([pattern_length, batch_size, 1], maxval=8,
                                dtype=tf.int32)
    central_fillers = tf.fill([sequence_length-1, batch_size, 1], 8)
    go = tf.fill([1, batch_size, 1], 9)
    final_fillers = tf.fill([pattern_length, batch_size, 1], 8)
    inputs = tf.concat(axis=0, values=[pattern, central_fillers, go, final_fillers])

    fillers = tf.fill([sequence_length+pattern_length, batch_size, 1], 8)
    targets = tf.concat(axis=0, values=[fillers, pattern])

    return inputs, targets
Author: PFCM, Project: datasets, Lines: 29, Source: copy_pattern.py
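A quick usage sketch, with the shapes implied by the docstring (argument values are illustrative):

inputs, targets = get_online_sequences(sequence_length=20, batch_size=4)
# inputs:  [40, 4, 1] int32 -- pattern, 19 fillers (8), go marker (9), 10 fillers
# targets: [40, 4, 1] int32 -- 30 fillers (8) followed by the 10-step pattern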


Example 10: add_model

  def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    with tf.variable_scope("linear-transform"):
        weight = tf.Variable(tf.fill([self.config.n_features, self.config.n_classes], 0.0))
        bias = tf.Variable(tf.fill([self.config.n_classes], 0.0))
        z = tf.matmul(input_data, weight) + bias
        out = softmax(z)
    ### END YOUR CODE
    return out
Author: kvfrans, Project: cs224-solutions, Lines: 27, Source: q1_classifier.py


Example 11: _chain_backprop

def _chain_backprop(n):
  """Creates forward backward graph using tf.gradients.

  A0->A1->A2->..->An
    /    /       /
  B0<-B1<-B2<-..<-Bn
  """

  def forward(A0, n):
    """Takes A0, applies n operations to it, returns An."""

    A = A0
    for L in range(1, n+1): # op_i produces A_i
      A = tf.tanh(A, name="A"+str(L))
    return A

  def backward(A0, An, Bn, n):
    B0 = tf.gradients([An], [A0], grad_ys=[Bn])[0]
    return B0

  A0 = tf.fill((size,), 1.0, name="A0")  # `size` is a module-level constant
  An = forward(A0, n)
  Bn = tf.fill((size,), 1.0, name="Bn")
  B0 = tf.gradients([An], [A0], grad_ys=[Bn])[0]
  return B0
Author: BhaskarNallani, Project: gradient-checkpointing, Lines: 25, Source: mem_util_test.py
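The snippet reads a module-level size constant; a hedged harness that defines it and evaluates the gradient:

size = 4  # module-level constant assumed by _chain_backprop

B0 = _chain_backprop(5)
with tf.Session() as sess:
    # gradient of a 5-deep tanh chain at A0 = ones, seeded with Bn = ones
    print(sess.run(B0))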


Example 12: make_hard_softmax

 def make_hard_softmax(self, softmax):
   # Converts a soft selection to a hard (one-hot) selection; used at test time.
   cond = tf.equal(
       softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1]))
   softmax = tf.where(
       cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0))
   softmax = tf.cast(softmax, self.data_type)
   return softmax
Author: Hukongtao, Project: models, Lines: 8, Source: model.py


Example 13: testFillNegative

  def testFillNegative(self):
    with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(ValueError):
          tf.fill(shape, 7)

      # Using a placeholder so this won't be caught in Python.
      dims = tf.placeholder(tf.int32)
      fill_t = tf.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(tf.errors.InvalidArgumentError):
          fill_t.eval({dims: shape})
Author: 4chin, Project: tensorflow, Lines: 12, Source: constant_op_test.py


Example 14: LSTMBiasInit

def LSTMBiasInit(shape, dtype):
  """Returns ones for forget-gate, and zeros for the others."""
  shape = np.array(shape)

  # Check internal consistencies.
  assert shape.shape == (1,), shape
  assert shape[0] % 4 == 0, shape

  n = shape[0] // 4
  ones = tf.fill([n], tf.constant(1, dtype=dtype))
  zeros = tf.fill([3 * n], tf.constant(0, dtype=dtype))
  return tf.concat([ones, zeros], 0)
Author: 812864539, Project: models, Lines: 12, Source: blocks_lstm.py
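Because LSTMBiasInit takes (shape, dtype), it can also be called directly to build a bias variable; a hypothetical usage (num_units is illustrative):

num_units = 128  # illustrative cell size
lstm_bias = tf.Variable(LSTMBiasInit([4 * num_units], tf.float32),
                        name="lstm_bias")  # ones for forget gate, zeros elsewhere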


Example 15: testShapeFunctionEdgeCases

    def testShapeFunctionEdgeCases(self):
        # Non-vector dimensions.
        with self.assertRaises(ValueError):
            tf.fill([[0, 1], [2, 3]], 1.0)

        # Non-scalar value.
        with self.assertRaises(ValueError):
            tf.fill([3, 2], [1.0, 2.0])

        # Partial dimension information.
        f = tf.fill(tf.placeholder(tf.int32, shape=(4,)), 3.0)
        self.assertEqual([None, None, None, None], f.get_shape().as_list())
Author: khellan, Project: tensorflow, Lines: 12, Source: constant_op_test.py


Example 16: testAssignNonStrictShapeChecking

  def testAssignNonStrictShapeChecking(self):
    with self.test_session():
      data = tf.fill([1024, 1024], 0)
      p = tf.Variable([1])
      a = tf.assign(p, data, validate_shape=False)
      a.op.run()
      self.assertAllEqual(p.eval(), data.eval())

      # Assign to yet another shape
      data2 = tf.fill([10, 10], 1)
      a2 = tf.assign(p, data2, validate_shape=False)
      a2.op.run()
      self.assertAllEqual(p.eval(), data2.eval())
Author: CdricGmd, Project: tensorflow, Lines: 13, Source: dense_update_ops_test.py


Example 17: testInitialStateComputation

  def testInitialStateComputation(self, tuple_state, mask):
    if tuple_state:
      initial_state = (tf.fill([BATCH_SIZE, 6], 2),
                       (tf.fill([BATCH_SIZE, 7], 3),
                        tf.fill([BATCH_SIZE, 8], 4)))
    else:
      initial_state = tf.fill([BATCH_SIZE, 9], 10)

    trainable_state_module = snt.TrainableInitialState(initial_state, mask=mask)
    trainable_state = trainable_state_module()
    flat_trainable_state = nest.flatten(trainable_state)
    nest.assert_same_structure(initial_state, trainable_state)
    flat_initial_state = nest.flatten(initial_state)
    if mask is not None:
      flat_mask = nest.flatten(mask)
    else:
      flat_mask = (True,) * len(flat_initial_state)

    self.evaluate(tf.global_variables_initializer())

    # Check all variables are initialized correctly and that the returned
    # state has the same value as the one provided.
    for trainable_state, initial_state in zip(flat_trainable_state,
                                              flat_initial_state):
      self.assertAllEqual(
          self.evaluate(trainable_state), self.evaluate(initial_state))

    # Change the value of all the trainable variables to ones.
    for variable in tf.trainable_variables():
      self.evaluate(tf.assign(variable, tf.ones_like(variable)))

    # In eager mode to re-evaluate the module we must re-connect it.
    trainable_state = trainable_state_module()
    flat_trainable_state = nest.flatten(trainable_state)

    # Check that the values of the initial_states have changed if and only if
    # they are trainable.
    for trainable_state, initial_state, mask in zip(flat_trainable_state,
                                                    flat_initial_state,
                                                    flat_mask):
      trainable_state_value = self.evaluate(trainable_state)
      initial_state_value = self.evaluate(initial_state)
      if mask:
        expected_value = np.ones_like(initial_state_value)
      else:
        expected_value = initial_state_value

      self.assertAllEqual(trainable_state_value, expected_value)
Author: ccchang0111, Project: sonnet, Lines: 48, Source: rnn_core_test.py


Example 18: rnn_decoder

def rnn_decoder(decoder_inputs, initial_state, cell, word_dropout_keep_prob=1, replace_inp=None,
                loop_function=None, scope=None):
  """RNN decoder for the sequence-to-sequence model.

  Args:
    decoder_inputs: A list of 2D Tensors [batch_size x input_size].
    initial_state: 2D Tensor with shape [batch_size x cell.state_size].
    cell: rnn_cell.RNNCell defining the cell function and size.
    word_dropout_keep_prob: probability of keeping the generated input at each
      step; with probability (1 - keep_prob) it is replaced by `replace_inp`
      (word dropout).
    replace_inp: replacement input fed in when a step's input is dropped.
    loop_function: If not None, this function will be applied to the i-th output
      in order to generate the i+1-st input, and decoder_inputs will be ignored,
      except for the first element ("GO" symbol). This can be used for decoding,
      but also for training to emulate http://arxiv.org/abs/1506.03099.
      Signature -- loop_function(prev, i) = next
        * prev is a 2D Tensor of shape [batch_size x output_size],
        * i is an integer, the step number (when advanced control is needed),
        * next is a 2D Tensor of shape [batch_size x input_size].
    scope: VariableScope for the created subgraph; defaults to "rnn_decoder".

  Returns:
    A tuple of the form (outputs, state), where:
      outputs: A list of the same length as decoder_inputs of 2D Tensors with
        shape [batch_size x output_size] containing generated outputs.
      state: The state of each cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
        (Note that in some cases, like basic RNN cell or GRU cell, outputs and
         states can be the same. They are different for LSTM cells though.)
  """
  with variable_scope.variable_scope(scope or "rnn_decoder"):
    state = initial_state
    outputs = []
    prev = None
    seq_len = len(decoder_inputs)
    keep = tf.where(tf.random_uniform([seq_len]) < word_dropout_keep_prob,
                    tf.fill([seq_len], True), tf.fill([seq_len], False))
    for i, inp in enumerate(decoder_inputs):
      if loop_function is not None and prev is not None:
        with variable_scope.variable_scope("loop_function", reuse=True):
          if word_dropout_keep_prob < 1:
            inp = tf.cond(keep[i], lambda: loop_function(prev, i), lambda: replace_inp)
          else:
            inp = loop_function(prev, i)
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = cell(inp, state)
      outputs.append(output)
      if loop_function is not None:
        prev = output
  return outputs, state
Author: noble6emc2, Project: Question_Answering, Lines: 48, Source: seq2seq.py


Example 19: BatchClipByL2norm

def BatchClipByL2norm(t, upper_bound, name=None):
  """Clip an array of tensors by L2 norm.

  Shrink each dimension-0 slice of tensor (for matrix it is each row) such
  that the l2 norm is at most upper_bound. Here we clip each row as it
  corresponds to each example in the batch.

  Args:
    t: the input tensor.
    upper_bound: the upperbound of the L2 norm.
    name: optional name.
  Returns:
    the clipped tensor.
  """

  assert upper_bound > 0
  with tf.name_scope(name, "batch_clip_by_l2norm", [t, upper_bound]) as name:
    saved_shape = tf.shape(t)
    batch_size = tf.slice(saved_shape, [0], [1])
    t2 = tf.reshape(t, tf.concat([batch_size, [-1]], 0))
    upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
                              tf.constant(1.0/upper_bound))
    # Add a small number to avoid divide by 0
    l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)
    scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound
    clipped_t = tf.matmul(tf.diag(scale), t2)
    clipped_t = tf.reshape(clipped_t, saved_shape, name=name)
  return clipped_t
Author: Peratham, Project: models, Lines: 28, Source: utils.py
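A quick numeric check of the clipping behaviour (values are illustrative):

t = tf.constant([[3.0, 4.0], [0.3, 0.4]])  # row L2 norms: 5.0 and 0.5
clipped = BatchClipByL2norm(t, upper_bound=1.0)
with tf.Session() as sess:
    print(sess.run(clipped))
    # first row rescaled to norm 1 -> ~[0.6, 0.8]; second row left unchanged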


Example 20: calculate_reshape

def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
  batch_shape_static = tensor_util.constant_value_as_shape(new_shape)
  if batch_shape_static.is_fully_defined():
    return np.int32(batch_shape_static.as_list()), batch_shape_static, []
  with tf.name_scope(name, "calculate_reshape", [original_shape, new_shape]):
    original_size = tf.reduce_prod(original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(1, -tf.reduce_prod(new_shape)))
    new_ndims = tf.shape(new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [
        tf.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        tf.assert_rank(new_shape, 1, message="New shape must be a vector."),
        tf.assert_less_equal(
            tf.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        tf.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        tf.assert_equal(
            tf.reduce_prod(expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
Author: lewisKit, Project: probability, Lines: 29, Source: batch_reshape.py
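A hedged usage sketch: resolving the -1 when reshaping a 2x6 tensor into [-1, 3] (constants are illustrative):

orig_shape = tf.constant([2, 6], dtype=tf.int32)  # shape of the source tensor
new_shape = tf.constant([-1, 3], dtype=tf.int32)
expanded, static_shape, checks = calculate_reshape(orig_shape, new_shape)
# expanded evaluates to [4, 3]; static_shape is TensorShape([None, 3])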



Note: the tensorflow.fill examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and any reuse should follow the corresponding project's license. Please do not republish without permission.

