
Python tensorflow.dynamic_partition Function Code Examples


This article collects typical usage examples of the tensorflow.dynamic_partition function in Python. If you have been wondering what dynamic_partition does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following section presents 20 code examples of the dynamic_partition function, sorted by popularity by default.
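Before diving into the examples, here is a minimal sketch (our own, not taken from any of the projects below) of the partition-and-stitch round trip that most of these snippets build on. It runs eagerly on TensorFlow 2.x; under a 1.x graph you would evaluate the tensors in a session:

import tensorflow as tf

data = tf.constant([10., 20., 30., 40., 50.])
partitions = tf.constant([0, 1, 0, 1, 0])

# Route element i of `data` into parts[partitions[i]]:
# parts[0] == [10., 30., 50.], parts[1] == [20., 40.]
parts = tf.dynamic_partition(data, partitions, num_partitions=2)

# Partition the element indices the same way, then stitch the pieces
# back together; `restored` equals the original `data`.
indices = tf.dynamic_partition(tf.range(tf.size(partitions)), partitions, 2)
restored = tf.dynamic_stitch(indices, parts)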

Example 1: _partition_and_stitch

    def _partition_and_stitch(self, args, func_name):
        """
        args is a list of tensors, to be passed to self.likelihoods.<func_name>

        args[-1] is the 'Y' argument, which contains the indexes to self.likelihoods.

        This function splits up the args using dynamic_partition, calls the
        relevant function on the likelihoods, and re-combines the result.
        """
        # get the index from Y
        Y = args[-1]
        ind = Y[:, -1]
        ind = tf.cast(ind, tf.int32)
        Y = Y[:, :-1]
        args[-1] = Y

        # split up the arguments into chunks corresponding to the relevant likelihoods
        args = zip(*[tf.dynamic_partition(X, ind, self.num_likelihoods) for X in args])

        # apply the likelihood-function to each section of the data
        with params_as_tensors_for(self, convert=False):
            funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
        results = [f(*args_i) for f, args_i in zip(funcs, args)]

        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, self.num_likelihoods)
        results = tf.dynamic_stitch(partitions, results)

        return results
Author: sanket-kamthe | Project: GPflow | Lines: 29 | Source: likelihoods.py


Example 2: loop

    def loop(q_, mask, mass_, found_):
        q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)
        # partition 0 collects indices where mask is 0 (False), partition 1 where it is 1 (True)

        p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
        p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])

        # condition verification and mask modification
        less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                                 2)  # 0 when u is bigger than p, 1 when u is less than p

        split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
        split_u = tf.dynamic_partition(u, less_mask, 2)

        alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
        mass_ += tf.reduce_sum(split_u[1])

        mask = mask * (tf.ones_like(less_mask) - less_mask)

        found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                         lambda: False,
                         lambda: True)

        alpha = tf.reshape(alpha, q_.shape)

        return alpha, mask, mass_, found_
Author: RileyShe | Project: DeepPavlov | Lines: 28 | Source: tf_csoftmax_attention.py


Example 3: __call__

    def __call__(self, X):
        ind = tf.gather(tf.transpose(X), tf.shape(X)[1]-1)  # ind = X[:,-1]
        ind = tf.cast(ind, tf.int32)
        X = tf.transpose(tf.gather(tf.transpose(X), tf.range(0, tf.shape(X)[1]-1)))  # X = X[:,:-1]

        # split up X into chunks corresponding to the relevant likelihoods
        x_list = tf.dynamic_partition(X, ind, len(self.meanfunction_list))
        # apply the likelihood-function to each section of the data
        results = [m(x) for x, m in zip(x_list, self.meanfunction_list)]
        # stitch the results back together
        partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, len(self.meanfunction_list))
        return tf.dynamic_stitch(partitions, results)
Author: sanket-kamthe | Project: GPflow | Lines: 12 | Source: mean_functions.py


Example 4: split_apply_merge

def split_apply_merge(inp, partitions, fns):
    """Split input according to partitions.  Pass results through fns and merge.
  Args:
    inp: the input vector
    partitions: tensor of same length as input vector, having values 0, 1
    fns: the two functions.
  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
    new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
    new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
    new_indices = tf.dynamic_partition(tf.range(0, inp.get_shape()[0]), partitions, len(fns))
    return tf.dynamic_stitch(new_indices, new_outputs)
Author: rudaoshi | Project: neuralmachines | Lines: 13 | Source: reinforce.py
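A hypothetical invocation, for illustration only (the constants and the two lambdas below are our assumptions, not code from the neuralmachines repo; note that inp needs a static first dimension for inp.get_shape()[0] to work):

routed = split_apply_merge(
    tf.constant([1., 2., 3., 4.]),
    tf.constant([0, 1, 0, 1]),
    [lambda x: x * 2.0, lambda x: x + 10.0])
# Partition-0 elements are doubled, partition-1 elements are shifted
# by 10, then stitched back in order: routed == [2., 12., 6., 14.]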


Example 5: add_loss

def add_loss(graph, locations, confidences, batched_bboxes, batched_num_bboxes, bbox_priors, cfg):
  
  with graph.name_scope("loss"):
    # ground truth bounding boxes:
    # [batch_size, # of ground truth bounding boxes, 4]
    # we also need to know the number of ground truth bounding boxes for each image in the batch
    # (it can be different for each image...)
    # We could assume 1 for now.
    
    # Pass the locations, confidences, and ground truth labels to the matching function
    locations = tf.reshape(locations, [-1, 4])
    confidences = tf.reshape(confidences, [-1])
    
    # add the priors to the predicted residuals
    locations += tf.tile(bbox_priors, [cfg.BATCH_SIZE, 1])
    
    # add a small epsilon to the confidences
    confidences += small_epsilon
    
    # print "Shapes"
    # print locations.get_shape().as_list()
    # print confidences.get_shape().as_list()
    # print batched_bboxes.get_shape().as_list()
    # print batched_num_bboxes.get_shape().as_list()
    params = [locations, confidences, batched_bboxes, batched_num_bboxes, cfg.BATCH_SIZE, cfg.LOCATION_LOSS_ALPHA]
    matching, stacked_gt_bboxes = tf.py_func(compute_assignments, params, [tf.int32, tf.float32], name="bipartite_matching") 
    
    # matching: [num_predictions * batch_size] 0s and 1s for partitioning
    # stacked_gt_bboxes : [total number of gt bboxes for this batch, 4]
    
    # dynamic partition the bounding boxes and confidences into "positives" and "negatives"
    unmatched_locations, matched_locations = tf.dynamic_partition(locations, matching, 2)
    unmatched_confidences, matched_confidences = tf.dynamic_partition(confidences, matching, 2)
    
    # sum the norm from the "positive" bounding boxes 
    #loss = tf.nn.l2_loss(matched_locations - stacked_gt_bboxes)
    
    # sum the negative logs of the "positive" confidences
    #loss = loss - tf.reduce_sum(tf.log(matched_confidences)) + tf.reduce_sum(tf.log((1. - matched_confidences) + small_epsilon))
    
    # sum the negative logs of one minus the all of the confidences
    ###loss = loss - (1. / tf.cast(tf.reduce_sum(batched_num_bboxes), tf.float32) ) *  tf.reduce_sum(tf.log( 1. - confidences))
    #loss = loss -  tf.reduce_sum(tf.log( (1. - confidences) + small_epsilon))
    
    location_loss = cfg.LOCATION_LOSS_ALPHA * tf.nn.l2_loss(matched_locations - stacked_gt_bboxes)
    confidence_loss = -1. * tf.reduce_sum(tf.log(matched_confidences)) - tf.reduce_sum(tf.log((1. - unmatched_confidences) + small_epsilon))
    
    #loss = -1. * tf.reduce_sum(tf.log(matched_confidences)) - tf.reduce_sum(tf.log((1. - unmatched_confidences) + small_epsilon)) + cfg.LOCATION_LOSS_ALPHA * tf.nn.l2_loss(matched_locations - stacked_gt_bboxes)
  
  return location_loss, confidence_loss, matching
Author: gvanhorn38 | Project: inception | Lines: 50 | Source: model.py


Example 6: apply_factor

def apply_factor(tensor, *args, **kwargs):
    scope = kwargs.pop("scope", "")     
    with tf.name_scope(scope):
        n_args = len(args)

        if n_args == 0:
            tensor, output_size, error_symbol = tensor
            return one_hot(tensor, output_size, scope=scope)
        else:
            tensor, args = slice_out_int_literals(tensor, list(args))
            args, is_batched = make_batch_consistent(args)
            tensor, output_size, error_symbol = tensor

            # handle the case where all arguments were int literals
            tensor_dim_sizes = [dim.value for dim in tensor.get_shape()]
            if not tensor_dim_sizes:
                return one_hot(tensor, output_size, scope=scope)

            # Each arg is batch size x arg dim. Add dimensions to enable broadcasting.
            for i, arg in enumerate(args):
                for j in range(len(args)):
                    if j == i: continue
                    args[i] = tf.expand_dims(args[i], j + 1)

            # compute joint before tensor is applied
            joint = 0
            for arg in args:
                joint = joint + arg

            # prepare for unsorted_segment_sum
            joint = tf.reshape(joint, (-1, np.prod(tensor_dim_sizes)))
            joint = tf.transpose(joint, [1, 0])  # |tensor| x batch_size

            flat_tensor = tf.reshape(tensor, [-1])
            if error_symbol is not None:
                to_logsumexp = tf.dynamic_partition(joint, flat_tensor, output_size + 1)
                del to_logsumexp[error_symbol]
            else:
                to_logsumexp = tf.dynamic_partition(joint, flat_tensor, output_size)

            result = tf.pack(
                [logsumexp(x, reduction_indices=0) for x in to_logsumexp])

            result = tf.transpose(result, [1, 0])
            if not is_batched: result = tf.squeeze(result)
            return result
Author: ml-lab | Project: TerpreT | Lines: 49 | Source: terpret_tf_log_runtime.py


Example 7: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Perform M steps of set2set gather,
        detailed descriptions in: https://arxiv.org/abs/1511.06391 """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    # Extract atom_features
    atom_features = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    self.c = tf.zeros((self.batch_size, self.n_hidden))
    self.h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(self.h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Add another value(~-Inf) to prevent error in softmax
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.segment_sum(tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # Model using this layer must set pad_batches=True
      q_star = tf.concat([self.h, r], axis=1)
      self.h, self.c = self.LSTMStep(q_star, self.c)

    out_tensor = q_star
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Author: AhlamMD | Project: deepchem | Lines: 34 | Source: graph_layers.py


Example 8: smoothed_l1_loss

def smoothed_l1_loss(input_tensor):
    absval = tf.abs(input_tensor)
    ind = tf.to_int32(absval > 1)
    inner, outer = tf.dynamic_partition(absval, ind, 2)
    loss = tf.reduce_sum(0.5 * tf.square(inner)) + \
        tf.reduce_sum(outer - 0.5)
    return loss
Author: yulongwang12 | Project: tfplay | Lines: 7 | Source: smoothed_l1_loss.py
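This is the Huber loss with delta = 1, summed over all elements: entries with |x| <= 1 contribute 0.5 * x**2, the rest contribute |x| - 0.5. A small sanity check of our own (tf.to_int32 is TF 1.x API; on 2.x use tf.cast(..., tf.int32)):

x = tf.constant([0.5, -2.0])
loss = smoothed_l1_loss(x)
# inner = [0.5], outer = [2.0]
# loss == 0.5 * 0.5**2 + (2.0 - 0.5) == 1.625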


Example 9: __call__

  def __call__(self, *parents):
    # x = [atom_features, deg_slice, membership, deg_adj_list placeholders...]
    atom_features = parents[0].out_tensor

    # Extract graph topology
    membership = parents[2].out_tensor

    # Perform the mol gather

    assert self.batch_size > 1, "graph_gather requires batches larger than 1"

    # Obtain the partitions for each of the molecules
    activated_par = tf.dynamic_partition(atom_features, membership,
                                         self.batch_size)

    # Sum over atoms for each molecule
    sparse_reps = [
        tf.reduce_sum(activated, 0, keep_dims=True)
        for activated in activated_par
    ]
    max_reps = [
        tf.reduce_max(activated, 0, keep_dims=True)
        for activated in activated_par
    ]

    # Get the final sparse representations
    sparse_reps = tf.concat(axis=0, values=sparse_reps)
    max_reps = tf.concat(axis=0, values=max_reps)
    mol_features = tf.concat(axis=1, values=[sparse_reps, max_reps])

    if self.activation_fn is not None:
      mol_features = self.activation_fn(mol_features)
    self.out_tensor = mol_features
    return mol_features
Author: joegomes | Project: deepchem | Lines: 34 | Source: layers.py


Example 10: testScalarIndexOutOfRange

 def testScalarIndexOutOfRange(self):
   with self.test_session() as sess:
     bad = 17
     data = np.zeros(5)
     partitions = tf.dynamic_partition(data, bad, num_partitions=7)
     with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
       sess.run(partitions)
Author: debaratidas1994 | Project: tensorflow | Lines: 7 | Source: dynamic_partition_op_test.py


Example 11: graph_gather

def graph_gather(atoms, membership_placeholder, batch_size):
  """
  Parameters
  ----------
  atoms: tf.Tensor
    Of shape (n_atoms, n_feat)
  membership_placeholder: tf.Placeholder
    Of shape (n_atoms,). Molecule each atom belongs to.
  batch_size: int
    Batch size for deep model.

  Returns
  -------
  tf.Tensor
    Of shape (batch_size, n_feat)
  """

  # WARNING: Does not work for batch size 1! If batch_size = 1, use reduce_sum instead!
  assert batch_size > 1, "graph_gather requires batches larger than 1"

  # Obtain the partitions for each of the molecules
  activated_par = tf.dynamic_partition(atoms, membership_placeholder,
                                       batch_size)

  # Sum over atoms for each molecule 
  sparse_reps = [
      tf.reduce_sum(activated, 0, keep_dims=True) for activated in activated_par
  ]

  # Get the final sparse representations
  sparse_reps = tf.concat(axis=0, values=sparse_reps)

  return sparse_reps
Author: joegomes | Project: deepchem | Lines: 33 | Source: layers.py


Example 12: mmd_objective

def mmd_objective(z, s, sdim):
    """
    Compute the MMD from latent space and nuisance_id

    Notes:
    Reimplementation in tensorflow of the Variational Fair Autoencoder
    https://arxiv.org/abs/1511.00830
    """
    
    #mmd_method = mmd_rbf
    mmd_method = mmd_fourier
    
    z_dim = z.get_shape().as_list()[1]

    # STEP 1: construct lists of samples in their proper batches
    z_part = tf.dynamic_partition(z, s, sdim)

    # STEP 2: add noise to all of them and get the mmd
    mmd = 0
    for j, z_j in enumerate(z_part):
        z0_ = z_j
        aux_z0 = tf.random_normal([1, z_dim])  # if an S category does not have any samples
        z0 = tf.concat([z0_, aux_z0], 0)
        if len(z_part) == 2:
            z1_ = z_part[j + 1]
            aux_z1 = tf.random_normal((1, z_dim))
            z1 = tf.concat([z1_, aux_z1], axis=0)
            return mmd_method(z0, z1)
        z1 = z
        mmd += mmd_method(z0, z1)
    return mmd
Author: ssehztirom | Project: scVI-reproducibility | Lines: 32 | Source: scVI.py


Example 13: call

  def call(self, x, mask=None):
    """Execute this layer on input tensors.

    x = [atom_features, membership]
    
    Parameters
    ----------
    x: list
      Tensors as listed above
    mask: bool, optional
      Ignored. Present only to shadow superclass call() method.

    Returns
    -------
    outputs: Tensor
      Tensor of molecular features
    """
    # Add trainable weights
    self.build()
    outputs = x[0]
    membership = x[1]

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    outputs = tf.dynamic_partition(outputs, membership, self.batch_size)

    output_molecules = [tf.reduce_sum(molecule, 0) for molecule in outputs]

    output_molecules = tf.stack(output_molecules)
    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)
    return output_molecules
Author: joegomes | Project: deepchem | Lines: 34 | Source: weave_layers.py


Example 14: mol_conv_layer

def mol_conv_layer(atoms, cH_params, aux_params, layer):
    #Sum all neighbors using adjacency matrix
    atom_sum_neigh = sum_neigh(atoms, aux_params, layer)

    # Partition the atom matrix by degree of atoms
    # THIS CREATES PROBLEMS WITH GRADIENTS. NEED TO USE SLICING
    indices = tf.sub(deg_list_ph, tf.constant(1,dtype=tf.int32))
    
    atom_partitions = tf.dynamic_partition(atom_sum_neigh, indices, max_deg)

    # Get collection of modified atom features
    new_rel_atoms_collection = []
    for deg in range(1,6):
        # Obtain relevant atoms for this degree
        rel_atoms = atom_partitions[deg-1]

        # Apply hidden affine to relevant atoms and append
        if bool_separate_conv_depths:
            out = affine(rel_atoms, cH_params['W'+str(deg)+'_'+str(layer)], cH_params['b'+str(deg)+'_'+str(layer)])
        else:
            out = affine(rel_atoms, cH_params['W'+str(deg)], cH_params['b'+str(deg)])
        new_rel_atoms_collection.append(out)

    # Combine all atoms back into the list
    # NOTE: FOR NOW USE CONCATENATION. MEANS WE CANNOT USE ARBITRARY deg_list ORDER
    hidden_atoms = tf.concat(0, new_rel_atoms_collection)

    # Apply relu
    activated_atoms = tf.nn.relu(hidden_atoms)

    return activated_atoms
Author: rbharath | Project: deepchem | Lines: 31 | Source: bondvolution.py


Example 15: _build_graph

    def _build_graph(self):
        """Construct tensorflow nodes for round of clustering"""
        # N.B. without tf.Variable, makes awesome glitchy clustered images
        self.centroids_in = tf.Variable(tf.slice(tf.random_shuffle(self.arr),
                                     [0, 0], [self.k, -1]), name="centroids_in")
        # tiled should be shape(self.n_pixels, self.k, size_data = 2 + self.channels)
        tiled_pix = tf.tile(tf.expand_dims(self.arr, 1),
                            multiples=[1, self.k, 1], name="tiled_pix")

        # no need to take square root b/c positive reals and sqrt are isomorphic
        def radical_euclidean_dist(x, y):
            """Takes in 2 tensors and returns euclidean distance radical, i.e. dist**2"""
            with tf.name_scope("radical_euclidean"):
                return tf.square(tf.sub(x, y))

        # should be shape(self.n_pixels, self.k)
        distances = tf.reduce_sum(radical_euclidean_dist(tiled_pix, self.centroids_in),
                                  reduction_indices=2, name="distances")
        # should be shape(self.n_pixels)
        nearest = tf.to_int32(tf.argmin(distances, 1), name="nearest")

        # should be list of len self.k with tensors of shape(size_cluster, size_data)
        self.clusters = tf.dynamic_partition(self.arr, nearest, self.k)
        # should be shape(self.k, size_data)
        self.centroids = tf.pack([tf.reduce_mean(cluster, 0) for cluster in self.clusters],
            name="centroids_out")
        self.update_roids = tf.assign(self.centroids_in, self.centroids)
Author: meereeum | Project: k-meanz | Lines: 27 | Source: k_means_tf.py


Example 16: testErrorIndexOutOfRange

 def testErrorIndexOutOfRange(self):
   with self.test_session() as sess:
     data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                         [9, 10, 11], [12, 13, 14]])
     indices = tf.constant([0, 2, 99, 2, 2])
     partitions = tf.dynamic_partition(data, indices, num_partitions=4)
     with self.assertRaisesOpError(r"partitions\[2\] = 99 is not in \[0, 4\)"):
       sess.run(partitions)
Author: debaratidas1994 | Project: tensorflow | Lines: 8 | Source: dynamic_partition_op_test.py


Example 17: pair_loss

def pair_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.int32)
    parts = tf.dynamic_partition(y_pred, y_true, 2)
    y_pos = parts[1]
    y_neg = parts[0]
    y_pos = tf.expand_dims(y_pos, 0)
    y_neg = tf.expand_dims(y_neg, -1)
    out = K.sigmoid(y_neg - y_pos)
    return K.mean(out)
Author: rafajak | Project: open-solution-talking-data | Lines: 9 | Source: contrib.py
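pair_loss reads y_true as 0/1 relevance labels, splits y_pred into negative (label 0) and positive (label 1) scores, and averages sigmoid(neg - pos) over every negative/positive pair via broadcasting. A hypothetical check of our own, assuming import tensorflow as tf and from keras import backend as K are in scope as in the source module:

y_true = tf.constant([1, 0, 1, 0])
y_pred = tf.constant([0.9, 0.2, 0.7, 0.4])
loss = pair_loss(y_true, y_pred)
# y_neg == [0.2, 0.4], y_pos == [0.9, 0.7]; the loss averages
# sigmoid(neg - pos) over the four (neg, pos) combinations.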


Example 18: scatter_update

 def scatter_update(cls, factor, indices, values, sharding_func):
     """Helper function for doing sharded scatter update."""
     assert isinstance(factor, list)
     if len(factor) == 1:
         with ops.colocate_with(factor[0]):
             # TODO(agarwal): assign instead of scatter update for full batch update.
             return tf.scatter_update(factor[0], indices, values).op
     else:
         num_shards = len(factor)
         assignments, new_ids = sharding_func(indices)
         assert assignments is not None
         assignments = tf.cast(assignments, tf.int32)
         sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
         sharded_values = tf.dynamic_partition(values, assignments, num_shards)
         updates = []
         for i in xrange(num_shards):
             updates.append(tf.scatter_update(factor[i], sharded_ids[i], sharded_values[i]))
         return tf.group(*updates)
Author: damienmg | Project: tensorflow | Lines: 18 | Source: factorization_ops.py


Example 19: _add_beam_tag_dynamic

    def _add_beam_tag_dynamic(self, feat_x, beam_path, cur_size):
        max_size = self.beam_size
        path_list = tf.dynamic_partition(beam_path, tf.range(cur_size), max_size)

        non_empty_path = [tf.cond(tf.less(tf.shape(e)[0], 1),
                                  lambda: tf.zeros(shape=[0, self.window_size, self.dim_feat_x]),
                                  lambda: self._add_tag_dynamic(feat_x, tf.reshape(e, [-1])))
                          for e in path_list]
        return tf.concat(0, non_empty_path)
Author: staylonging | Project: tf | Lines: 10 | Source: cnnglobal_back.py


Example 20: update_centroids

def update_centroids(samples, centroids, num_clusters):
    # First, let's find the data samples closest to each centroid, then update
    # each centroid using all vectors within its cluster.
    expanded_data_vectors = tf.expand_dims(samples, 0)
    expanded_centroids = tf.expand_dims(centroids, 1)
    distances = tf.reduce_sum(tf.square(tf.sub(expanded_data_vectors, expanded_centroids)), 2)
    nearest_samples = tf.to_int32(tf.argmin(distances, 0))
    partitioned_data = tf.dynamic_partition(samples, nearest_samples, num_clusters)
    new_centroids = tf.concat(0, [tf.expand_dims(tf.reduce_mean(partition, 0), 0)
                                  for partition in partitioned_data])
    return new_centroids
Author: adithyaraghav | Project: TensorFlow | Lines: 10 | Source: tf_k-means.py



Note: The tensorflow.dynamic_partition examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or distribution must follow the corresponding project's license. Do not reproduce without permission.

