
Python embedding_ops.embedding_lookup Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.embedding_ops.embedding_lookup. If you are unsure what embedding_lookup does, how to call it, or what working uses look like, the curated examples below should help.



The following 20 code examples of embedding_lookup are shown, sorted by popularity by default. You can upvote the examples you find useful; that feedback helps the system surface better Python code samples.
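
Before the examples, a minimal sketch of the basic behavior may help: embedding_lookup selects rows of a params matrix by integer id, much like a gather. The snippet below is illustrative and assumes a TF 1.x-style session (under TF 2.x, use tf.compat.v1 or run eagerly):

import numpy as np
import tensorflow as tf

# A 5 x 3 embedding table: row i is the embedding for id i.
params = tf.constant(np.arange(15, dtype=np.float32).reshape(5, 3))
ids = tf.constant([0, 2, 2, 4])

# embedding_lookup selects rows of `params` by id, like a gather.
looked_up = tf.nn.embedding_lookup(params, ids)

with tf.Session() as sess:
  print(sess.run(looked_up))  # rows 0, 2, 2, 4 of the table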

Example 1: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = constant_op.constant(np.array([0, 1]))
      ind2 = constant_op.constant(np.array([2, 3]))
      ind3 = constant_op.constant(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = constant_op.constant(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]
    grad = self.evaluate(ops.convert_to_tensor(grad))

    if not context.executing_eagerly():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad, self.evaluate(tf_dense_grad))
Author: Wajih-O, Project: tensorflow, Lines: 34, Source: backprop_test.py
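
The point of this test is that the gradient of embedding_lookup with respect to the table arrives as IndexedSlices (index/value pairs for the looked-up rows) rather than a dense tensor, and that several such slices plus a dense gradient aggregate correctly. A minimal standalone sketch of the densification step used above, assuming TF 1.x graph mode:

import numpy as np
import tensorflow as tf

var = tf.constant(np.random.rand(4, 2).astype(np.float32))
y = tf.reduce_sum(tf.nn.embedding_lookup(var, [0, 1, 1]))
grad = tf.gradients(y, [var])[0]  # IndexedSlices touching rows 0 and 1 only

# Scatter-add the slice values back into a dense [4, 2] gradient.
dense = tf.unsorted_segment_sum(grad.values, grad.indices, grad.dense_shape[0])

with tf.Session() as sess:
  print(sess.run(dense))  # row 0 sums to 1s, row 1 to 2s, rows 2-3 stay zero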


Example 2: testHigherRankMaxNorm

 def testHigherRankMaxNorm(self):
   np.random.seed(8)
   with self.cached_session():
     for params_shape in (12,), (6, 3), (6, 2, 3):
       # Test embedding rank 0, 1, 2.
       # Note: the first dimension must be a common multiple of procs below.
       params = 2 * np.ones(params_shape)
       params_norm = params / np.sqrt(
           np.sum(
               params * params, tuple(range(params.ndim)[1:]), keepdims=True))
       for ids_shape in (), (3), (4, 3), (2, 3, 4):
         ids = np.random.randint(
             params.shape[0], size=np.prod(ids_shape,
                                           dtype=np.int64)).reshape(ids_shape)
         # Compare nonsharded to gather
         simple = embedding_ops.embedding_lookup(
             params, ids, max_norm=1.0).eval()
         # assertAllClose is used here as different implementations of sqrt may
         # be used to compute each of the values being compared.  For example,
         # on AVX512 builds the embedding operation makes use of Eigen's fast
         # vectorized square root algorithm for doubles.  These different
         # implementations of sqrt are not guaranteed to produce exactly the
         # same results. Therefore, an exact comparison cannot be made.
         self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
         # Run a few different sharded versions.
         for procs in 1, 2, 3:
           stride = procs * math_ops.range(params.shape[0] // procs)
           split_params = [
               array_ops.gather(params, stride + p) for p in xrange(procs)
           ]
           sharded = embedding_ops.embedding_lookup(
               split_params, ids, max_norm=1.0).eval()
           self.assertAllEqual(simple, sharded)
Author: adit-chandra, Project: tensorflow, Lines: 33, Source: embedding_ops_test.py
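
The max_norm argument used here clips each returned embedding to at most the given L2 norm, which is why the test compares against a pre-normalized copy of the table. A NumPy-only sketch of the same semantics (illustrative, not the TF implementation):

import numpy as np

params = 2 * np.ones((6, 3))  # every row has L2 norm 2 * sqrt(3) ~= 3.46
ids = np.array([0, 5, 5])
max_norm = 1.0

# Clip each selected row to norm <= max_norm, as embedding_lookup(..., max_norm=1.0) does.
rows = params[ids]
norms = np.linalg.norm(rows, axis=-1, keepdims=True)
clipped = rows * np.minimum(1.0, max_norm / norms)

print(np.linalg.norm(clipped, axis=-1))  # each ~= 1.0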


Example 3: testConstructionNonSharded

 def testConstructionNonSharded(self):
   with ops.Graph().as_default():
     p = variables.Variable(
         array_ops.zeros(
             shape=[100, 100], dtype=dtypes.float32))
     ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
     embedding_ops.embedding_lookup([p], ids)
Author: AlbertXiebnu, Project: tensorflow, Lines: 7, Source: embedding_ops_test.py


Example 4: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = tensor.Tensor(np.array([0, 1]))
      ind2 = tensor.Tensor(np.array([2, 3]))
      ind3 = tensor.Tensor(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = tensor.Tensor(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]

    with context.graph_mode(), self.test_session():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
Author: chdinh, Project: tensorflow, Lines: 33, Source: backprop_test.py


Example 5: testHigherRankMaxNorm

 def testHigherRankMaxNorm(self):
   np.random.seed(8)
   with self.test_session():
     for params_shape in (12,), (6, 3), (6, 2, 3):
       # Test embedding rank 0, 1, 2.
       # Note: the first dimension must be a common multiple of procs below.
       params = 2 * np.ones(params_shape)
       params_norm = params / np.sqrt(
           np.sum(
               params * params, tuple(range(params.ndim)[1:]), keepdims=True))
       for ids_shape in (), (3), (4, 3), (2, 3, 4):
         ids = np.random.randint(
             params.shape[0], size=np.prod(ids_shape,
                                           dtype=np.int64)).reshape(ids_shape)
         # Compare nonsharded to gather
         simple = embedding_ops.embedding_lookup(
             params, ids, max_norm=1.0).eval()
         self.assertAllEqual(simple, array_ops.gather(params_norm, ids).eval())
         # Run a few different sharded versions.
         for procs in 1, 2, 3:
           stride = procs * math_ops.range(params.shape[0] // procs)
           split_params = [
               array_ops.gather(params, stride + p) for p in xrange(procs)
           ]
           sharded = embedding_ops.embedding_lookup(
               split_params, ids, max_norm=1.0).eval()
           self.assertAllEqual(simple, sharded)
Author: 1000sprites, Project: tensorflow, Lines: 27, Source: embedding_ops_test.py


Example 6: body

    def body(it, cost):
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      cost = control_flow_ops.cond(
          math_ops.equal(it, 3), lambda: math_ops.square(cost),
          (lambda: cost + math_ops.reduce_sum(embedding)))
      return it + 1, cost

    _, cost = control_flow_ops.while_loop(
        cond, body, [constant_op.constant(0),
                     constant_op.constant(0.0)])

    dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
    dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                         dynamic_grads.indices)

    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    static = math_ops.square(
        math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
        math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
    static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
    static_grads = math_ops.segment_sum(static_grads.values,
                                        static_grads.indices)

    with self.cached_session():
      self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
Author: Wajih-O, Project: tensorflow, Lines: 26, Source: control_flow_ops_test.py


Example 7: fn

 def fn(x):
   ind1 = constant_op.constant(np.array([0, 1]))
   ind2 = constant_op.constant(np.array([2, 3]))
   ind3 = constant_op.constant(np.array([1, 3]))
   # A mixture of IndexedSlices and dense tensor to aggregate.
   g1 = embedding_ops.embedding_lookup(x, ind1)
   g2 = embedding_ops.embedding_lookup(x, ind2)
   g3 = embedding_ops.embedding_lookup(x, ind3)
   g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
   return g1 * g2 * g3 * g4
Author: Wajih-O, Project: tensorflow, Lines: 10, Source: backprop_test.py


Example 8: testConstructionSharded

 def testConstructionSharded(self):
   with ops.Graph().as_default():
     p = []
     for _ in range(2):
       p += [
           variables.Variable(
               array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
       ]
      ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
     embedding_ops.embedding_lookup(p, ids)
Author: 1000sprites, Project: tensorflow, Lines: 10, Source: embedding_ops_test.py


Example 9: create_decoder

  def create_decoder(self):
    start_time = time.time()

    with vs.variable_scope("embedding" or scope):
      tokens = self.tokens[:-1]
      embeddings = []
      with tf.device("/cpu:0"):
        sqrt3 = np.sqrt(3)
        embedding = vs.get_variable(
            "embedding", [self.vocab_size, self.embedding_size],
            initializer=tf.random_uniform_initializer(-sqrt3, sqrt3))

        for token in tokens:
          # Create the embedding layer.
          emb = embedding_ops.embedding_lookup(embedding, token)
          emb.set_shape([self.batch_size, self.embedding_size])
          embeddings.append(emb)

    cell = rnn_cell.GRUCell(self.decoder_cell_size)
    cell = rnn_cell.OutputProjectionWrapper(cell, self.vocab_size)
    self.decoder_states = rnn.rnn(
        cell, embeddings, dtype=tf.float32, sequence_length=self.tokens_len)[0]
    self.logits = self.decoder_states

    print('create_decoder graph time %f' % (time.time() - start_time))
Author: suriyadeepan, Project: tensorflow, Lines: 25, Source: lm.py


Example 10: testAdamSparse

  def testAdamSparse(self):
    with ops.device('/cpu:0'):
      # Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
      # are not implemented on TPU.
      embedding_matrix = resource_variable_ops.ResourceVariable(
          array_ops.ones([3, 2]))

    with self.test_scope():
      with backprop.GradientTape() as tape:
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
        y = math_ops.reduce_sum(embedding)
      dy_dx = tape.gradient(y, embedding_matrix)
      self.assertIsInstance(dy_dx, ops.IndexedSlices)
      optimizer = adam.AdamOptimizer(0.1)
      # The gradient application operations will run on CPU because optimizer
      # updates are always collocated with the variable.
      optimizer.apply_gradients([(dy_dx, embedding_matrix)])

      # This assign_add will run on CPU because when an input to an
      # operation is a resource, this operation is placed on the resource's
      # device by the eager runtime.
      embedding_matrix.assign_add(array_ops.ones([3, 2]))

    self.assertAllClose([[2.0, 2.0],
                         [1.9, 1.9],
                         [2.0, 2.0]], embedding_matrix.numpy())
Author: JonathanRaiman, Project: tensorflow, Lines: 26, Source: eager_test.py


Example 11: calculate_loss_from_wals_model

 def calculate_loss_from_wals_model(self, wals_model, sp_inputs):
   current_rows = embedding_ops.embedding_lookup(
       wals_model.row_factors, math_ops.range(wals_model._input_rows),
       partition_strategy="div")
   current_cols = embedding_ops.embedding_lookup(
       wals_model.col_factors, math_ops.range(wals_model._input_cols),
       partition_strategy="div")
   row_wts = embedding_ops.embedding_lookup(
       wals_model._row_weights, math_ops.range(wals_model._input_rows),
       partition_strategy="div")
   col_wts = embedding_ops.embedding_lookup(
       wals_model._col_weights, math_ops.range(wals_model._input_cols),
       partition_strategy="div")
   return factorization_ops_test_utils.calculate_loss(
       sp_inputs, current_rows, current_cols, wals_model._regularization,
       wals_model._unobserved_weight, row_wts, col_wts)
Author: AlbertXiebnu, Project: tensorflow, Lines: 16, Source: factorization_ops_test.py


Example 12: loop_function

  def loop_function(prev, i, log_beam_probs, beam_path, beam_symbols):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])
    probs = tf.log(tf.nn.softmax(prev))

    if i > 1:
      probs = tf.reshape(probs + log_beam_probs[-1],
                         [-1, beam_size * num_symbols])

    best_probs, indices = tf.nn.top_k(probs, beam_size)
    indices = tf.stop_gradient(tf.squeeze(tf.reshape(indices, [-1, 1])))
    best_probs = tf.stop_gradient(tf.reshape(best_probs, [-1, 1]))

    symbols = indices % num_symbols  # Which word in the vocabulary.
    beam_parent = indices // num_symbols  # Which hypothesis it came from.

    beam_symbols.append(symbols)
    beam_path.append(beam_parent)
    log_beam_probs.append(best_probs)

    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.

    emb_prev = embedding_ops.embedding_lookup(embedding, symbols)
    emb_prev = tf.reshape(emb_prev, [beam_size, embedding_size])
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
Author: Vunb, Project: Neural_Conversation_Models, Lines: 34, Source: my_seq2seq.py
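
The modulo/division bookkeeping above is the heart of the beam step: after the reshape to [-1, beam_size * num_symbols], each flat top-k index jointly encodes a parent hypothesis and a vocabulary word. A tiny worked example with illustrative numbers:

num_symbols = 10  # vocabulary size (illustrative)
beam_size = 3

flat_index = 24  # a top-k index into the beam_size * num_symbols scores

symbol = flat_index % num_symbols        # word id 4 in the vocabulary
beam_parent = flat_index // num_symbols  # expanded from hypothesis 2

print(symbol, beam_parent)  # 4 2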


Example 13: attention_decoder_with_embedding

def attention_decoder_with_embedding(decoder_inputs, initial_state, attention_states,
                                     cell, embedding, num_heads=1,
                                     output_size=None, dtype=dtypes.float32, scope=None,
                                     initial_state_attention=False):
    """
    We are not using output_projection because we are NOT using a sampled softmax

    Parameters
    ----------
    decoder_inputs
    initial_state
    attention_states
    cell
    embedding: outside embedding passed in
    num_heads
    output_size
    dtype
    scope
    initial_state_attention

    Returns
    -------
    The (outputs, state) pair returned by the underlying attention_decoder.

    """
    if output_size is None:
        output_size = cell.output_size

    with vs.variable_scope(scope or "attention_decoder_with_embedding"):
        emb_inp = [
            embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]
        return attention_decoder(
            emb_inp, initial_state, attention_states, cell, output_size=output_size,
            num_heads=num_heads, loop_function=None,
            initial_state_attention=initial_state_attention)
Author: windweller, Project: Trident, Lines: 34, Source: seq2seq.py


Example 14: _tf_dec_embedding_attention_decoder

    def _tf_dec_embedding_attention_decoder(self, enc_out, decoder_input, last_state,
                                    cell, num_symbols, embedding_size, num_heads=1,
                                    output_size=None, output_projection=None,
                                    dtype=dtypes.float32,
                                    scope=None, src_mask=None, maxout_layer=False, encoder="reverse",
                                    start=None, init_const=False, bow_mask=None):
        """Decode single step version of tensorflow.models.rnn.seq2seq.embedding_attention_decoder
            """
        if output_size is None:
          output_size = cell.output_size
        if output_projection is not None:
          proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
          proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
                                                                num_symbols])   
          proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
          proj_biases.get_shape().assert_is_compatible_with([num_symbols])

        with variable_scope.variable_scope(scope or "embedding_attention_decoder"):
          with ops.device("/cpu:0"):
            embedding = variable_scope.get_variable("embedding",
                                                    [num_symbols, embedding_size])
          emb_inp = embedding_ops.embedding_lookup(embedding, decoder_input)
          return self._tf_dec_attention_decoder(
              enc_out, emb_inp, last_state, cell, output_size=output_size,
              num_heads=num_heads, src_mask=src_mask, maxout_layer=maxout_layer, embedding_size=embedding_size,
              encoder=encoder, start=start, init_const=init_const, bow_mask=bow_mask)
Author: ehasler, Project: tensorflow, Lines: 26, Source: tf_seq2seq.py


Example 15: __call__

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper2"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        embeddings = []
        for i in xrange(len(self._embedding_classes)):
          embeddings.append(
              vs.get_variable("embedding" + str(i),
                              [self._embedding_classes[i],
                               self._embedding_sizes[i]],
                              initializer=initializer))
        embedded = []
        for i in xrange(len(self._embedding_classes)):
          embedded.append(
              embedding_ops.embedding_lookup(
                  embeddings[i], array_ops.reshape(inputs[i], [-1])))

        finalEmbedded = tf.concat(1, embedded)

    return self._cell(finalEmbedded, state)
Author: KentonMurray, Project: DeepDomers, Lines: 25, Source: MultipleInputEmbeddingWrapper.py


Example 16: __call__

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._embedding:
          embedding = self._embedding
        else:
          if self._initializer:
            initializer = self._initializer
          elif vs.get_variable_scope().initializer:
            initializer = vs.get_variable_scope().initializer
          else:
            # Default initializer for embeddings should have variance=1.
            sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
            initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
          embedding = vs.get_variable("embedding", [self._embedding_classes,
                                                    self._cell.input_size],
                                      initializer=initializer)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))

        """print (embedded)
        print ("{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}")"""

    return self._cell(embedded, state)
Author: chagge, Project: repository, Lines: 25, Source: rnn_cell.py


Example 17: testShardedDivPartitioningUnknownParamShape

  def testShardedDivPartitioningUnknownParamShape(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # The embedding dimension is 10. The vocab_size x 10 embedding
      # parameters are spread across num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.

      # We clear parameter shapes, to test when shape is not statically known.
      p, params, feed_dict = _EmbeddingParams(
          num_shards, vocab_size, use_shapeless_placeholder=True)

      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)

      embedding = embedding_ops.embedding_lookup(
          p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
    np_result, _, _ = _EmbeddingResult(
        params, id_vals, num_shards, vocab_size, partition_strategy="div")
    self.assertAllEqual(np_result, tf_result)
Author: 1000sprites, Project: tensorflow, Lines: 25, Source: embedding_ops_test.py
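
For reference, partition_strategy decides which shard owns a given id. A plain-Python sketch of the two documented strategies, reproducing the shard sizes described in the comment above (13 ids over 5 shards); the helper names are illustrative:

num_shards, vocab_size = 5, 13

def mod_shard(i, n=num_shards):
  # "mod" strategy: id i lives in shard i % n.
  return i % n

def div_shard(i, n=num_shards, v=vocab_size):
  # "div" strategy: ids are split contiguously; the first v % n shards
  # hold ceil(v / n) ids each, the remaining shards hold floor(v / n).
  big, rem = -(-v // n), v % n
  if i < rem * big:
    return i // big
  return rem + (i - rem * big) // (v // n)

print([mod_shard(i) for i in range(vocab_size)])
print([div_shard(i) for i in range(vocab_size)])  # 3 shards of 3 ids, then 2 of 2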


Example 18: testMinimizeSparseResourceVariable

 def testMinimizeSparseResourceVariable(self):
   for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
     with self.cached_session():
       var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
       var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
       x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
       pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
       pred += var1
       loss = pred * pred
       sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
       # TODO(apassos) calling initialize_resources on all resources here
       # doesn't work because the sessions and graph are reused across unit
       # tests and this would mean trying to reinitialize variables. Figure out
       # a long-term solution for this.
       variables.global_variables_initializer().run()
       # Fetch params to validate initial values
       self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
       self.assertAllCloseAccordingToType([3.0], var1.eval())
       # Run 1 step of sgd
       sgd_op.run()
       # Validate updated params
       np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
       np_grad = 2 * np_pred
       self.assertAllCloseAccordingToType(
           [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
       self.assertAllCloseAccordingToType([3.0 - np_grad], var1.eval())
Author: HughKu, Project: tensorflow, Lines: 26, Source: gradient_descent_test.py


Example 19: __init__

  def __init__(self, embedding, start_tokens, end_token):
    """Initializer.

    Args:
      embedding: A callable that takes a vector tensor of `ids` (argmax ids),
        or the `params` argument for `embedding_lookup`. The returned tensor
        will be passed to the decoder input.
      start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
      end_token: `int32` scalar, the token that marks end of decoding.

    Raises:
      ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
        scalar.
    """
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))

    self._start_tokens = ops.convert_to_tensor(
        start_tokens, dtype=dtypes.int32, name="start_tokens")
    self._end_token = ops.convert_to_tensor(
        end_token, dtype=dtypes.int32, name="end_token")
    if self._start_tokens.get_shape().ndims != 1:
      raise ValueError("start_tokens must be a vector")
    self._batch_size = array_ops.size(start_tokens)
    if self._end_token.get_shape().ndims != 0:
      raise ValueError("end_token must be a scalar")
    self._start_inputs = self._embedding_fn(self._start_tokens)
Author: AlbertXiebnu, Project: tensorflow, Lines: 30, Source: helper.py
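
For context, this initializer matches the one in tf.contrib.seq2seq's GreedyEmbeddingHelper in TF 1.x builds; the sketch below (class path assumed, token ids illustrative) shows both accepted forms of the embedding argument:

import tensorflow as tf

embedding_matrix = tf.get_variable("embedding", [1000, 64])

# 1) Pass the params matrix; the helper wraps it in embedding_lookup itself.
helper_a = tf.contrib.seq2seq.GreedyEmbeddingHelper(
    embedding=embedding_matrix,
    start_tokens=tf.fill([32], 1),  # batch of 32 start-token ids (illustrative)
    end_token=2)                    # end-of-sequence id (illustrative)

# 2) Or pass any callable mapping ids to embeddings.
helper_b = tf.contrib.seq2seq.GreedyEmbeddingHelper(
    embedding=lambda ids: tf.nn.embedding_lookup(embedding_matrix, ids),
    start_tokens=tf.fill([32], 1),
    end_token=2)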


Example 20: embedding_lookup_unique

def embedding_lookup_unique(params, ids, name=None):
  """Version of embedding_lookup that avoids duplicate lookups.

  This can save communication in the case of repeated ids.
  Same interface as embedding_lookup, except that it supports multi-dimensional
  `ids`, which avoids reshaping the input/output to fit a gather.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A `Tensor` with type `int32` or `int64` containing the ids to be
      looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    shape = array_ops.shape(ids)
    ids_flat = array_ops.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    unique_ids, idx = array_ops.unique(ids_flat)
    unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
    embeds_flat = array_ops.gather(unique_embeddings, idx)
    embed_shape = array_ops.concat(
        [shape, array_ops.shape(unique_embeddings)[1:]], 0)
    embeds = array_ops.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(
        unique_embeddings.get_shape()[1:]))
    return embeds
Author: 1000sprites, Project: tensorflow, Lines: 35, Source: embedding_ops.py
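
A usage sketch: with heavily repeated ids, only the unique ids touch params, and a gather restores the original shape. The import path is an assumption (in TF 1.x contrib builds this function is exposed as tf.contrib.layers.embedding_lookup_unique):

import tensorflow as tf

params = tf.get_variable("emb", [1000, 16])
# Multi-dimensional ids with many repeats: only 3 unique lookups happen.
ids = tf.constant([[7, 7, 7], [7, 42, 7], [7, 7, 13]])

embeds = tf.contrib.layers.embedding_lookup_unique(params, ids)
print(embeds.shape)  # (3, 3, 16): ids shape + embedding dimension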



Note: The tensorflow.python.ops.embedding_ops.embedding_lookup examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects and remain the copyright of their original authors; follow each project's license when using or redistributing them, and do not republish without permission.

