
Python tensorflow.map_fn Function Code Examples


This article collects typical usage examples of Python's tensorflow.map_fn function. If you have been wondering what exactly map_fn does, how to call it, or what it looks like in real code, the curated examples below should help.



The sections below present 20 code examples of the map_fn function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
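
Before the numbered examples, here is a minimal sketch of the basic call pattern, written against the same TF 1.x-era API the examples below use (the constant values and session setup are illustrative only): tf.map_fn applies a function to each slice of elems along the first dimension and stacks the results; when the function's output dtype differs from that of elems, dtype must be passed explicitly.

import tensorflow as tf

elems = tf.constant([1, 2, 3, 4, 5, 6])
# The lambda is applied to each element along axis 0; the per-element
# results are stacked back into a single tensor.
squares = tf.map_fn(lambda x: x * x, elems)

# When the output dtype differs from the input dtype, dtype is required.
strings = tf.constant(["1", "2", "3"])
numbers = tf.map_fn(tf.string_to_number, strings, dtype=tf.float32)

with tf.Session() as sess:
    print(sess.run(squares))  # [ 1  4  9 16 25 36]
    print(sess.run(numbers))  # [1. 2. 3.]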

Example 1: testMapSparseTensor

 def testMapSparseTensor(self):
     with self.test_session():
         with self.assertRaises(TypeError):
             tf.map_fn(
                 lambda x: x,
                 tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]], values=tf.constant([0, 1, 2]), shape=[2, 2]),
             )
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py


Example 2: testMap_Scoped

  def testMap_Scoped(self):
    with self.test_session() as sess:

      def double_scoped(x):
        """2x with a dummy 2 that is scoped."""
        with tf.variable_scope("body"):
          # Dummy variable, just to check that scoping works as intended.
          two = tf.get_variable("two", [], dtype=tf.int32,
                                initializer=tf.constant_initializer(2))
          return tf.mul(x, two)

      with tf.variable_scope("root") as varscope:
        elems = tf.constant([1, 2, 3, 4, 5, 6], name="data")
        doubles = np.array([2*x for x in [1, 2, 3, 4, 5, 6]])

        r = tf.map_fn(double_scoped, elems)
        # Check that we have the one variable we asked for here.
        self.assertEqual(len(tf.trainable_variables()), 1)
        self.assertEqual(tf.trainable_variables()[0].name, "root/body/two:0")
        sess.run([tf.initialize_all_variables()])
        self.assertAllEqual(doubles, r.eval())

        # Now let's reuse our single variable.
        varscope.reuse_variables()
        r = tf.map_fn(double_scoped, elems)
        self.assertEqual(len(tf.trainable_variables()), 1)
        self.assertAllEqual(doubles, r.eval())
Author: 285219011, Project: hello-world, Lines: 27, Source: functional_ops_test.py


Example 3: alexnet

def alexnet(image, batch_size=50):
    """ Canonical alexnet implementation. See the network detail above
    Arg:
        images: pixels value in an array (transformed to a tensor)

    Return:
        logits
    """
    x_image = tf.reshape(image, [-1, 32, 32, 3])

    # Randomly crop a [height, width] section of the image.
    # distorted_image = tf.random_crop(x_image, [height, width, 3])

    # Randomly flip the image horizontally.
    # distorted_image = tf.image.random_flip_left_right(x_image)
    distorted_image = tf.map_fn(lambda image: tf.image.random_flip_left_right(image), x_image)

    # Because these operations are not commutative, consider randomizing
    # the order of their operations.
    # NOTE: since per_image_standardization zeros the mean and makes
    # the stddev unit, this likely has no effect see tensorflow#1458.
    distorted_image = tf.map_fn(lambda image: tf.image.random_brightness(image, max_delta=63), distorted_image)
    distorted_image = tf.map_fn(lambda image: tf.image.random_contrast(image, lower=0.2, upper=1.8), distorted_image)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.map_fn(lambda image: tf.image.per_image_standardization(image), distorted_image)

    # conv1
    conv1 = conv_layer(float_image, 3, 64, "conv1")

    #pool1
    pool1 = tf.nn.max_pool(conv1, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')

    #norm1
    norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    #conv2
    W_conv2 = weight_variable([5,5,64,64])
    conv2 = conv_layer(norm1, 64, 64, "conv2")

    #norm2
    norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)

    #pool2
    pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')

    #fc1, fully connected layer
    reshape = tf.reshape(pool2, [-1, 8 * 8 * 64])
    fc1 = fc_layer(reshape, 8*8*64, 1024, "fc1")

    #local4
    # weights = weight_variable([384, 192])
    # biases = bias_variable([192])
    # local4 = tf.nn.relu(tf.matmul(local3, weights) + biases)


    # linear layer(WX + b),
    logits = fc_layer(fc1, 1024, 10, "output_layer", act=tf.identity)

    return logits
Author: walkerlala, Project: walkerlala.github.io, Lines: 60, Source: cifar10_alexnet.py


Example 4: parse_sequence_to_pairs_batch

def parse_sequence_to_pairs_batch(
    serialized_example, preprocess_fn, is_training, num_views, batch_size,
    window):
  """Parses a serialized sequence example into a batch of preprocessed data.

  Args:
    serialized_example: A serialized SequenceExample.
    preprocess_fn: A function with the signature (raw_images, is_training) ->
      preprocessed_images.
    is_training: Boolean, whether or not we're in training.
    num_views: Int, the number of simultaneous viewpoints at each timestep in
      the dataset.
    batch_size: Int, size of the batch to get.
    window: Int, only take pairs from a maximum window of this size.
  Returns:
    preprocessed: A 4-D float32 `Tensor` holding preprocessed images.
    anchor_images: A 4-D float32 `Tensor` holding raw anchor images.
    pos_images: A 4-D float32 `Tensor` holding raw positive images.
  """
  _, views, seq_len = parse_sequence_example(serialized_example, num_views)

  # Get random (anchor, positive) timestep and viewpoint indices.
  num_pairs = batch_size // 2
  ap_time_indices, a_view_indices, p_view_indices = get_tcn_anchor_pos_indices(
      seq_len, num_views, num_pairs, window)

  # Gather the image strings.
  combined_anchor_indices = tf.concat(
      [tf.expand_dims(a_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  combined_pos_indices = tf.concat(
      [tf.expand_dims(p_view_indices, 1),
       tf.expand_dims(ap_time_indices, 1)], 1)
  anchor_images = tf.gather_nd(views, combined_anchor_indices)
  pos_images = tf.gather_nd(views, combined_pos_indices)

  # Decode images.
  anchor_images = tf.map_fn(
      preprocessing.decode_image, anchor_images, dtype=tf.float32)
  pos_images = tf.map_fn(
      preprocessing.decode_image, pos_images, dtype=tf.float32)

  # Concatenate [anchor, positive] images into a batch and preprocess it.
  concatenated = tf.concat([anchor_images, pos_images], 0)
  preprocessed = preprocess_fn(concatenated, is_training)
  anchor_prepro, positive_prepro = tf.split(preprocessed, num_or_size_splits=2,
                                            axis=0)

  # Set static batch dimensions for all image tensors
  ims = [anchor_prepro, positive_prepro, anchor_images, pos_images]
  ims = [set_image_tensor_batch_dim(i, num_pairs) for i in ims]
  [anchor_prepro, positive_prepro, anchor_images, pos_images] = ims

  # Assign each anchor and positive the same label.
  anchor_labels = tf.range(1, num_pairs+1)
  positive_labels = tf.range(1, num_pairs+1)

  return (anchor_prepro, positive_prepro, anchor_images, pos_images,
          anchor_labels, positive_labels, seq_len)
Author: danabo, Project: models, Lines: 59, Source: data_providers.py


Example 5: testMap_MultiOutputMismatchedDtype

 def testMap_MultiOutputMismatchedDtype(self):
   with self.test_session():
     nums = np.array([1, 2, 3, 4, 5, 6])
     with self.assertRaisesRegexp(
         TypeError, r"two structures don't have the same sequence type."):
       # lambda emits tuple, but dtype is a list
       tf.map_fn(lambda x: ((x + 3) * 2, -(x + 3) * 2), nums,
                 dtype=[tf.int64, tf.int64])
Author: ComeOnGetMe, Project: tensorflow, Lines: 8, Source: functional_ops_test.py
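
For contrast with the failing case above, here is a hedged sketch of the matching-structure variant (values are illustrative): when the mapped function emits a tuple, dtype must be a tuple of the same structure, and tf.map_fn then returns a matching tuple of tensors instead of raising TypeError.

import numpy as np
import tensorflow as tf

nums = np.array([1, 2, 3, 4, 5, 6])
# dtype is a tuple here, mirroring the tuple the lambda returns, so the
# structures match and two int64 tensors come back.
r1, r2 = tf.map_fn(lambda x: ((x + 3) * 2, -(x + 3) * 2), nums,
                   dtype=(tf.int64, tf.int64))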


Example 6: loss

    def loss(self, logits, labels, weights=None, biases=None):
        with tf.name_scope("xent"):
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, labels))

            l2_reg = 0.0
            if weights:
                l2_reg += sum(tf.map_fn(tf.nn.l2_loss, weights))
            if biases:
                l2_reg += sum(tf.map_fn(tf.nn.l2_loss, biases))
            loss += self.l2_lambda * l2_reg
        return loss
Author: tech-team, Project: sentiment, Lines: 11, Source: cnn.py


Example 7: compare_vs_map

def compare_vs_map(X1, X2, kern, sess):
    K12_map = tf.map_fn(lambda x: kern.K(x[0], x[1]), [X1, X2], dtype=settings.float_type)
    K12_native = kern.K(X1, X2)
    assert_allclose(*sess.run([K12_map, K12_native]))

    K_map = tf.map_fn(kern.K, X1, dtype=settings.float_type)
    K_native = kern.K(X1)
    assert_allclose(*sess.run([K_map, K_native]))

    Kdiag_map = tf.map_fn(kern.Kdiag, X1, dtype=settings.float_type)
    Kdiag_native = kern.Kdiag(X1)
    assert_allclose(*sess.run([Kdiag_map, Kdiag_native]))
Author: sanket-kamthe, Project: GPflow, Lines: 12, Source: test_broadcasting.py


Example 8: transpose

def transpose(images):
    """Transpose an image/images by swapping the first and second dimension.
    (A mirror to tf.image transpose_image)

    Args:
        images: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.

    Returns:
        If `image` was 4-D, a 4-D float Tensor of shape
        `[batch, target_height, target_width, channels]`
        If `image` was 3-D, a 3-D float Tensor of shape
        `[target_height, target_width, channels]`

    Raises:
        ValueError: if the shape of `image` not supported.
    """
    images_shape = get_shape(images)
    if len(images_shape) > 4:
        raise ValueError("'image' must have either 3 or 4 dimensions, "
                         "received `{}`.".format(images_shape))

    if len(images_shape) == 4:
        return tf.map_fn(lambda img: tf.image.transpose_image(img), images)

    return tf.image.transpose_image(images)
Author: AlexMikhalev, Project: polyaxon, Lines: 26, Source: image.py


Example 9: build_inputs_and_outputs

  def build_inputs_and_outputs(self):

    if self.frame_features:

      serialized_examples = tf.placeholder(tf.string, shape=(None,))

      fn = lambda x: self.build_prediction_graph(x)
      video_id_output, top_indices_output, top_predictions_output = (
          tf.map_fn(fn, serialized_examples, 
                    dtype=(tf.string, tf.int32, tf.float32)))

    else:

      serialized_examples = tf.placeholder(tf.string, shape=(None,))

      video_id_output, top_indices_output, top_predictions_output = (
          self.build_prediction_graph(serialized_examples))

    inputs = {"example_bytes": 
              saved_model_utils.build_tensor_info(serialized_examples)}

    outputs = {
        "video_id": saved_model_utils.build_tensor_info(video_id_output),
        "class_indexes": saved_model_utils.build_tensor_info(top_indices_output),
        "predictions": saved_model_utils.build_tensor_info(top_predictions_output)}

    return inputs, outputs
Author: lvaleriu, Project: Youtube-8M-WILLOW, Lines: 27, Source: export_model.py


Example 10: _decode_png_instance_masks

  def _decode_png_instance_masks(self, keys_to_tensors):
    """Decode PNG instance segmentation masks and stack into dense tensor.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """

    def decode_png_mask(image_buffer):
      image = tf.squeeze(
          tf.image.decode_image(image_buffer, channels=1), axis=2)
      image.set_shape([None, None])
      image = tf.to_float(tf.greater(image, 0))
      return image

    png_masks = keys_to_tensors['image/object/mask']
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    if isinstance(png_masks, tf.SparseTensor):
      png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
    return tf.cond(
        tf.greater(tf.size(png_masks), 0),
        lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
Author: forging2012, Project: models, Lines: 30, Source: tf_example_decoder.py


Example 11: normalized_to_image_coordinates

def normalized_to_image_coordinates(normalized_boxes, image_shape,
                                    parallel_iterations=32):
    """Converts a batch of boxes from normal to image coordinates.

    Args:
      normalized_boxes: a float32 tensor of shape [None, num_boxes, 4] in
        normalized coordinates.
      image_shape: a float32 tensor of shape [4] containing the image shape.
      parallel_iterations: parallelism for the map_fn op.

    Returns:
      absolute_boxes: a float32 tensor of shape [None, num_boxes, 4] containing the
        boxes in image coordinates.
    """

    def _to_absolute_coordinates(normalized_boxes):
        return box_list_ops.to_absolute_coordinates(
            box_list.BoxList(normalized_boxes),
            image_shape[1], image_shape[2], check_range=False).get()

    absolute_boxes = tf.map_fn(
        _to_absolute_coordinates,
        elems=(normalized_boxes),
        dtype=tf.float32,
        parallel_iterations=parallel_iterations,
        back_prop=True)
    return absolute_boxes
Author: Zumbalamambo, Project: deepcv, Lines: 27, Source: ops.py


Example 12: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    inputs = self._get_input_tensors(in_layers)
    parent_tensor = inputs[0]
    training = kwargs['training'] if 'training' in kwargs else 1.0

    parent_tensor = parent_tensor / 255.0
    if not self.augment:
      out_tensor = parent_tensor
    else:

      def preprocess(img):
        img = tf.image.random_flip_left_right(img)
        img = tf.image.random_flip_up_down(img)
        img = tf.image.rot90(img, k=np.random.randint(0, 4))
        if self.distort_color:
          img = tf.image.random_brightness(img, max_delta=32. / 255.)
          img = tf.image.random_saturation(img, lower=0.5, upper=1.5)
          img = tf.clip_by_value(img, 0.0, 1.0)
        if self.central_crop:
          # sample cut ratio from a clipped gaussian
          img = tf.image.central_crop(img,
                                      np.clip(
                                          np.random.normal(1., 0.06), 0.8, 1.))
          img = tf.image.resize_bilinear(
              tf.expand_dims(img, 0), tf.convert_to_tensor(self.size))[0]
        return img

      outs = tf.map_fn(preprocess, parent_tensor)
      # train/valid differences
      out_tensor = training * outs + (1 - training) * parent_tensor
    if set_tensors:
      self.out_tensor = out_tensor
    return out_tensor
Author: ktaneishi, Project: deepchem, Lines: 33, Source: model.py


Example 13: get_labels_from_annotation_batch

def get_labels_from_annotation_batch(annotation_batch_tensor, class_labels):

    batch_labels = tf.map_fn(fn=lambda x: get_labels_from_annotation(annotation_tensor=x, class_labels=class_labels),
                             elems=annotation_batch_tensor,
                             dtype=tf.float32)

    return batch_labels
Author: ruyi345, Project: Fully-convolutional-networks-TF, Lines: 7, Source: utils.py


Example 14: _sample_n

  def _sample_n(self, n, seed=None):
    n_draws = tf.cast(self.total_count, dtype=tf.int32)
    k = self.event_shape_tensor()[0]

    # broadcast the total_count and logits to same shape
    n_draws = tf.ones_like(
        self.logits[..., 0], dtype=n_draws.dtype) * n_draws
    logits = tf.ones_like(
        n_draws[..., tf.newaxis], dtype=self.logits.dtype) * self.logits

    # flatten the total_count and logits
    flat_logits = tf.reshape(logits, [-1, k])  # [B1B2...Bm, k]
    flat_ndraws = n * tf.reshape(n_draws, [-1])  # [B1B2...Bm]

    # computes each total_count and logits situation by map_fn
    def _sample_single(args):
      logits, n_draw = args[0], args[1]  # [K], []
      x = tf.multinomial(logits[tf.newaxis, ...], n_draw,
                         seed)  # [1, n*n_draw]
      x = tf.reshape(x, shape=[n, -1])  # [n, n_draw]
      x = tf.reduce_sum(tf.one_hot(x, depth=k), axis=-2)  # [n, k]
      return x

    x = tf.map_fn(
        _sample_single, [flat_logits, flat_ndraws],
        dtype=self.dtype)  # [B1B2...Bm, n, k]

    # reshape the results to proper shape
    x = tf.transpose(x, perm=[1, 0, 2])
    final_shape = tf.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = tf.reshape(x, final_shape)  # [n, B1, B2,..., Bm, k]
    return x
Author: asudomoeva, Project: probability, Lines: 32, Source: multinomial.py


Example 15: alignment_summary

def alignment_summary(features, labels, predictions, mode, params):
    image = tf.cast(features, tf.uint8)
    prediction = predictions['prediction']
    label_points = tf.stack([labels[:, ::2], labels[:, 1::2]], axis=2)
    predict_points = tf.stack(
        [prediction[:, ::2], prediction[:, 1::2]], axis=2)
    def draw_points(args):
        image, label_points, predict_points = args
        def draw_points_pyfn(image, points, color, radius=1):
            if image.shape[-1] == 1:
                image = np.squeeze(image, axis=-1)
            image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
            draw = ImageDraw.Draw(image_pil)
            im_width, im_height = image_pil.size
            for point in points:
                x = point[0] * im_width
                y = point[1] * im_height
                draw.ellipse([(x - radius, y - radius),
                              (x + radius, y + radius)],
                             outline=color, fill=color)
            image = np.array(image_pil)
            return image
        image = tf.py_func(
            functools.partial(draw_points_pyfn, color=(0, 255, 0)),
            (image, label_points), tf.uint8)
        image = tf.py_func(
            functools.partial(draw_points_pyfn, color=(255, 0, 0)),
            (image, predict_points), tf.uint8)
        return image
    image = tf.map_fn(
        draw_points, (image, label_points, predict_points),
        dtype=tf.uint8, back_prop=False)
    tf.summary.image('image', image, 10)
    add_trainable_variables_histogram()
Author: kimmyzhang, Project: tensorcv, Lines: 34, Source: summary.py


Example 16: batch_logits

def batch_logits(indices, acts):
  init_outs = tf.zeros([1, FLAGS.wvs, 1])

  def logits_continue(*parms):
    cur, idxs, _, _, _ = parms
    return tf.less(cur, tf.size(idxs), name='batch_done')

  def logits_batch_body(*parms):
    i, idxs, ptr, css, act = parms
    i_s = tf.reshape(tf.slice(idxs, tf.pack([i]), [1]), [])
    start, size = get_bounds(i_s)
    outs = forward_prop_nodes(start, size, acts, ptr)
    new_css = tf.cond(tf.equal(i, iZERO),
                      lambda: outs,
                      lambda: tf.concat(0, [css, outs]))
    return i + iONE, indices, ptr + size, new_css, acts
  with tf.device('/cpu:0'):
    iZ =  tf.convert_to_tensor(0, dtype=tf.int32)
  zero_activations(acts)
  while_parms = [iZ, indices, iZ, init_outs, acts]
  _, _, _, outs, _ = tf.while_loop(logits_continue, logits_batch_body, while_parms,
                                   parallel_iterations=1, name='batch_logits')
  lumpy_logits = tf.map_fn(activation_to_logits, outs, name='raw_logits')
  logits = tf.squeeze(lumpy_logits, [2], name='logits')
  return logits
Author: rgobbel, Project: rntn, Lines: 25, Source: tf_rntn.py


Example 17: preprocess_for_inception

def preprocess_for_inception(images):
  """Preprocess images for inception.

  Args:
    images: images minibatch. Shape [batch size, width, height,
      channels]. Values are in [0..255].

  Returns:
    preprocessed_images
  """

  # Images should have 3 channels.
  assert images.shape[3].value == 3

  # tfgan_eval.preprocess_image function takes values in [0, 1], so rescale.
  with tf.control_dependencies([tf.assert_greater_equal(images, 0.0),
                                tf.assert_less_equal(images, 255.0)]):
    images = tf.identity(images)

  preprocessed_images = tf.map_fn(
      fn=tfgan_eval.preprocess_image,
      elems=images,
      back_prop=False
  )

  return preprocessed_images
Author: changchunli, Project: compare_gan, Lines: 26, Source: fid_score.py


Example 18: testMap_MultiInputSingleOutput

 def testMap_MultiInputSingleOutput(self):
     with self.test_session():
         nums = np.array([1, 2, 3, 4, 5, 6])
         r = tf.map_fn(lambda x: x[0] * x[1][0] + x[1][1], (nums, (nums, -nums)), dtype=tf.int64)
         self.assertEqual((6,), r.get_shape())
         received = r.eval()
         self.assertAllEqual(nums * nums + (-nums), received)
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py


Example 19: rotate90

def rotate90(images, k=1, is_random=False, seed=None, name=None):
    """Rotate (randomly) images counter-clockwise by 90 degrees.
    (A mirror to tf.image rot90)

    Args:
        images: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.
        k: A scalar integer. The number of times the image is rotated by 90 degrees.
        is_random: `bool`, If True, adjust randomly.
        seed: A Python integer. Used to create a random seed. See @{tf.set_random_seed}.
        name: A name for this operation (optional).

    Returns:
        If `image` was 4-D, a 4-D float Tensor of shape
        `[batch, target_height, target_width, channels]`
        If `image` was 3-D, a 3-D float Tensor of shape
        `[target_height, target_width, channels]`

    Raises:
        ValueError: if the shape of `image` not supported.
    """
    if is_random:
        k = random_ops.random_shuffle([0, 1, 2, 3], seed=seed)[0]

    images_shape = get_shape(images)
    if len(images_shape) > 4:
        raise ValueError("'image' must have either 3 or 4 dimensions, "
                         "received `{}`.".format(images_shape))

    if len(images_shape) == 4:
        return tf.map_fn(lambda img: tf.image.rot90(img, k, name), images)

    return tf.image.rot90(images, k, name)
Author: AlexMikhalev, Project: polyaxon, Lines: 33, Source: image.py


Example 20: standardize

def standardize(images):
    """Linearly scales `image` to have zero mean and unit norm.
    (A mirror to tf.image per_image_standardization)

    This op computes `(x - mean) / adjusted_stddev`, where `mean` is the average
    of all values in image, and
    `adjusted_stddev = max(stddev, 1.0/sqrt(image.NumElements()))`.

    `stddev` is the standard deviation of all values in `image`. It is capped
    away from zero to protect against division by 0 when handling uniform images.

    Args:
        images: 4-D Tensor of shape `[batch, height, width, channels]` or
                3-D Tensor of shape `[height, width, channels]`.

    Returns:
        The standardized image with same shape as `image`.

    Raises:
        ValueError: if the shape of 'image' is incompatible with this function.

    """
    images_shape = get_shape(images)
    if len(images_shape) > 4:
        raise ValueError("'image' must have either 3 or 4 dimensions, "
                         "received `{}`.".format(images_shape))

    if len(images_shape) == 4:
        return tf.map_fn(lambda img: tf.image.per_image_standardization(img), images)
    return tf.image.per_image_standardization(images)
Author: AlexMikhalev, Project: polyaxon, Lines: 30, Source: image.py



Note: The tensorflow.map_fn examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

