This article collects typical usage examples of the tensorflow.random_crop function in Python. If you have been wondering what random_crop does, how to call it, or where to find working examples, the hand-picked code samples below should help.
Twenty code examples of random_crop are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
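Before the individual examples, here is a minimal standalone sketch of the core call. The tensor contents and sizes are made up for illustration; tf.random_crop is the TensorFlow 1.x name, and the same op is exposed as tf.image.random_crop in newer releases.

import tensorflow as tf

# A dummy 40x40 RGB image; any tensor works as long as every dimension of
# the requested crop size is <= the corresponding input dimension.
image = tf.zeros([40, 40, 3], dtype=tf.float32)

# Take a 32x32x3 window at a uniformly random offset; a fixed seed makes
# the crop reproducible across runs.
crop = tf.random_crop(image, size=[32, 32, 3], seed=42)

with tf.Session() as sess:
    print(sess.run(crop).shape)  # (32, 32, 3)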
Example 1: image_augmentation

def image_augmentation(self, train_data, test_data):
    # Note: flip_left_right flips every image deterministically;
    # random_flip_left_right would flip only about half of them.
    train_data = tf.map_fn(lambda img: tf.image.flip_left_right(img), train_data)
    train_data = tf.map_fn(lambda img: tf.image.random_brightness(img, max_delta=63), train_data)
    train_data = tf.map_fn(lambda img: tf.image.random_contrast(img, lower=0.2, upper=1.8), train_data)
    if self.params['use_grayscale']:
        train_data = tf.map_fn(lambda img: tf.image.rgb_to_grayscale(img), train_data)
    if self.params['use_gradient_images']:
        train_data = self.apply_sobel(train_data)
    # self.input_real = tf.map_fn(lambda img: tf.image.per_image_standardization(img), self.input_real)
    train_data = tf.map_fn(lambda img: tf.image.per_image_standardization(img), train_data)
    test_data = tf.map_fn(lambda img: tf.image.per_image_standardization(img), test_data)
    if self.params['use_grayscale']:
        test_data = tf.map_fn(lambda img: tf.image.rgb_to_grayscale(img), test_data)
    if self.params['use_gradient_images']:
        test_data = self.apply_sobel(test_data)
    # self.input_test = tf.map_fn(lambda img: tf.image.per_image_standardization(img), self.input_test)
    # Center-crop to 30x30, zero-pad back out to 42x42, then take a random 32x32 crop.
    train_data = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(img, 30, 30), train_data)
    train_data = tf.map_fn(lambda img: tf.image.resize_image_with_crop_or_pad(img, 42, 42), train_data)
    if self.params['use_grayscale']:
        train_data = tf.map_fn(lambda img: tf.random_crop(img, [32, 32, 1]), train_data)
    else:
        train_data = tf.map_fn(lambda img: tf.random_crop(img, [32, 32, 3]), train_data)
    return train_data, test_data

Author: alenaliu | Project: noise-as-targets-tensorflow | Lines of code: 29 | Source file: model.py
Example 2: inputs

def inputs(tf_dir, is_train, batch_size, num_epochs=None):
    image, caption_tids, cocoid = records(tf_dir, num_epochs)
    reshaped_image = tf.image.resize_images(image, IM_S, IM_S)
    if is_train:
        distorted_image = tf.random_crop(reshaped_image, [CNN_S, CNN_S, 3])
        distorted_image = tf.image.random_brightness(distorted_image, max_delta=32. / 255.)
        distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
        distorted_image = tf.clip_by_value(distorted_image, 0.0, 1.0)
    else:
        distorted_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, CNN_S, CNN_S)
    image = distorted_image
    # Rescale [0, 1) --> [-1, 1). (tf.sub/tf.mul are the pre-TF-1.0 names
    # for tf.subtract/tf.multiply.)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    num_preprocess_threads = 4
    min_queue_examples = 20
    outputs = [image, caption_tids, cocoid]
    return tf.train.shuffle_batch(
        outputs,
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)

Author: lulupango | Project: image-caption-baseline | Lines of code: 30 | Source file: coco_inputs.py
Example 3: read_and_preprocess

def read_and_preprocess(example_data):
    parsed = tf.parse_single_example(example_data, {
        'image/encoded': tf.FixedLenFeature((), tf.string, ''),
        'image/class/label': tf.FixedLenFeature([], tf.int64, 1),
    })
    image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
    label = tf.cast(
        tf.reshape(parsed['image/class/label'], shape=[]), dtype=tf.int32) - 1
    # End up with pixel values that are in the [-1, 1] range.
    image = tf.image.decode_jpeg(image_bytes, channels=NUM_CHANNELS)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)  # 0-1
    image = tf.expand_dims(image, 0)  # resize_bilinear needs batches
    image = tf.image.resize_bilinear(
        image, [HEIGHT + 10, WIDTH + 10], align_corners=False)
    image = tf.squeeze(image)  # remove batch dimension
    image = tf.random_crop(image, [HEIGHT, WIDTH, NUM_CHANNELS])
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta=63.0 / 255.0)
    image = tf.image.random_contrast(image, lower=0.2, upper=1.8)
    # Pixel values are in range [0, 1]; convert to [-1, 1].
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    # return {'image': image}, label
    return image, label

Author: GoogleCloudPlatform | Project: training-data-analyst | Lines of code: 28 | Source file: model.py
Example 4: read_input

def read_input(image_queue):
    # Read the images and decode the PNG data.
    imageReader = tf.WholeFileReader()
    image_key, image_value = imageReader.read(image_queue)
    image_decode = tf.image.decode_png(image_value, channels=1)
    image_decode = tf.cast(image_decode, tf.float32)
    # Preprocess data
    image_key = rename_image_filename(image_key)  # rename image filename
    label = search_label(image_key)

    # Create a simple record object to hold key, label and image.
    class Record(object):
        pass
    record = Record()
    record.key = image_key
    record.label = tf.cast(label, tf.int32)
    record.image = image_decode

    # PROCESSING IMAGES
    # reshaped_image = tf.cast(record.image, tf.float32)
    # height = 245
    # width = 320
    height = 96
    width = 96
    # Image processing for training the network. Note the many random
    # distortions applied to the image.
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(record.image, [height, width, 1])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order in which they are applied.
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    # (per_image_whitening is the pre-TF-1.0 name for per_image_standardization.)
    float_image = tf.image.per_image_whitening(distorted_image)
    return generate_train_batch(record.label, float_image)

Author: dllatas | Project: deepLearning | Lines of code: 34 | Source file: input.py
Example 5: pre_process_img

def pre_process_img(image):
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_brightness(image, max_delta=32. / 255)
    image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
    # Crop a few pixels off height/width, then resize back to the original size.
    # Note: np.random.randint is evaluated once at graph-construction time, so
    # the crop size is fixed for the lifetime of the graph.
    image = tf.random_crop(image, [default_height - np.random.randint(0, 4),
                                   default_width - np.random.randint(0, 4), 1])
    image = tf.image.resize_images(image, [default_height, default_width])
    return image

Author: UGuess | Project: emotion_classifier | Lines of code: 7 | Source file: cnn_cv_tfrecord.py
Example 6: preprocess_example

def preprocess_example(self, example, mode, hparams):
    # Crop to the target shape instead of down-sampling the target, leaving the
    # target at the maximum available resolution.
    target_shape = (self.output_dim, self.output_dim, self.num_channels)
    example["targets"] = tf.random_crop(example["targets"], target_shape)
    example["inputs"] = image_utils.resize_by_area(example["targets"],
                                                   self.input_dim)
    if self.inpaint_fraction is not None and self.inpaint_fraction > 0:
        mask = random_square_mask((self.input_dim,
                                   self.input_dim,
                                   self.num_channels),
                                  self.inpaint_fraction)
        example["inputs"] = tf.multiply(
            tf.convert_to_tensor(mask, dtype=tf.int64),
            example["inputs"])
        if self.input_dim is None:
            raise ValueError("Cannot train in-painting for examples with "
                             "only targets (i.e. input_dim is None, "
                             "implying there are only targets to be "
                             "generated).")
    return example

Author: qixiuai | Project: tensor2tensor | Lines of code: 28 | Source file: allen_brain.py
Example 7: preprocess_for_train

def preprocess_for_train(image,
                         output_height,
                         output_width,
                         padding=_PADDING):
    """Preprocesses the given image for training.

    Note that the actual resizing scale is sampled from
    [`resize_size_min`, `resize_size_max`].

    Args:
      image: A `Tensor` representing an image of arbitrary size.
      output_height: The height of the image after preprocessing.
      output_width: The width of the image after preprocessing.
      padding: The amount of padding before and after each dimension of the image.

    Returns:
      A preprocessed image.
    """
    padded_image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(padded_image,
                                     [output_height, output_width, 3])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order in which they are applied.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    return tf.image.per_image_whitening(distorted_image)

Author: alexalemi | Project: models | Lines of code: 35 | Source file: cifar10_preprocessing.py
Example 8: add_image_distortion

def add_image_distortion(self):
    with tf.variable_scope('distort_image'):
        image = tf.image.decode_jpeg(self.jpeg, channels=3)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        # Upscale by a random factor in (1, 2], then crop back to INPUT_SIZE.
        crop_scale = tf.random_uniform([], minval=0.5, maxval=1)
        height = tf.cast(INPUT_SIZE[0] / crop_scale, tf.int32)
        width = tf.cast(INPUT_SIZE[1] / crop_scale, tf.int32)
        image = tf.image.resize_images(image, height, width)
        image = tf.random_crop(image, [INPUT_SIZE[0], INPUT_SIZE[1], 3])
        image = tf.image.random_flip_left_right(image)

        def distort_colors_1():
            i = tf.image.random_brightness(image, max_delta=32. / 255.)
            i = tf.image.random_saturation(i, lower=0.5, upper=1.5)
            i = tf.image.random_hue(i, max_delta=0.2)
            i = tf.image.random_contrast(i, lower=0.5, upper=1.5)
            return i

        def distort_colors_2():
            i = tf.image.random_brightness(image, max_delta=32. / 255.)
            i = tf.image.random_contrast(i, lower=0.5, upper=1.5)
            i = tf.image.random_saturation(i, lower=0.5, upper=1.5)
            i = tf.image.random_hue(i, max_delta=0.2)
            return i

        # Apply one of the two color-distortion orders at random.
        image = tf.cond(tf.equal(0, tf.random_uniform(shape=[], maxval=2, dtype=tf.int32)),
                        distort_colors_1, distort_colors_2)
        # Rescale to [-1, 1). (tf.sub/tf.mul are the pre-TF-1.0 names.)
        image = tf.sub(image, 0.5)
        image = tf.mul(image, 2.0)
        self.distorted_image = image

Author: thran | Project: neuron_nets | Lines of code: 32 | Source file: inception_model.py
Example 9: random_distort_image

def random_distort_image(image):
    distorted_image = image
    distorted_image = tf.image.pad_to_bounding_box(
        image, 4, 4, 40, 40)  # pad 4 pixels on each side
    distorted_image = tf.random_crop(distorted_image, [32, 32, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    return distorted_image

Author: bgshih | Project: tf_resnet_cifar | Lines of code: 7 | Source file: model_resnet.py
Example 10: distort_inputs

def distort_inputs(reshaped_image):
    distorted_image = tf.random_crop(reshaped_image, imshape)
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    float_image = tf.image.per_image_whitening(distorted_image)
    return float_image

Author: tcoatale | Project: cnn_framework | Lines of code: 7 | Source file: cifar10.py
Example 11: random_shift

def random_shift(v):
    # Wrap-pad along y and/or x with the tensor's own border rows/columns,
    # then take a random crop of the target size.
    if random_shift_y:
        v = tf.concat([v[-random_shift_y:], v, v[:random_shift_y]], 0)
    if random_shift_x:
        v = tf.concat([v[:, -random_shift_x:], v, v[:, :random_shift_x]],
                      1)
    return tf.random_crop(v, [resize[0], resize[1], size[2]])

Author: shikharbahl | Project: acai | Lines of code: 7 | Source file: data.py
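As a side note on Example 11: because the tensor is wrap-padded with its own border rows and columns before cropping, the crop behaves like a random circular shift. Here is a toy sketch with made-up sizes (a 4x4 single-channel image and a maximum shift of 1 pixel per axis):

import tensorflow as tf

# Toy input: values 0..15 arranged as a 4x4 single-channel image.
v = tf.reshape(tf.range(16, dtype=tf.float32), [4, 4, 1])

shift = 1
v = tf.concat([v[-shift:], v, v[:shift]], 0)        # wrap-pad rows -> 6x4x1
v = tf.concat([v[:, -shift:], v, v[:, :shift]], 1)  # wrap-pad cols -> 6x6x1

# Cropping back to 4x4 picks a random offset in [0, 2] per axis, which is
# equivalent to shifting the image by -1, 0, or +1 pixels with wrap-around.
shifted = tf.random_crop(v, [4, 4, 1])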
Example 12: testNoOp

def testNoOp(self):
    # No random cropping is performed, since the crop size equals value.shape.
    for shape in (2, 1, 1), (2, 1, 3), (4, 5, 3):
        value = np.arange(0, np.prod(shape), dtype=np.int32).reshape(shape)
        with self.test_session():
            crop = tf.random_crop(value, shape).eval()
            self.assertAllEqual(crop, value)

Author: 0ruben | Project: tensorflow | Lines of code: 7 | Source file: random_crop_test.py
Example 13: distorted_inputs

def distorted_inputs(data_dir, batch_size):
    filenames = [os.path.join(data_dir, "data_batch_%d.bin" % i) for i in xrange(1, 6)]
    print(filenames)
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError("Failed to find file: " + f)
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    float_image = tf.image.per_image_whitening(distorted_image)
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    print(
        "Filling queue with %d CIFAR images before starting to train. "
        "This will take a few minutes." % min_queue_examples
    )
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size)

Author: izeye | Project: samples-tensorflow | Lines of code: 32 | Source file: cifar10_input.py
Example 14: image_batch

def image_batch(image_paths, batch_size, load_size=286, crop_size=256, channels=3, shuffle=True,
                num_threads=4, min_after_dequeue=100, allow_smaller_final_batch=False):
    """For jpg and png files."""
    # queue and reader
    img_queue = tf.train.string_input_producer(image_paths, shuffle=shuffle)
    reader = tf.WholeFileReader()
    # preprocessing
    _, img = reader.read(img_queue)
    img = tf.image.decode_image(img, channels=3)
    # tf.image.random_flip_left_right should be used before tf.image.resize_images,
    # because tf.image.decode_image returns a tensor without a static shape, which
    # makes tf.image.resize_images fail. Maybe it's a bug!
    img = tf.image.random_flip_left_right(img)
    img = tf.image.resize_images(img, [load_size, load_size])
    img = tf.random_crop(img, [crop_size, crop_size, channels])
    img = tf.cast(img, tf.float32) / 127.5 - 1
    # batch
    if shuffle:
        capacity = min_after_dequeue + (num_threads + 1) * batch_size
        img_batch = tf.train.shuffle_batch([img],
                                           batch_size=batch_size,
                                           capacity=capacity,
                                           min_after_dequeue=min_after_dequeue,
                                           num_threads=num_threads,
                                           allow_smaller_final_batch=allow_smaller_final_batch)
    else:
        img_batch = tf.train.batch([img],
                                   batch_size=batch_size,
                                   allow_smaller_final_batch=allow_smaller_final_batch)
    return img_batch, len(image_paths)

Author: BenJamesbabala | Project: CycleGAN-Tensorflow-Simple | Lines of code: 34 | Source file: data.py
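The ordering workaround in Example 14 exists because tf.image.decode_image does not set a static shape (it can return a rank-4 tensor for animated GIFs), which resize_images cannot handle. A minimal alternative sketch, assuming plain 3-channel JPEG/PNG inputs with no animated GIFs (the helper name decode_and_resize is made up for this sketch):

import tensorflow as tf

def decode_and_resize(encoded, load_size=286):
    # decode_image leaves rank and shape unknown; declaring the shape lets
    # resize_images treat the result as a rank-3 image.
    img = tf.image.decode_image(encoded, channels=3)
    img.set_shape([None, None, 3])
    return tf.image.resize_images(img, [load_size, load_size])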
Example 15: _parser

def _parser(serialized_example):
    """Parses a single tf.Example into image and label tensors."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            "image": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features["image"], tf.uint8)
    # Initially reshaping to [H, W, C] does not work.
    image = tf.reshape(image, [NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
    # This is needed for `tf.image.resize_image_with_crop_or_pad`.
    image = tf.transpose(image, [1, 2, 0])
    image = tf.cast(image, dtype)
    label = tf.cast(features["label"], tf.int32)
    if data_aug:
        image = tf.image.resize_image_with_crop_or_pad(image, IMAGE_HEIGHT + 4,
                                                       IMAGE_WIDTH + 4)
        image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNEL])
        image = tf.image.random_flip_left_right(image)
    if data_format == "channels_first":
        image = tf.transpose(image, [2, 0, 1])
    if div255:
        image /= 255.
    return image, label

Author: Ajaycs99 | Project: tensorflow | Lines of code: 30 | Source file: cifar_input.py
Example 16: read_and_augment_data

def read_and_augment_data(image_list, label_list, image_size, batch_size, max_nrof_epochs,
                          random_crop, random_flip, random_rotate, nrof_preprocess_threads, shuffle=True):
    images = ops.convert_to_tensor(image_list, dtype=tf.string)
    labels = ops.convert_to_tensor(label_list, dtype=tf.int32)
    # Makes an input queue
    input_queue = tf.train.slice_input_producer([images, labels],
                                                num_epochs=max_nrof_epochs, shuffle=shuffle)
    images_and_labels = []
    for _ in range(nrof_preprocess_threads):
        image, label = read_images_from_disk(input_queue)
        if random_rotate:
            image = tf.py_func(random_rotate_image, [image], tf.uint8)
        if random_crop:
            image = tf.random_crop(image, [image_size, image_size, 3])
        else:
            image = tf.image.resize_image_with_crop_or_pad(image, image_size, image_size)
        if random_flip:
            image = tf.image.random_flip_left_right(image)
        # pylint: disable=no-member
        image.set_shape((image_size, image_size, 3))
        image = tf.image.per_image_standardization(image)
        images_and_labels.append([image, label])
    image_batch, label_batch = tf.train.batch_join(
        images_and_labels, batch_size=batch_size,
        capacity=4 * nrof_preprocess_threads * batch_size,
        allow_smaller_final_batch=True)
    return image_batch, label_batch

Author: kissthink | Project: facenet_regonistant | Lines of code: 32 | Source file: facenet.py
Example 17: preprocess_for_train

def preprocess_for_train(image,
                         output_height,
                         output_width,
                         padding):
    """Preprocesses the given image for training.

    Note that the actual resizing scale is sampled from
    [`resize_size_min`, `resize_size_max`].

    Args:
      image: A `Tensor` representing an image of arbitrary size.
      output_height: The height of the image after preprocessing.
      output_width: The width of the image after preprocessing.
      padding: The amount of padding before and after each dimension of the image.

    Returns:
      A preprocessed image.
    """
    # Transform the image to floats.
    image = tf.to_float(image)
    if padding > 0:
        image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
    # Rotate by a random multiple of 0.1*pi in [-0.4*pi, 0.3*pi]; the angle is
    # sampled with NumPy, so it is fixed at graph-construction time.
    angles = 0.1 * np.pi * np.random.randint(8, size=1) - 0.4 * np.pi
    image = tf.contrib.image.rotate(image, angles)
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(image,
                                     [output_height, output_width, 3])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    # distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    return tf.image.per_image_standardization(distorted_image)

Author: ZhenqiWangC | Project: models | Lines of code: 33 | Source file: deeplearning_cifar.py
Example 18: distorted_inputs

def distorted_inputs(data_dir, batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
      data_dir: file name list.
      batch_size: Number of images per batch.

    Returns:
      images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 1] size.
      labels: Labels. 1D tensor of [batch_size] size.
    """
    filenames = get_train_filenames(data_dir)
    print(filenames)
    for f in filenames:
        if not gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # Create a queue that produces the filenames to read.
    filename_queue = tf.train.string_input_producer(filenames)
    # Read examples from files in the filename queue.
    read_input = read_aurora(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # angle = int(random.random() * 360)
    # M = cv2.getRotationMatrix2D((IMAGE_SIZE / 2, IMAGE_SIZE / 2), angle, 1)
    # dst = cv2.warpAffine(reshaped_image, M, (IMAGE_SIZE, IMAGE_SIZE))
    # # Convert rotated image back to tensor
    # rotated_tensor = tf.convert_to_tensor(np.array(dst))
    # Image processing for training the network. Note the many random
    # distortions applied to the image.
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(reshaped_image, [height, width, 1])
    # distorted_image = tf.image.resize_area()
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order in which they are applied.
    # distorted_image = tf.image.random_brightness(distorted_image,
    #                                              max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    print('Filling queue with %d aurora images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size)

Author: xuptlib | Project: annet | Lines of code: 60 | Source file: input_data.py
Example 19: random_crop_and_pad_image_and_labels

def random_crop_and_pad_image_and_labels(image, label, crop_h, crop_w, ignore_label=255):
    """Randomly crops and pads the input images.

    Args:
      image: Training image to crop/pad.
      label: Segmentation mask to crop/pad.
      crop_h: Height of cropped segment.
      crop_w: Width of cropped segment.
      ignore_label: Label to ignore during the training.
    """
    label = tf.cast(label, dtype=tf.float32)
    label = label - ignore_label  # Needs to be subtracted and later added due to 0 padding.
    # Concatenate image and label so that a single random_crop keeps them aligned.
    combined = tf.concat(axis=2, values=[image, label])
    image_shape = tf.shape(image)
    combined_pad = tf.image.pad_to_bounding_box(combined, 0, 0,
                                                tf.maximum(crop_h, image_shape[0]),
                                                tf.maximum(crop_w, image_shape[1]))
    last_image_dim = tf.shape(image)[-1]
    # last_label_dim = tf.shape(label)[-1]
    combined_crop = tf.random_crop(combined_pad, [crop_h, crop_w, 4])
    img_crop = combined_crop[:, :, :last_image_dim]
    label_crop = combined_crop[:, :, last_image_dim:]
    label_crop = label_crop + ignore_label
    label_crop = tf.cast(label_crop, dtype=tf.uint8)
    # Set static shapes so that TensorFlow knows the shapes at graph-construction time.
    img_crop.set_shape((crop_h, crop_w, 3))
    label_crop.set_shape((crop_h, crop_w, 1))
    return img_crop, label_crop

Author: YCYchunyan | Project: Deeplab-v2--ResNet-101--Tensorflow | Lines of code: 30 | Source file: image_reader.py
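For context, here is a hedged usage sketch of the joint crop in Example 19; the input tensors and the 321x321 crop size are made up for illustration.

import tensorflow as tf

# Hypothetical inputs: a 500x400 RGB image and its segmentation mask.
image = tf.zeros([500, 400, 3], dtype=tf.float32)
label = tf.zeros([500, 400, 1], dtype=tf.uint8)

# Image and mask are concatenated internally, so the same random 321x321
# window is applied to both and they stay pixel-aligned after cropping.
img_crop, label_crop = random_crop_and_pad_image_and_labels(
    image, label, crop_h=321, crop_w=321, ignore_label=255)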
Example 20: distorted_inputs

def distorted_inputs(batch_size):
    path = "train"
    read_input = read_cifar10(path)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE_Y
    width = IMAGE_SIZE_X
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Because these operations are not commutative, consider randomizing
    # the order in which they are applied.
    distorted_image = tf.image.random_brightness(distorted_image,
                                                 max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)
    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d CIFAR images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)
    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size,
                                           shuffle=True)

Author: isunglee | Project: project_iii | Lines of code: 34 | Source file: cifar10_input.py
Note: The tensorflow.random_crop examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with those authors. Please consult each project's License before redistributing or reusing the code, and do not republish without permission.