本文整理汇总了Python中menpo.visualize.progress_bar_str函数的典型用法代码示例。如果您正苦于以下问题:Python progress_bar_str函数的具体用法?Python progress_bar_str怎么用?Python progress_bar_str使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了progress_bar_str函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: _import_glob_generator
def _import_glob_generator(
    pattern,
    extension_map,
    max_assets=None,
    has_landmarks=False,
    landmark_resolver=None,
    importer_kwargs=None,
    verbose=False,
):
    r"""
    Lazily import every asset whose filepath matches ``pattern``.

    The glob is resolved eagerly (optionally truncated to ``max_assets``)
    and the actual importing is delegated to ``_multi_import_generator``.
    When ``verbose``, a dynamic progress line is printed per asset.

    Raises
    ------
    ValueError
        If the glob pattern matches no files at all.
    """
    paths = list(glob_with_suffix(pattern, extension_map))
    if max_assets:
        paths = paths[:max_assets]
    total = len(paths)
    if total == 0:
        raise ValueError("The glob {} yields no assets".format(pattern))
    assets = _multi_import_generator(
        paths,
        extension_map,
        has_landmarks=has_landmarks,
        landmark_resolver=landmark_resolver,
        importer_kwargs=importer_kwargs,
    )
    for index, asset in enumerate(assets):
        if verbose:
            bar = progress_bar_str(float(index + 1) / total, show_bar=True)
            print_dynamic("- Loading {} assets: {}".format(total, bar))
        yield asset
开发者ID:kod3r,项目名称:menpo,代码行数:29,代码来源:base.py
示例2: _get_relative_locations
def _get_relative_locations(shapes, graph, level_str, verbose):
    r"""
    Compute the relative edge locations of every shape.

    Returns a `numpy.ndarray` of shape ``(2, n_images, n_edges)``.
    """
    # wrap each shape in the appropriate point-graph type
    if isinstance(graph, Tree):
        point_graphs = [
            PointTree(s.points, graph.adjacency_array, graph.root_vertex)
            for s in shapes
        ]
    else:
        point_graphs = [
            PointDirectedGraph(s.points, graph.adjacency_array)
            for s in shapes
        ]
    n_shapes = len(point_graphs)
    # accumulate as (2, n_edges, n_shapes); rolled to the documented
    # layout on return
    rel_loc_array = np.empty((2, graph.n_edges, n_shapes))
    for idx, pg in enumerate(point_graphs):
        if verbose:
            progress = progress_bar_str(float(idx + 1) / n_shapes,
                                        show_bar=False)
            print_dynamic('{}Computing relative locations from '
                          'shapes - {}'.format(level_str, progress))
        rel_loc_array[..., idx] = pg.relative_locations().T
    return np.rollaxis(rel_loc_array, 2, 1)
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:33,代码来源:builder.py
示例3: _regression_data
def _regression_data(self, images, gt_shapes, perturbed_shapes,
verbose=False):
r"""
Method that generates the regression data : features and delta_ps.
Parameters
----------
images : list of :map:`MaskedImage`
The set of landmarked images.
gt_shapes : :map:`PointCloud` list
List of the ground truth shapes that correspond to the images.
perturbed_shapes : :map:`PointCloud` list
List of the perturbed shapes in order to regress.
verbose : `boolean`, optional
If ``True``, the progress is printed.
"""
if verbose:
print_dynamic('- Generating regression data')
n_images = len(images)
features = []
delta_ps = []
for j, (i, s, p_shape) in enumerate(zip(images, gt_shapes,
perturbed_shapes)):
if verbose:
print_dynamic('- Generating regression data - {}'.format(
progress_bar_str((j + 1.) / n_images, show_bar=False)))
for ps in p_shape:
features.append(self.features(i, ps))
delta_ps.append(self.delta_ps(s, ps))
return np.asarray(features), np.asarray(delta_ps)
开发者ID:OlivierML,项目名称:menpofit,代码行数:34,代码来源:trainer.py
示例4: apply_pyramid_on_images
def apply_pyramid_on_images(generators, n_levels, verbose=False):
    r"""
    Exhaust the pyramid generators level by level.

    Returns a list with one entry per level; each entry is the list of
    the images produced by every generator at that level.
    """
    all_images = []
    n_generators = len(generators)
    for level in range(n_levels):
        if verbose:
            if n_levels > 1:
                level_str = '- Apply pyramid: [Level {} - '.format(level + 1)
            else:
                level_str = '- Apply pyramid: '
        level_images = []
        for count, generator in enumerate(generators):
            if verbose:
                print_dynamic(
                    '{}Computing feature space/rescaling - {}'.format(
                        level_str,
                        progress_bar_str((count + 1.) / n_generators,
                                         show_bar=False)))
            level_images.append(next(generator))
        all_images.append(level_images)
    if verbose:
        print_dynamic('- Apply pyramid: Done\n')
    return all_images
开发者ID:OlivierML,项目名称:menpofit,代码行数:25,代码来源:trainer.py
示例5: _build_appearance_model_sparse
def _build_appearance_model_sparse(all_patches_array, graph, patch_shape,
                                   n_channels, n_appearance_parameters,
                                   level_str, verbose):
    r"""
    Train a sparse appearance model: the mean appearance vector and a
    block-sparse inverse covariance assembled edge by edge.
    """
    if verbose:
        print_dynamic('{}Training appearance distribution per '
                      'edge'.format(level_str))
    # mean appearance vector (per-feature mean over the sample axis)
    app_mean = np.mean(all_patches_array, axis=1)
    # number of values in a single vectorized patch
    patch_len = np.prod(patch_shape) * n_channels
    dim = graph.n_vertices * patch_len
    all_cov = lil_matrix((dim, dim))
    n_edges = graph.n_edges
    for e in range(n_edges):
        if verbose:
            print_dynamic('{}Training appearance distribution '
                          'per edge - {}'.format(
                          level_str,
                          progress_bar_str(float(e + 1) / n_edges,
                                           show_bar=False)))
        # the two vertices of this edge, in ascending order
        v1 = np.min(graph.adjacency_array[e, :])
        v2 = np.max(graph.adjacency_array[e, :])
        # row/column spans of the two vertex blocks
        v1_from, v1_to = v1 * patch_len, (v1 + 1) * patch_len
        v2_from, v2_to = v2 * patch_len, (v2 + 1) * patch_len
        # stack the data of both vertices and invert their joint covariance
        edge_data = np.concatenate((all_patches_array[v1_from:v1_to, :],
                                    all_patches_array[v2_from:v2_to, :]))
        icov = _covariance_matrix_inverse(np.cov(edge_data),
                                          n_appearance_parameters)
        # scatter the four quadrants of the inverse into the sparse matrix
        all_cov[v1_from:v1_to, v2_from:v2_to] += icov[:patch_len, patch_len:]
        all_cov[v2_from:v2_to, v1_from:v1_to] += icov[patch_len:, :patch_len]
        all_cov[v1_from:v1_to, v1_from:v1_to] += icov[:patch_len, :patch_len]
        all_cov[v2_from:v2_to, v2_from:v2_to] += icov[patch_len:, patch_len:]
    return app_mean, all_cov.tocsr()
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:59,代码来源:builder.py
示例6: _build_deformation_model
def _build_deformation_model(graph, relative_locations, level_str, verbose):
    r"""
    Build the deformation precision matrix of a graph-based model.

    For every edge the 2x2 covariance of its relative locations across the
    training shapes is inverted, and the entries of that inverse are spread
    into the parent/child blocks of a ``(2 * n_vertices, 2 * n_vertices)``
    matrix.

    Parameters
    ----------
    graph : graph-like
        Must expose ``n_vertices``, ``n_edges`` and ``adjacency_array``,
        where ``adjacency_array[e]`` is ``(parent, child)``.
    relative_locations : ``(2, n_images, n_edges)`` `ndarray`
        Relative (x, y) locations of each edge across the training shapes.
    level_str : `str`
        Prefix used in the verbose progress message.
    verbose : `bool`
        If ``True``, progress is printed.

    Returns
    -------
    def_cov : ``(2 * n_vertices, 2 * n_vertices)`` `ndarray`
        The accumulated deformation precision matrix.
    """
    # build deformation model
    if verbose:
        print_dynamic('{}Training deformation distribution per '
                      'graph edge'.format(level_str))
    def_len = 2 * graph.n_vertices
    def_cov = np.zeros((def_len, def_len))
    for e in range(graph.n_edges):
        # print progress
        if verbose:
            print_dynamic('{}Training deformation distribution '
                          'per edge - {}'.format(
                          level_str,
                          progress_bar_str(float(e + 1) / graph.n_edges,
                                           show_bar=False)))
        # get vertices adjacent to edge
        parent = graph.adjacency_array[e, 0]
        child = graph.adjacency_array[e, 1]
        # compute the inverse (precision) of the edge's 2x2 covariance
        edge_cov = np.linalg.inv(np.cov(relative_locations[..., e]))
        # store its values; note s3 carries a factor of 2
        s1 = edge_cov[0, 0]
        s2 = edge_cov[1, 1]
        s3 = 2 * edge_cov[0, 1]
        # Fill the covariance matrix
        # indices of the (x, y) rows of parent and child
        p1 = 2 * parent
        p2 = 2 * parent + 1
        c1 = 2 * child
        c2 = 2 * child + 1
        # up-left block: accumulated with `+=` because a vertex may appear
        # in several edges. NOTE(review): only [p2, p1] receives the full
        # s3 (the [p1, p2] mirror stays 0) — the matrix ends up
        # asymmetric, but its symmetric part matches the -s3/2 cross
        # blocks; this looks deliberate — confirm against the paper.
        def_cov[p1, p1] += s1
        def_cov[p2, p2] += s2
        def_cov[p2, p1] += s3
        # up-right block: plain `=` (each unordered vertex pair occurs in
        # at most one edge)
        def_cov[p1, c1] = - s1
        def_cov[p2, c2] = - s2
        def_cov[p1, c2] = - s3 / 2
        def_cov[p2, c1] = - s3 / 2
        # down-left block
        def_cov[c1, p1] = - s1
        def_cov[c2, p2] = - s2
        def_cov[c1, p2] = - s3 / 2
        def_cov[c2, p1] = - s3 / 2
        # down-right block
        def_cov[c1, c1] += s1
        def_cov[c2, c2] += s2
        def_cov[c1, c2] += s3
    return def_cov
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:58,代码来源:builder.py
示例7: _create_pyramid
def _create_pyramid(cls, images, n_levels, downscale, pyramid_on_features,
feature_type, verbose=False):
r"""
Function that creates a generator function for Gaussian pyramid. The
pyramid can be created either on the feature space or the original
(intensities) space.
Parameters
----------
images: list of :class:`menpo.image.Image`
The set of landmarked images from which to build the AAM.
n_levels: int
The number of multi-resolution pyramidal levels to be used.
downscale: float
The downscale factor that will be used to create the different
pyramidal levels.
pyramid_on_features: boolean
If True, the features are extracted at the highest level and the
pyramid is created on the feature images.
If False, the pyramid is created on the original (intensities)
space.
feature_type: list of size 1 with str or function/closure or None
The feature type to be used in case pyramid_on_features is enabled.
verbose: bool, Optional
Flag that controls information and progress printing.
Default: False
Returns
-------
generator: function
The generator function of the Gaussian pyramid.
"""
if pyramid_on_features:
# compute features at highest level
feature_images = []
for c, i in enumerate(images):
if verbose:
print_dynamic('- Computing feature space: {}'.format(
progress_bar_str((c + 1.) / len(images),
show_bar=False)))
feature_images.append(compute_features(i, feature_type[0]))
if verbose:
print_dynamic('- Computing feature space: Done\n')
# create pyramid on feature_images
generator = [i.gaussian_pyramid(n_levels=n_levels,
downscale=downscale)
for i in feature_images]
else:
# create pyramid on intensities images
# features will be computed per level
generator = [i.gaussian_pyramid(n_levels=n_levels,
downscale=downscale)
for i in images]
return generator
开发者ID:yymath,项目名称:menpo,代码行数:56,代码来源:builder.py
示例8: _scale_images
def _scale_images(cls, images, s, level_str, verbose):
scaled_images = []
for c, i in enumerate(images):
if verbose:
print_dynamic(
'{}Scaling features: {}'.format(
level_str, progress_bar_str((c + 1.) / len(images),
show_bar=False)))
scaled_images.append(i.rescale(s))
return scaled_images
开发者ID:VLAM3D,项目名称:alabortcvpr2015,代码行数:10,代码来源:builder.py
示例9: _normalize_images
def _normalize_images(self, images, group, label, ref_shape, verbose):
    r"""
    Rescale every image so the scale of its landmarks matches the scale
    of ``ref_shape``, then optionally smooth with ``self.sigma``.
    """
    n_images = len(images)
    norm_images = []
    for count, image in enumerate(images):
        if verbose:
            print_dynamic('- Normalizing images size: {}'.format(
                progress_bar_str((count + 1.) / n_images, show_bar=False)))
        rescaled = rescale_to_reference_shape(image, ref_shape, group=group,
                                              label=label)
        if self.sigma:
            # presumably fsmooth is a Gaussian smoothing of the pixel
            # array — TODO confirm
            rescaled.pixels = fsmooth(rescaled.pixels, self.sigma)
        norm_images.append(rescaled)
    return norm_images
开发者ID:VLAM3D,项目名称:alabortcvpr2015,代码行数:13,代码来源:builder.py
示例10: _compute_features
def _compute_features(self, images, level_str, verbose):
feature_images = []
for c, i in enumerate(images):
if verbose:
print_dynamic(
'{}Computing feature space: {}'.format(
level_str, progress_bar_str((c + 1.) / len(images),
show_bar=False)))
if self.features:
i = self.features(i)
feature_images.append(i)
return feature_images
开发者ID:VLAM3D,项目名称:alabortcvpr2015,代码行数:13,代码来源:builder.py
示例11: _compute_minimum_spanning_tree
def _compute_minimum_spanning_tree(shapes, root_vertex, level_str, verbose):
    r"""
    Compute a minimum spanning tree over the landmark points of ``shapes``.

    A complete graph over the ``n_points`` vertices is built; the weight of
    edge (i, j) is the sum over all shapes of the negative log-density of
    the (i, j) point difference under a Gaussian fitted to those
    differences across shapes. The MST of that weighted graph, rooted at
    ``root_vertex``, is returned.

    Parameters
    ----------
    shapes : list of point clouds
        Training shapes; each must expose ``n_points`` and ``points``.
    root_vertex : `int`
        Root of the returned spanning tree.
    level_str : `str`
        Prefix for progress messages.
    verbose : `bool`
        If ``True``, progress is printed.

    Returns
    -------
    tree
        The minimum spanning tree computed by
        ``UndirectedGraph.minimum_spanning_tree``.
    """
    # initialize edges and weights matrix
    n_vertices = shapes[0].n_points
    n_edges = nchoosek(n_vertices, 2)
    weights = np.zeros((n_vertices, n_vertices))
    edges = np.empty((n_edges, 2), dtype=np.int32)
    # fill edges and weights; e indexes each unordered pair (i, j) once
    e = -1
    for i in range(n_vertices-1):
        for j in range(i+1, n_vertices, 1):
            # edge counter
            e += 1
            # print progress
            if verbose:
                print_dynamic('{}Computing complete graph`s weights - {}'.format(
                    level_str,
                    progress_bar_str(float(e + 1) / n_edges,
                                     show_bar=False)))
            # fill in edges
            edges[e, 0] = i
            edges[e, 1] = j
            # create data matrix of edge: per-shape (x, y) differences
            # between points i and j
            diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes]
            diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes]
            coords = np.array([diffs_x, diffs_y])
            # compute mean
            m = np.mean(coords, axis=1)
            # compute covariance
            c = np.cov(coords)
            # get weight: accumulated negative log-likelihood of each
            # shape's difference under the fitted Gaussian
            for im in range(len(shapes)):
                weights[i, j] += -np.log(multivariate_normal.pdf(coords[:, im],
                                                                 mean=m, cov=c))
            # weights matrix is kept symmetric
            weights[j, i] = weights[i, j]
    # create undirected graph
    complete_graph = UndirectedGraph(edges)
    if verbose:
        print_dynamic('{}Minimum spanning graph computed.\n'.format(level_str))
    # compute minimum spanning graph
    return complete_graph.minimum_spanning_tree(weights, root_vertex)
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:50,代码来源:builder.py
示例12: _warp_images
def _warp_images(self, images, shapes, _, level_str, verbose):
    r"""
    Extract a parts-based image per (image, shape) pair using
    ``self.parts_shape`` and ``self.normalize_parts``.
    """
    n_images = len(images)
    parts_images = []
    for count, (image, shape) in enumerate(zip(images, shapes)):
        if verbose:
            print_dynamic('{}Warping images - {}'.format(
                level_str,
                progress_bar_str(float(count + 1) / n_images,
                                 show_bar=False)))
        parts_images.append(build_parts_image(
            image, shape, self.parts_shape,
            normalize_parts=self.normalize_parts))
    return parts_images
开发者ID:VLAM3D,项目名称:alabortcvpr2015,代码行数:15,代码来源:builder.py
示例13: compute_sparse_covariance
def compute_sparse_covariance(X, adjacency_array, patch_len, level_str,
                              verbose):
    r"""
    Assemble and invert a block-structured precision matrix.

    For every edge, the rows of the two vertex blocks (``patch_len`` rows
    each) of ``X`` are stacked, their covariance is inverted, and the four
    quadrants of that inverse are accumulated into the corresponding
    blocks of an ``(n_features, n_features)`` matrix. The inverse of the
    assembled matrix is returned.
    """
    n_features, n_samples = X.shape
    n_edges = adjacency_array.shape[0]
    all_cov = np.zeros((n_features, n_features))
    for e in range(n_edges):
        if verbose:
            print_dynamic('{}Distribution per edge - {}'.format(
                level_str,
                progress_bar_str(float(e + 1) / n_edges,
                                 show_bar=False)))
        # the two vertices of this edge, in ascending order
        v1 = np.min(adjacency_array[e, :])
        v2 = np.max(adjacency_array[e, :])
        # row/column spans of the vertex blocks
        v1_from, v1_to = v1 * patch_len, (v1 + 1) * patch_len
        v2_from, v2_to = v2 * patch_len, (v2 + 1) * patch_len
        # stack the vertex data and invert its covariance
        stacked = np.concatenate((X[v1_from:v1_to, :], X[v2_from:v2_to, :]))
        icov = np.linalg.inv(np.cov(stacked))
        # scatter the four quadrants of the inverse into the big matrix
        all_cov[v1_from:v1_to, v2_from:v2_to] += icov[:patch_len, patch_len:]
        all_cov[v2_from:v2_to, v1_from:v1_to] += icov[patch_len:, :patch_len]
        all_cov[v1_from:v1_to, v1_from:v1_to] += icov[:patch_len, :patch_len]
        all_cov[v2_from:v2_to, v2_from:v2_to] += icov[patch_len:, patch_len:]
    return np.linalg.inv(all_cov)
开发者ID:hporange,项目名称:antonakoscvpr2015,代码行数:46,代码来源:sparsepca.py
示例14: _normalization_wrt_reference_shape
def _normalization_wrt_reference_shape(cls, images, group, label,
reference_shape, verbose=False):
r"""
Normalizes the images sizes with respect to the reference
shape (mean shape) scaling. This step is essential before building a
deformable model.
Parameters
----------
images : list of :map:`MaskedImage`
The set of landmarked images from which to build the model.
group : `string`
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
label : `string`
The label of the landmark manager that you wish to use. If no
label is passed, the convex hull of all landmarks is used.
reference_shape : :map:`PointCloud`
The reference shape that is used to resize all training images to
a consistent object size.
verbose: bool, optional
Flag that controls information and progress printing.
Returns
-------
normalized_images : :map:`MaskedImage` list
A list with the normalized images.
"""
normalized_images = []
for c, i in enumerate(images):
if verbose:
print_dynamic('- Normalizing images size: {}'.format(
progress_bar_str((c + 1.) / len(images),
show_bar=False)))
normalized_images.append(i.rescale_to_reference_shape(
reference_shape, group=group, label=label))
if verbose:
print_dynamic('- Normalizing images size: Done\n')
return normalized_images
开发者ID:OlivierML,项目名称:menpofit,代码行数:44,代码来源:trainer.py
示例15: __init__
def __init__(self, samples, centre=True, bias=False, verbose=False,
             n_samples=None):
    """
    Build a PCA model from ``samples``.

    The first sample is used as the template that configures the data
    matrix; the rest are vectorized into rows of a single pre-allocated
    array, which is then decomposed by
    ``principal_component_decomposition``.

    Parameters
    ----------
    samples : list or iterator of vectorizable objects
        The training samples. If ``n_samples`` is ``None``, ``samples``
        must be a list; otherwise it is consumed as an iterator.
    centre : `bool`, optional
        Whether to centre the data before decomposition.
    bias : `bool`, optional
        Whether to use the biased covariance estimator.
    verbose : `bool`, optional
        If ``True``, allocation size and progress are printed.
    n_samples : `int` or ``None``, optional
        Number of samples when ``samples`` is an iterator.
    """
    # get the first element as the template and use it to configure the
    # data matrix
    if n_samples is None:
        # samples is a list
        n_samples = len(samples)
        template = samples[0]
        samples = samples[1:]
    else:
        # samples is an iterator
        template = next(samples)
    n_features = template.n_parameters
    template_vector = template.as_vector()
    data = np.zeros((n_samples, n_features), dtype=template_vector.dtype)
    # now we can fill in the first element from the template
    data[0] = template_vector
    del template_vector
    if verbose:
        # NOTE(review): implicit string concatenation here prints e.g.
        # "0.50GB" with no space before the unit — looks unintended.
        print('Allocated data matrix {:.2f}'
              'GB'.format(data.nbytes / 2 ** 30))
    # 1-based as we have the template vector set already
    for i, sample in enumerate(samples, 1):
        if i >= n_samples:
            # ignore extra samples beyond the declared count
            break
        if verbose:
            # i + 1 counts the already-stored template as well
            print_dynamic(
                'Building data matrix from {} samples - {}'.format(
                    n_samples,
                    progress_bar_str(float(i + 1) / n_samples, show_bar=True)))
        data[i] = sample.as_vector()
    # compute pca
    e_vectors, e_values, mean = principal_component_decomposition(
        data, whiten=False, centre=centre, bias=bias, inplace=True)
    super(PCAModel, self).__init__(e_vectors, mean, template)
    self.centred = centre
    self.biased = bias
    self._eigenvalues = e_values
    # start the active components as all the components
    self._n_active_components = int(self.n_components)
    self._trimmed_eigenvalues = None
示例16: _build_appearance_model_block_diagonal
def _build_appearance_model_block_diagonal(all_patches_array, n_points,
                                           patch_shape, n_channels,
                                           n_appearance_parameters, level_str,
                                           verbose):
    r"""
    Train a per-patch (block diagonal) appearance distribution.

    Parameters
    ----------
    all_patches_array : `ndarray`
        Patch data; the first axis indexes the ``n_points`` patches and
        the last axis indexes the training images.
    n_points : `int`
        Number of landmark points (= number of diagonal blocks).
    patch_shape : `tuple`
        Spatial shape of each patch. Kept for interface compatibility; the
        block size is inferred from ``all_patches_array`` by the reshape
        below.
    n_channels : `int`
        Number of image channels. Kept for interface compatibility (see
        ``patch_shape``).
    n_appearance_parameters : `int`
        Number of parameters used when inverting each patch covariance.
    level_str : `str`
        Prefix for progress messages.
    verbose : `bool`
        If ``True``, progress is printed.

    Returns
    -------
    app_mean : `ndarray`
        Mean appearance vector.
    app_cov : CSR sparse matrix
        Block-diagonal inverse covariance, one block per patch.
    """
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution per '
                      'patch'.format(level_str))
    # mean appearance vector (mean over the image axis)
    app_mean = np.mean(all_patches_array, axis=-1)
    # number of images
    n_images = all_patches_array.shape[-1]
    # NOTE: a dead local ``patch_len = np.prod(patch_shape) * n_channels``
    # was removed — it was computed but never used; the patch length is
    # implied by the reshape below.
    # compute covariance matrix for each patch
    all_cov = []
    for e in range(n_points):
        # print progress
        if verbose:
            print_dynamic('{}Training appearance distribution '
                          'per patch - {}'.format(
                          level_str,
                          progress_bar_str(float(e + 1) / n_points,
                                           show_bar=False)))
        # select this patch's data and vectorize it: (patch_len, n_images)
        patches_vector = all_patches_array[e, ...].reshape(-1, n_images)
        # compute the regularized covariance inverse
        inv_cov_mat = _covariance_matrix_inverse(np.cov(patches_vector),
                                                 n_appearance_parameters)
        # store covariance block
        all_cov.append(inv_cov_mat)
    # assemble the blocks into a single sparse block-diagonal matrix
    return app_mean, block_diag(all_cov).tocsr()
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:43,代码来源:builder.py
示例17: create_pyramid
def create_pyramid(images, n_levels, downscale, features, verbose=False):
    r"""
    Build one Gaussian-pyramid generator per image.

    Parameters
    ----------
    images : list of :map:`Image`
        The set of landmarked images from which to build the AAM.
    n_levels : `int`
        Number of multi-resolution pyramid levels.
    downscale : `float`
        Downscale factor between consecutive levels.
    features : ``callable`` ``[callable]``
        A single callable means the features are computed once followed by
        a Gaussian pyramid; a list of callables means features are
        extracted at each level (after downsizing and blurring).
    verbose : `bool`, optional
        If ``True`` (and the single-callable, slower path is taken),
        progress is printed.

    Returns
    -------
    list of generators
        The Gaussian pyramid generators, one per image.
    """
    slow_path = is_pyramid_on_features(features)
    n_images = len(images)
    pyramids = []
    for index, image in enumerate(images):
        if slow_path and verbose:
            print_dynamic(
                'Computing top level feature space - {}'.format(
                    progress_bar_str((index + 1.) / n_images,
                                     show_bar=False)))
        pyramids.append(pyramid_of_feature_images(n_levels, downscale,
                                                  features, image))
    return pyramids
开发者ID:csagonas,项目名称:menpofit,代码行数:41,代码来源:base.py
示例18: _warp_images
def _warp_images(images, group, label, patch_shape, as_vectors, level_str,
                 verbose):
    r"""
    Extract a patch image per training image.

    Returns a single `ndarray` stacking all patch images along a trailing
    image axis when ``as_vectors`` is ``True``, otherwise a list of the
    patch images.
    """
    # number of landmark points per image
    n_points = images[0].landmarks[group][label].n_points
    # TODO: introduce support for offsets
    patches_image_shape = (n_points, 1, images[0].n_channels) + patch_shape
    n_images = len(images)
    # pre-allocate the stacked output only in the vectorized case
    if as_vectors:
        all_patches = np.empty(patches_image_shape + (n_images,))
    else:
        all_patches = []
    for count, image in enumerate(images):
        if verbose:
            print_dynamic('{}Extracting patches from images - {}'.format(
                level_str,
                progress_bar_str(float(count + 1) / n_images,
                                 show_bar=False)))
        patches_image = build_parts_image(
            image, image.landmarks[group][label], patch_shape)
        if as_vectors:
            all_patches[..., count] = patches_image.pixels
        else:
            all_patches.append(patches_image)
    return all_patches
开发者ID:nontas,项目名称:antonakoscvpr2015,代码行数:37,代码来源:builder.py
示例19: _warp_images
def _warp_images(images, group, label, patch_shape, as_vectors, level_str,
                 verbose):
    r"""
    Extract a patches image per training image, optionally vectorized.

    Returns an ``(patches_len, n_images)`` `ndarray` when ``as_vectors``
    is ``True``, otherwise a list of patches images.
    """
    # length of the vectorized patches of a single image
    n_points = images[0].landmarks[group][label].n_points
    patches_len = np.prod(patch_shape) * images[0].n_channels * n_points
    n_images = len(images)
    # pre-allocate the stacked output only in the vectorized case
    if as_vectors:
        all_patches = np.empty((patches_len, n_images))
    else:
        all_patches = []
    for count, image in enumerate(images):
        if verbose:
            print_dynamic('{}Extracting patches from images - {}'.format(
                level_str,
                progress_bar_str(float(count + 1) / n_images,
                                 show_bar=False)))
        patches_image = build_patches_image(image, None, patch_shape,
                                            group=group, label=label)
        if as_vectors:
            all_patches[..., count] = vectorize_patches_image(patches_image)
        else:
            all_patches.append(patches_image)
    return all_patches
开发者ID:VLAM3D,项目名称:antonakoscvpr2015,代码行数:36,代码来源:builder.py
示例20: _normalization_wrt_reference_shape
def _normalization_wrt_reference_shape(cls, images, group, label,
                                       normalization_diagonal,
                                       interpolator, verbose=False):
    r"""
    Compute the reference (mean) shape and rescale all images to it.

    The steps are: (1) the reference shape is the mean of the images'
    landmark shapes; (2) it is optionally rescaled so its bounding-box
    diagonal equals ``normalization_diagonal``; (3) every image is
    rescaled so that its landmark scale matches the reference shape's
    scale.

    Parameters
    ----------
    images : list of :class:`menpo.image.MaskedImage`
        The set of landmarked images from which to build the model.
    group : `string`
        Key of the landmark set to use (``None`` allowed if only one set
        exists).
    label : `string`
        Landmark label; if ``None`` the convex hull of all landmarks is
        used.
    normalization_diagonal : `int` or ``None``
        Target diagonal length of the mean shape's bounding box; if
        ``None`` the mean shape is left at its natural scale. Because the
        reference frame is computed from the mean landmarks, this also
        fixes the reference frame's diagonal (provided feature computation
        does not change the image size).
    interpolator : `string`
        Interpolator used to perform the warps.
    verbose : `bool`, optional
        If ``True``, progress is printed.

    Returns
    -------
    reference_shape : :map:`PointCloud`
        The (possibly rescaled) mean shape used for normalization.
    normalized_images : :map:`MaskedImage` list
        The normalized images.
    """
    if verbose:
        print_dynamic('- Computing reference shape')
    # mean of the images' landmark shapes
    shapes = [image.landmarks[group][label] for image in images]
    reference_shape = mean_pointcloud(shapes)
    if normalization_diagonal:
        # rescale the mean shape so its bounding-box diagonal matches the
        # requested length
        x_range, y_range = reference_shape.range()
        diagonal = np.sqrt(x_range ** 2 + y_range ** 2)
        Scale(normalization_diagonal / diagonal,
              reference_shape.n_dims).apply_inplace(reference_shape)
    # rescale every image to the reference shape's landmark scale
    n_images = len(images)
    normalized_images = []
    for count, image in enumerate(images):
        if verbose:
            print_dynamic('- Normalizing images size: {}'.format(
                progress_bar_str((count + 1.) / n_images,
                                 show_bar=False)))
        normalized_images.append(image.rescale_to_reference_shape(
            reference_shape, group=group, label=label,
            interpolator=interpolator))
    if verbose:
        print_dynamic('- Normalizing images size: Done\n')
    return reference_shape, normalized_images
开发者ID:yymath,项目名称:menpo,代码行数:79,代码来源:builder.py
注:本文中的menpo.visualize.progress_bar_str函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论