This article collects and summarizes typical usage examples of the pycocotools.mask.decode function in Python. If you have been wondering what exactly the Python decode function does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
Twenty code examples of the decode function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
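Before the project examples, here is a minimal sketch of the basic encode/decode round trip with pycocotools.mask. It is not taken from any of the projects below, and the array values are invented for illustration:

import numpy as np
from pycocotools import mask as mask_util

# pycocotools expects a Fortran-ordered uint8 array for encoding.
m = np.asfortranarray(np.array([[0, 1], [1, 1]], dtype=np.uint8))
rle = mask_util.encode(m)        # compressed RLE dict: {'size': [2, 2], 'counts': ...}
decoded = mask_util.decode(rle)  # back to a (2, 2) uint8 binary mask
assert np.array_equal(decoded, m)

# decode also accepts a list of RLEs, in which case it returns an (h, w, n) array.
stacked = mask_util.decode([rle, rle])
print(stacked.shape)             # (2, 2, 2)

Most of the examples below follow this pattern: obtain an RLE (often via frPyObjects, merge, or annToRLE) and call decode to turn it back into a binary mask.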
Example 1: testSingleImageGroundtruthExport
def testSingleImageGroundtruthExport(self):
    masks = np.array(
        [[[1, 1], [1, 1]],
         [[0, 0], [0, 1]],
         [[0, 0], [0, 0]]], dtype=np.uint8)
    boxes = np.array([[0, 0, 1, 1],
                      [0, 0, .5, .5],
                      [.5, .5, 1, 1]], dtype=np.float32)
    coco_boxes = np.array([[0, 0, 1, 1],
                           [0, 0, .5, .5],
                           [.5, .5, .5, .5]], dtype=np.float32)
    classes = np.array([1, 2, 3], dtype=np.int32)
    is_crowd = np.array([0, 1, 0], dtype=np.int32)
    next_annotation_id = 1
    expected_counts = ['04', '31', '4']

    # Tests exporting without passing in is_crowd (for backward compatibility).
    coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
        image_id='first_image',
        category_id_set=set([1, 2, 3]),
        next_annotation_id=next_annotation_id,
        groundtruth_boxes=boxes,
        groundtruth_classes=classes,
        groundtruth_masks=masks)
    for i, annotation in enumerate(coco_annotations):
        self.assertEqual(annotation['segmentation']['counts'],
                         expected_counts[i])
        self.assertTrue(np.all(np.equal(mask.decode(
            annotation['segmentation']), masks[i])))
        self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
        self.assertEqual(annotation['image_id'], 'first_image')
        self.assertEqual(annotation['category_id'], classes[i])
        self.assertEqual(annotation['id'], i + next_annotation_id)

    # Tests exporting with is_crowd.
    coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
        image_id='first_image',
        category_id_set=set([1, 2, 3]),
        next_annotation_id=next_annotation_id,
        groundtruth_boxes=boxes,
        groundtruth_classes=classes,
        groundtruth_masks=masks,
        groundtruth_is_crowd=is_crowd)
    for i, annotation in enumerate(coco_annotations):
        self.assertEqual(annotation['segmentation']['counts'],
                         expected_counts[i])
        self.assertTrue(np.all(np.equal(mask.decode(
            annotation['segmentation']), masks[i])))
        self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
        self.assertEqual(annotation['image_id'], 'first_image')
        self.assertEqual(annotation['category_id'], classes[i])
        self.assertEqual(annotation['iscrowd'], is_crowd[i])
        self.assertEqual(annotation['id'], i + next_annotation_id)
Developer: ALISCIFP, Project: models, Lines of code: 53, Source file: coco_tools_test.py
Example 2: testExportSegmentsToCOCO
def testExportSegmentsToCOCO(self):
    image_ids = ['first', 'second']
    detection_masks = [
        np.array([[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
                 dtype=np.uint8),
        np.array([[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
                 dtype=np.uint8)]
    for i, detection_mask in enumerate(detection_masks):
        detection_masks[i] = detection_mask[:, :, :, None]
    detection_scores = [np.array([.8], np.float), np.array([.7], np.float)]
    detection_classes = [np.array([1], np.int32), np.array([1], np.int32)]

    categories = [{'id': 0, 'name': 'person'},
                  {'id': 1, 'name': 'cat'},
                  {'id': 2, 'name': 'dog'}]
    output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json')
    result = coco_tools.ExportSegmentsToCOCO(
        image_ids,
        detection_masks,
        detection_scores,
        detection_classes,
        categories,
        output_path=output_path)

    with tf.gfile.GFile(output_path, 'r') as f:
        written_result = f.read()
        written_result = json.loads(written_result)
        mask_load = mask.decode([written_result[0]['segmentation']])
        self.assertTrue(np.allclose(mask_load, detection_masks[0]))
        self.assertAlmostEqual(result, written_result)
Developer: NoPointExc, Project: models, Lines of code: 31, Source file: coco_tools_test.py
Example 3: rle_masks_to_boxes
def rle_masks_to_boxes(masks):
    """Computes the bounding box of each mask in a list of RLE encoded masks."""
    if len(masks) == 0:
        return []

    decoded_masks = [
        np.array(mask_util.decode(rle), dtype=np.float32) for rle in masks
    ]

    def get_bounds(flat_mask):
        inds = np.where(flat_mask > 0)[0]
        return inds.min(), inds.max()

    boxes = np.zeros((len(decoded_masks), 4))
    keep = [True] * len(decoded_masks)
    for i, mask in enumerate(decoded_masks):
        if mask.sum() == 0:
            keep[i] = False
            continue
        flat_mask = mask.sum(axis=0)
        x0, x1 = get_bounds(flat_mask)
        flat_mask = mask.sum(axis=1)
        y0, y1 = get_bounds(flat_mask)
        boxes[i, :] = (x0, y0, x1, y1)

    return boxes, np.where(keep)[0]
Developer: Alphonses, Project: Detectron, Lines of code: 26, Source file: segms.py
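A quick usage sketch for rle_masks_to_boxes above, assuming numpy and pycocotools.mask are imported as np and mask_util (as in segms.py); the two masks are invented for illustration:

m1 = np.zeros((8, 8), dtype=np.uint8, order='F')
m1[2:5, 3:7] = 1                                   # occupies rows 2-4, columns 3-6
m2 = np.zeros((8, 8), dtype=np.uint8, order='F')   # an empty mask, which gets filtered out
rles = [mask_util.encode(m1), mask_util.encode(m2)]
boxes, keep = rle_masks_to_boxes(rles)
print(boxes[0])  # [3. 2. 6. 4.]  -> (x0, y0, x1, y1)
print(keep)      # [0]            -> only the non-empty mask is kept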
Example 4: evaluate_masks
def evaluate_masks(
    json_dataset,
    all_boxes,
    all_segms,
    output_dir,
    use_salt=True,
    cleanup=False
):
    if cfg.CLUSTER.ON_CLUSTER:
        # On the cluster avoid saving these files in the job directory
        output_dir = '/tmp'
    res_file = os.path.join(
        output_dir, 'segmentations_' + json_dataset.name + '_results')
    if use_salt:
        res_file += '_{}'.format(str(uuid.uuid4()))
    res_file += '.json'

    results_dir = os.path.join(output_dir, 'results')
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    os.environ['CITYSCAPES_DATASET'] = DATASETS[json_dataset.name][RAW_DIR]
    os.environ['CITYSCAPES_RESULTS'] = output_dir

    # Load the Cityscapes eval script *after* setting the required env vars,
    # since the script reads their values into global variables (at load time).
    import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling \
        as cityscapes_eval

    roidb = json_dataset.get_roidb()
    for i, entry in enumerate(roidb):
        im_name = entry['image']
        basename = os.path.splitext(os.path.basename(im_name))[0]
        txtname = os.path.join(output_dir, basename + 'pred.txt')
        with open(txtname, 'w') as fid_txt:
            if i % 10 == 0:
                logger.info('i: {}: {}'.format(i, basename))
            for j in range(1, len(all_segms)):
                clss = json_dataset.classes[j]
                clss_id = cityscapes_eval.name2label[clss].id
                segms = all_segms[j][i]
                boxes = all_boxes[j][i]
                if segms == []:
                    continue
                masks = mask_util.decode(segms)

                for k in range(boxes.shape[0]):
                    score = boxes[k, -1]
                    mask = masks[:, :, k]
                    pngname = os.path.join(
                        'results',
                        basename + '_' + clss + '_{}.png'.format(k))
                    # write txt
                    fid_txt.write('{} {} {}\n'.format(pngname, clss_id, score))
                    # save mask
                    cv2.imwrite(os.path.join(output_dir, pngname), mask * 255)

    logger.info('Evaluating...')
    cityscapes_eval.main([])
    return None
Developer: csjunxu, Project: Detectron, Lines of code: 60, Source file: cityscapes_json_dataset_evaluator.py
Example 5: polys_to_mask_wrt_box
def polys_to_mask_wrt_box(polygons, box, M):
    """Convert from the COCO polygon segmentation format to a binary mask
    encoded as a 2D array of data type numpy.float32. The polygon segmentation
    is understood to be enclosed in the given box and rasterized to an M x M
    mask. The resulting mask is therefore of shape (M, M).
    """
    w = box[2] - box[0]
    h = box[3] - box[1]
    w = np.maximum(w, 1)
    h = np.maximum(h, 1)

    polygons_norm = []
    for poly in polygons:
        p = np.array(poly, dtype=np.float32)
        p[0::2] = (p[0::2] - box[0]) * M / w
        p[1::2] = (p[1::2] - box[1]) * M / h
        polygons_norm.append(p)

    rle = mask_util.frPyObjects(polygons_norm, M, M)
    mask = np.array(mask_util.decode(rle), dtype=np.float32)
    # Flatten in case polygons was a list
    mask = np.sum(mask, axis=2)
    mask = np.array(mask > 0, dtype=np.float32)
    return mask
Developer: Alphonses, Project: Detectron, Lines of code: 25, Source file: segms.py
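A usage sketch for polys_to_mask_wrt_box above; the box and the triangle polygon are invented for illustration, and numpy / pycocotools.mask are assumed to be imported as in segms.py:

box = np.array([10.0, 10.0, 110.0, 110.0])            # x0, y0, x1, y1
triangle = [[20.0, 20.0, 100.0, 20.0, 20.0, 100.0]]   # one flat [x, y, x, y, ...] polygon
m = polys_to_mask_wrt_box(triangle, box, M=28)
print(m.shape, m.dtype, m.sum() > 0)  # (28, 28) float32 True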
Example 6: crop_mask
def crop_mask(boxes, segmentations, flipped, imsize):
    assert (boxes.shape[0] == len(segmentations))
    psegmentations = []
    for i in xrange(len(segmentations)):
        gts = segmentations[i]
        box = boxes[i, :]
        if type(gts) == list and gts:
            # polygon format
            assert (type(gts[0]) != dict)
            prle = mask.frPyObjects(gts, imsize[1], imsize[0])
        elif type(gts) == dict and type(gts['counts']) == list:
            # uncompressed RLE
            prle = mask.frPyObjects([gts], imsize[1], imsize[0])
        elif type(gts) == dict and \
                (type(gts['counts']) == unicode or type(gts['counts']) == str):
            # already a compressed RLE
            prle = [gts]
        else:
            print '{} box has no segmentation'.format(i)
            psegmentations.append([])
            continue
        if len(prle) == 1:
            prle = prle[0]
        else:
            prle = mask.merge(prle)
        pmask = mask.decode([prle])
        if flipped:
            pmask = pmask[:, ::-1, :]
        pmask = np.copy(pmask[box[1]:box[3], box[0]:box[2], :], order='F')
        psegmentations.append(mask.encode(pmask))
    return psegmentations
Developer: shallowyuan, Project: cosegmentor, Lines of code: 28, Source file: roidb.py
Example 7: load_dataset
def load_dataset(self):
    dataset = self.cfg.dataset
    dataset_phase = self.cfg.dataset_phase
    dataset_ann = self.cfg.dataset_ann

    # initialize COCO api
    annFile = '%s/annotations/%s_%s.json' % (dataset, dataset_ann, dataset_phase)
    self.coco = COCO(annFile)

    imgIds = self.coco.getImgIds()
    data = []

    # loop through each image
    for imgId in imgIds:
        item = DataItem()

        img = self.coco.loadImgs(imgId)[0]
        item.im_path = "%s/images/%s/%s" % (dataset, dataset_phase, img["file_name"])
        item.im_size = [3, img["height"], img["width"]]
        item.coco_id = imgId
        annIds = self.coco.getAnnIds(imgIds=img['id'], iscrowd=False)
        anns = self.coco.loadAnns(annIds)

        all_person_keypoints = []
        masked_persons_RLE = []
        visible_persons_RLE = []
        all_visibilities = []

        # Consider only images with people
        has_people = len(anns) > 0
        if not has_people and self.cfg.coco_only_images_with_people:
            continue

        for ann in anns:  # loop through each person
            person_keypoints = []
            visibilities = []
            if ann["num_keypoints"] != 0:
                for i in range(self.cfg.num_joints):
                    x_coord = ann["keypoints"][3 * i]
                    y_coord = ann["keypoints"][3 * i + 1]
                    visibility = ann["keypoints"][3 * i + 2]
                    visibilities.append(visibility)
                    if visibility != 0:  # i.e. if labeled
                        person_keypoints.append([i, x_coord, y_coord])
                all_person_keypoints.append(np.array(person_keypoints))
                visible_persons_RLE.append(maskUtils.decode(self.coco.annToRLE(ann)))
                all_visibilities.append(visibilities)
            if ann["num_keypoints"] == 0:
                masked_persons_RLE.append(self.coco.annToRLE(ann))

        item.joints = all_person_keypoints
        item.im_neg_mask = maskUtils.merge(masked_persons_RLE)
        if self.cfg.use_gt_segm:
            item.gt_segm = np.moveaxis(np.array(visible_persons_RLE), 0, -1)
            item.visibilities = all_visibilities
        data.append(item)

    self.has_gt = self.cfg.dataset != "image_info"
    return data
Developer: PJunhyuk, Project: people-counting-pose, Lines of code: 60, Source file: mscoco.py
Example 8: compute_scmap_weights
def compute_scmap_weights(self, scmap_shape, joint_id, data_item):
    size = scmap_shape[0:2]
    scmask = np.ones(size)
    m = maskUtils.decode(data_item.im_neg_mask)
    if m.size:
        scmask = 1.0 - imresize(m, size)
    scmask = np.stack([scmask] * self.cfg.num_joints, axis=-1)
    return scmask
Developer: PJunhyuk, Project: people-counting-pose, Lines of code: 8, Source file: mscoco.py
Example 9: annToMask
def annToMask(self, ann, height, width):
    """
    Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
    :return: binary mask (numpy 2D array)
    """
    rle = self.annToRLE(ann, height, width)
    m = maskUtils.decode(rle)
    return m
Developer: PanZiqiAI, Project: FashionAI_Key_Points_Detection, Lines of code: 8, Source file: coco.py
Example 10: _flip_rle
def _flip_rle(rle, height, width):
    if 'counts' in rle and type(rle['counts']) == list:
        # Magic RLE format handling painfully discovered by looking at the
        # COCO API showAnns function.
        rle = mask_util.frPyObjects([rle], height, width)
    mask = mask_util.decode(rle)
    mask = mask[:, ::-1, :]
    rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
    return rle
Developer: Alphonses, Project: Detectron, Lines of code: 9, Source file: segms.py
Example 11: draw_objdb_masks
def draw_objdb_masks(self, output_dir, objdb=None):
    if objdb == None:
        objdb = self.objdb

    mask_dir = osp.join(output_dir, '{}_objdb_masks'.format(self._image_set))
    img_dir = osp.join(output_dir, '{}_objdb_imgs'.format(self._image_set))
    ds_utils.maybe_create(output_dir)
    ds_utils.maybe_create(mask_dir)
    ds_utils.maybe_create(img_dir)

    for i in xrange(len(objdb)):
        obj = objdb[i]
        im_path = obj['image']
        ann_id = obj['obj_id']
        poly = obj['poly']
        bb = obj['box'].astype(np.int16)
        cls = obj['cls']
        width = obj['width']
        height = obj['height']

        img = cv2.imread(im_path, cv2.IMREAD_COLOR)
        msk = np.amax(COCOmask.decode(poly), axis=2)

        # binarize the mask
        msk = msk * 255
        retVal, msk = cv2.threshold(msk, 127, 255, cv2.THRESH_BINARY)
        msk = msk.astype(np.uint8)
        # msk = ds_utils.dilate_mask(msk, 9)

        # img = (1 - 0.5/255 * msk.reshape((height, width, 1))) * img + \
        #       0.5/255 * msk.reshape((height, width, 1)) * \
        #       np.random.random((1, 3)) * 255
        # cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), \
        #               (0, 255, 0), 2)
        #
        # fontScale = 0.0009 * math.sqrt(float(width*width + height*height))
        #
        # cv2.putText(img, '{:}'.format(self.classes[cls]), \
        #             (bb[0], bb[1] - 2), \
        #             cv2.FONT_HERSHEY_SIMPLEX, \
        #             fontScale, (0, 0, 255), 1)

        im_name, im_ext = osp.splitext(osp.basename(im_path))

        output_path = osp.join(mask_dir, im_name + '_' + str(ann_id).zfill(12) + im_ext)
        # output_path = osp.join(mask_dir, im_name+im_ext)
        cv2.imwrite(output_path, msk)

        output_path = osp.join(img_dir, im_name + '_' + str(ann_id).zfill(12) + im_ext)
        # output_path = osp.join(img_dir, im_name+im_ext)
        cv2.imwrite(output_path, img)
        print i
Developer: liuguoyou, Project: who_where, Lines of code: 57, Source file: coco.py
Example 12: get_mask
def get_mask(idx):
    ann_ids = coco.getAnnIds(imgIds=img_ids[idx])
    anns = coco.loadAnns(ann_ids)
    img = coco.loadImgs(img_ids[idx])[0]
    m = np.zeros((img['height'], img['width']))
    for j in anns:
        if j['iscrowd']:
            rle = mask.frPyObjects(j['segmentation'], img['height'], img['width'])
            m += mask.decode(rle)
    return m < 0.5
Developer: cuizy15, Project: pose-ae-train, Lines of code: 10, Source file: ref.py
Example 13: convert
def convert(self, mode):
    width, height = self.size
    if mode == "mask":
        rles = mask_utils.frPyObjects(
            [p.numpy() for p in self.polygons], height, width
        )
        rle = mask_utils.merge(rles)
        mask = mask_utils.decode(rle)
        mask = torch.from_numpy(mask)
        # TODO add squeeze?
        return mask
Developer: laycoding, Project: maskrcnn-benchmark, Lines of code: 11, Source file: segmentation_mask.py
Example 14: draw_roidb_masks
def draw_roidb_masks(self, output_dir, roidb=None):
    mask_dir = osp.join(output_dir, '{}_roidb_masks'.format(self._image_set))
    img_dir = osp.join(output_dir, '{}_roidb_imgs'.format(self._image_set))
    ds_utils.maybe_create(output_dir)
    ds_utils.maybe_create(mask_dir)
    ds_utils.maybe_create(img_dir)

    if roidb == None:
        roidb = self.roidb

    for i in xrange(len(roidb)):
        rois = roidb[i]
        im_path = rois['image']
        clses = rois['clses']
        boxes = rois['boxes']
        rles = rois['polys']
        width = rois['width']
        height = rois['height']

        img = cv2.imread(im_path, cv2.IMREAD_COLOR)
        msk = np.zeros((height, width), dtype=np.uint8)

        for j in xrange(len(rles)):
            rle = rles[j]
            bb = boxes[j, :].astype(np.int)
            cls = clses[j]

            tmp = np.amax(COCOmask.decode(rle), axis=2) * 255
            retVal, tmp = cv2.threshold(tmp, 127, 255, cv2.THRESH_BINARY)
            tmp = tmp.astype(np.uint8)
            tmp = ds_utils.dilate_mask(tmp, 9)
            msk = np.maximum(msk, tmp)

            # fontScale = 0.0009 * math.sqrt(float(width*width + height*height))
            # cv2.rectangle(img, (bb[0], bb[1]), (bb[2], bb[3]), \
            #               (0, 255, 0), 2)
            # cv2.putText(img, '{:}'.format(self.classes[cls]), \
            #             (bb[0], bb[1] - 2), \
            #             cv2.FONT_HERSHEY_SIMPLEX, \
            #             fontScale, (0, 0, 255), 1)

        # img = (1 - 0.5/255 * msk.reshape((height, width, 1))) * img + \
        #       0.5/255 * msk.reshape((height, width, 1)) * \
        #       np.random.random((1, 3)) * 255

        output_path = osp.join(mask_dir, osp.basename(im_path))
        cv2.imwrite(output_path, msk)
        output_path = osp.join(img_dir, osp.basename(im_path))
        cv2.imwrite(output_path, img)
        print i
Developer: liuguoyou, Project: who_where, Lines of code: 54, Source file: coco.py
Example 15: polys_to_mask
def polys_to_mask(polygons, height, width):
    """Convert from the COCO polygon segmentation format to a binary mask
    encoded as a 2D array of data type numpy.float32. The polygon segmentation
    is understood to be enclosed inside a height x width image. The resulting
    mask is therefore of shape (height, width).
    """
    rle = mask_util.frPyObjects(polygons, height, width)
    mask = np.array(mask_util.decode(rle), dtype=np.float32)
    # Flatten in case polygons was a list
    mask = np.sum(mask, axis=2)
    mask = np.array(mask > 0, dtype=np.float32)
    return mask
Developer: Alphonses, Project: Detectron, Lines of code: 12, Source file: segms.py
Example 16: segmentation_to_mask
def segmentation_to_mask(polys, height, width):
    """
    Convert polygons to binary masks.

    Args:
        polys: a list of nx2 float array

    Returns:
        a binary matrix of (height, width)
    """
    polys = [p.flatten().tolist() for p in polys]
    rles = cocomask.frPyObjects(polys, height, width)
    rle = cocomask.merge(rles)
    return cocomask.decode(rle)
Developer: wu-yy, Project: tensorpack, Lines of code: 14, Source file: common.py
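A usage sketch for segmentation_to_mask above; the polygon is invented, numpy is assumed to be imported as np, and note that this helper takes n x 2 point arrays rather than flat coordinate lists:

poly = np.array([[10.0, 10.0], [60.0, 10.0], [10.0, 60.0]])  # a triangle as an n x 2 array
m = segmentation_to_mask([poly], height=100, width=100)
print(m.shape, m.dtype)  # (100, 100) uint8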
Example 17: vis_one_image_opencv
def vis_one_image_opencv(
        im, boxes, segms=None, keypoints=None, thresh=0.9, kp_thresh=2,
        show_box=False, dataset=None, show_class=False):
    """Constructs a numpy array with the detections visualized."""
    if isinstance(boxes, list):
        boxes, segms, keypoints, classes = convert_from_cls_format(
            boxes, segms, keypoints)

    if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
        return im

    if segms is not None:
        masks = mask_util.decode(segms)
        color_list = colormap()
        mask_color_id = 0

    # Display in largest to smallest order to reduce occlusion
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    sorted_inds = np.argsort(-areas)

    for i in sorted_inds:
        bbox = boxes[i, :4]
        score = boxes[i, -1]
        if score < thresh:
            continue

        # show box (off by default)
        if show_box:
            im = vis_bbox(
                im, (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))

        # show class (off by default)
        if show_class:
            class_str = get_class_string(classes[i], score, dataset)
            im = vis_class(im, (bbox[0], bbox[1] - 2), class_str)

        # show mask
        if segms is not None and len(segms) > i:
            color_mask = color_list[mask_color_id % len(color_list), 0:3]
            mask_color_id += 1
            im = vis_mask(im, masks[..., i], color_mask)

        # show keypoints
        if keypoints is not None and len(keypoints) > i:
            im = vis_keypoints(im, keypoints[i], kp_thresh)

    return im
Developer: Alphonses, Project: Detectron, Lines of code: 48, Source file: vis.py
Example 18: _getIgnoreRegion
def _getIgnoreRegion(iid, coco):
    img = coco.imgs[iid]

    if not 'ignore_regions_x' in img.keys():
        return None

    if len(img['ignore_regions_x']) == 0:
        return None

    rgns_merged = []
    for region_x, region_y in zip(img['ignore_regions_x'], img['ignore_regions_y']):
        # interleave the x and y coordinate streams into one flat [x0, y0, x1, y1, ...] polygon
        rgns = [iter(region_x), iter(region_y)]
        rgns_merged.append(list(it.next() for it in itertools.cycle(rgns)))
    rles = maskUtils.frPyObjects(rgns_merged, img['height'], img['width'])
    rle = maskUtils.merge(rles)
    return maskUtils.decode(rle)
Developer: Scratkong, Project: DensePose, Lines of code: 16, Source file: densepose_cocoeval.py
Example 19: _get_mask_targets
def _get_mask_targets(polygons):
    mask_targets_blob = np.zeros((len(polygons), cfg.MWIDTH * cfg.MHEIGHT), dtype=np.float32)
    mask_targets_weights = np.zeros((len(polygons), 1), dtype=np.float32)
    img = np.ones((cfg.MHEIGHT, cfg.MWIDTH, 1), dtype=np.float32)
    for i, polygon in enumerate(polygons):
        if not polygon:
            continue
        else:
            # rle = COCOmask.frPyObjects(polygon, cfg.MHEIGHT, cfg.MWIDTH)
            m = COCOmask.decode(polygon)
            m = np.sum(m, axis=2)
            assert max(m.ravel()) == 1
            assert min(m.ravel()) == 0
            m = simage.interpolation.zoom(
                input=m,
                zoom=(float(cfg.MHEIGHT) / m.shape[0], float(cfg.MWIDTH) / m.shape[1]),
                order=2)
            # debug
            mask_targets_blob[i, :] = m.ravel()
            mask_targets_weights[i] = 1.
    return mask_targets_blob, mask_targets_weights
Developer: shallowyuan, Project: cosegmentor, Lines of code: 18, Source file: minibatch.py
Example 20: polys_to_mask_wrt_box
def polys_to_mask_wrt_box(polygons, box, M):
    w = box[2] - box[0]
    h = box[3] - box[1]
    w = np.maximum(w, 1)
    h = np.maximum(h, 1)
    polygons_norm = []
    for poly in polygons:
        p = np.array(poly, dtype=np.float32)
        p[0::2] = (p[0::2] - box[0]) * M / w
        p[1::2] = (p[1::2] - box[1]) * M / h
        polygons_norm.append(p)
    rle = mask_util.frPyObjects(polygons_norm, M, M)
    mask = np.array(mask_util.decode(rle), dtype=np.float32)
    mask = np.sum(mask, axis=2)
    mask = np.array(mask > 0, dtype=np.float32)
    return mask
Developer: TPNguyen, Project: DetectAndTrack, Lines of code: 19, Source file: segms.py
Note: the pycocotools.mask.decode examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Refer to the corresponding project's license before distributing or reusing the code. Do not reproduce this article without permission.