• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python vigra.readHDF5函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中vigra.readHDF5函数的典型用法代码示例。如果您正苦于以下问题:Python readHDF5函数的具体用法?Python readHDF5怎么用?Python readHDF5使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了readHDF5函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: get_target

def get_target(ds_str = "sopnetcompare_train"):
    """Load features and labels for the target (sopnetcompare) dataset.

    Samples whose label equals 0.5 (ambiguous ground truth) are removed.

    :param ds_str: dataset identifier; only "sopnetcompare_train" is supported
    :return: tuple (feats, labels) with matching first dimension
    """
    assert ds_str in ("sopnetcompare_train",), ds_str # TODO more datasets!!!
    print("Loading Features and Labels for:", ds_str)

    labelpath = '/home/constantin/Work/data_hdd/data_110915/sopnet_comparison/processed/facelabs/facelabs_mitooff.h5'
    ffeatpath = '/home/constantin/Work/data_hdd/cache/cached_datasets/sopnetcompare_train/features/ffeats/ffeat_bert_0_True.h5'

    # replace NaNs, they would break downstream classifiers
    feats = np.nan_to_num( vigra.readHDF5(ffeatpath, 'data') )

    import h5py
    # open read-only and close deterministically; we only need the dataset key
    with h5py.File(labelpath, 'r') as lab_file:
        key = list(lab_file.keys())[0]

    labels = np.array( vigra.readHDF5(labelpath, key) )

    # drop samples with ambiguous (0.5) labels; compute the mask once
    keep = labels != 0.5
    feats = feats[keep]
    labels = labels[keep]
    labels = labels[:, np.newaxis]

    assert all(np.unique(labels) == np.array([0, 1]))
    assert labels.shape[0] == feats.shape[0]

    labels = np.squeeze(labels)

    return (feats, labels)
开发者ID:constantinpape,项目名称:stuff_master,代码行数:27,代码来源:transfer_learning_experiments.py


示例2: make_superpix_isbi2013

def make_superpix_isbi2013(superpix = True):
    """Segment the isbi2013 test volume into 2d superpixels (or a 3d
    supervoxel segmentation) and write the result to HDF5.

    :param superpix: if True run the 2d distance-transform watershed layer
        by layer; otherwise run the 3d watershed on the whole volume
    """
    path_probs = "/home/constantin/Work/data_ssd/data_150615/isbi2013/pixel_probs/test-probs-nn.h5"
    key_probs  = "exported_data"

    path_raw = "/home/constantin/Work/data_ssd/data_150615/isbi2013/test-input.h5"
    key_raw  = "data"

    probs = vigra.readHDF5(path_probs, key_probs)
    probs = np.squeeze(probs)

    # invert: the watershed expects membrane probability as height map
    probs = np.array(probs)
    probs = 1. - probs

    raw = vigra.readHDF5(path_raw, key_raw)

    if superpix:
        # segment each xy-slice independently and stack the 2d results
        segmentation = np.zeros((probs.shape[0], probs.shape[1], probs.shape[2]), dtype=np.uint32)
        seeds = np.zeros((probs.shape[0], probs.shape[1], probs.shape[2]), dtype=np.uint32)
        weights = np.zeros((probs.shape[0], probs.shape[1], probs.shape[2]), dtype=np.uint32)
        # offset keeps the superpixel ids of the individual layers separate
        # (NOTE: original had mixed tab/space indentation here, a TabError in Py3)
        offset = 0
        for layer in range(probs.shape[2]):
            if layer != 0:
                offset = np.max(segmentation[:, :, layer - 1])
            res_wsdt = watershed_distancetransform_2d(probs[:, :, layer], offset)
            segmentation[:, :, layer] = res_wsdt[0]
            seeds[:, :, layer] = res_wsdt[1]
            weights[:, :, layer] = res_wsdt[2]

        volumina_n_layer( (probs, segmentation, seeds, weights) )

    else:
        # use supervoxel algorithm to segment the whole volume at once
        segmentation = watershed_distancetransform_3d(probs)
        volumina_n_layer( (raw, probs, segmentation) )

    print("Number of superpixels:", segmentation.max())

    path = "/home/constantin/Work/data_ssd/data_150615/isbi2013/superpixel/"

    name = "watershed_nn_dt_supervox_test"

    fpath = path + name + ".h5"
    vigra.impex.writeHDF5(segmentation, fpath, "superpixel" )
开发者ID:constantinpape,项目名称:stuff_master,代码行数:52,代码来源:superpix.py


示例3: get_source

def get_source(ds_str = "pedunculus"):
    """Load features and labels for the source (pedunculus) dataset.

    :param ds_str: dataset identifier; only "pedunculus" is supported
    :return: tuple (feats, labels) with matching first dimension
    """
    assert ds_str in ("pedunculus",), ds_str # TODO more datasets!!!
    print("Loading Features and Labels for:", ds_str)

    labelpath = '/home/constantin/Work/data_hdd/cache/cached_datasets/pedunculus/gt_face_segid1.h5'
    ffeatpath = '/home/constantin/Work/data_hdd/cache/cached_datasets/pedunculus/features/ffeats/ffeat_bert_1_True.h5'

    # replace NaNs, they would break downstream classifiers
    feats  = np.nan_to_num( vigra.readHDF5(ffeatpath, 'data') )
    labels = np.squeeze( vigra.readHDF5(labelpath, 'gt_face') )

    assert feats.shape[0] == labels.shape[0]

    return (feats, labels)
开发者ID:constantinpape,项目名称:stuff_master,代码行数:13,代码来源:transfer_learning_experiments.py


示例4: gt_isbi2012

def gt_isbi2012():
    """Build a smoothed-background ground truth segmentation for isbi2012,
    visualize it and save it to ``gt_path``.

    NOTE(review): the original computed ``gt`` and defined ``gt_path`` but
    never wrote the result; the save was added for consistency with
    ``project_gt_isbi2012``.
    """
    labels_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-labels.h5"
    raw_path    = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-volume.h5"

    labels      = vigra.readHDF5(labels_path, "labels")
    raw         = vigra.readHDF5(raw_path, "data")

    # smooth the background of the membrane labeling into a segmentation
    labels      = preprocess_for_bgsmoothing_isbi2012(labels)
    gt          = smooth_background(labels).astype(np.uint32)

    # visual sanity check before saving
    volumina_n_layer( (raw, labels, gt) )

    gt_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/ground_truth_seg.h5"
    vigra.writeHDF5(gt, gt_path, "gt")
开发者ID:constantinpape,项目名称:stuff_master,代码行数:13,代码来源:get_gt.py


示例5: singleFunctionTest

        def singleFunctionTest(feature_function, name):
            """Run *feature_function* for every edge type ('both', 'xy', 'z')
            and compare the computed features against reference features
            stored under ./features.

            :param feature_function: nifty accumulation function taking
                (rag, data, xy_array, z_array, keepXYOnly, keepZOnly, zDirection)
            :param name: feature set name; 'standard' yields 9 features per
                edge, anything else 9 * 12
            """

            def singleFeatureTest(fu, typ, zDir):
                # compute xy- and z-edge features into temporary hdf5 arrays,
                # read them back, then delete the temporary files
                xname = 'feats_%s_%s_%i_xy.h5' % (name, typ, zDir)
                zname = 'feats_%s_%s_%i_z.h5' % (name, typ, zDir)
                xy_file = nh5.createFile(xname)
                z_file = nh5.createFile(zname)
                # shape is (n_edges, n_features); a dummy length of 1 is used
                # for the edge type that is not being kept
                xy_shape = [
                    rag.totalNumberOfInSliceEdges if typ in ('xy', 'both') else 1,
                    9 if name == 'standard' else 9 * 12
                ]
                xy_chunks = [min(2500, xy_shape[0]), xy_shape[1]]
                z_shape = [
                    rag.totalNumberOfInBetweenSliceEdges if typ in ('z', 'both') else 1,
                    9 if name == 'standard' else 9 * 12
                ]
                z_chunks = [min(2500, z_shape[0]), z_shape[1]]
                xy_array = nh5.hdf5Array('float32', xy_file, 'data', xy_shape, xy_chunks)
                z_array = nh5.hdf5Array('float32', z_file, 'data', z_shape, z_chunks)
                fu(rag, self.dataArray, xy_array, z_array, zDirection=zDir)
                xfeats = xy_array.readSubarray([0, 0], xy_shape)
                zfeats = z_array.readSubarray([0, 0], z_shape)
                nh5.closeFile(xy_file)
                nh5.closeFile(z_file)
                os.remove(xname)
                os.remove(zname)
                return xname, zname, xfeats, zfeats

            for typ in ('both', 'xy', 'z'):
                # select which edge features the accumulator should keep
                if typ == 'both':
                    new_fu = partial(feature_function, keepXYOnly=False, keepZOnly=False)
                elif typ == 'xy':
                    new_fu = partial(feature_function, keepXYOnly=True, keepZOnly=False)
                elif typ == 'z':
                    new_fu = partial(feature_function, keepXYOnly=False, keepZOnly=True)

                if typ == 'z':
                    # z-features depend on the z-direction, test all three
                    for zDir in (0, 1, 2):
                        _, zname, _, zfeats = singleFeatureTest(new_fu, typ, zDir)
                        ref_feats = vigra.readHDF5(os.path.join('./features', zname), 'data')
                        self.assertTrue(numpy.allclose(zfeats, ref_feats))

                else:
                    zDir = 0
                    xname, zname, xfeats, zfeats = singleFeatureTest(new_fu, typ, zDir)
                    ref_feats_xy = vigra.readHDF5(os.path.join('./features', xname), 'data')
                    self.assertTrue(numpy.allclose(xfeats, ref_feats_xy))
                    if typ == 'both':
                        ref_feats_z = vigra.readHDF5(os.path.join('./features', zname), 'data')
                        self.assertTrue(numpy.allclose(zfeats, ref_feats_z))
开发者ID:constantinpape,项目名称:nifty,代码行数:50,代码来源:test_accumulation_flat.py


示例6: load_very_small_neuro_data

def load_very_small_neuro_data():
    """
    Load the 1000 neuro dataset.

    :return: data_x, data_y
    """
    data_x = vigra.readHDF5("data/neuro/neuro_1000_raw_gt.h5", "raw")
    data_y = vigra.readHDF5("data/neuro/neuro_1000_raw_gt.h5", "gt")

    # Drop all rows that contain NaN values (and their labels).
    nan_rows = numpy.where(numpy.isnan(data_x))
    data_x, data_y = (
        numpy.delete(data_x, nan_rows, axis=0),
        numpy.delete(data_y, nan_rows),
    )

    return data_x, data_y
开发者ID:dagophil,项目名称:python_randomforest,代码行数:15,代码来源:tests.py


示例7: load_large_neuro_data

def load_large_neuro_data():
    """
    Load the large neuro dataset.

    :return: data_x, data_y
    """
    data_x = vigra.readHDF5("data/neuro/test/ffeat_br_segid0.h5", "ffeat_br")
    data_y = numpy.array(vigra.readHDF5("data/neuro/test/gt_face_segid0.h5", "gt_face")[:, 0])
    assert data_x.shape[0] == data_y.shape[0]

    # Drop all rows that contain NaN values (and their labels).
    nan_rows = numpy.where(numpy.isnan(data_x))
    data_x, data_y = (
        numpy.delete(data_x, nan_rows, axis=0),
        numpy.delete(data_y, nan_rows),
    )
    return data_x, data_y
开发者ID:dagophil,项目名称:python_randomforest,代码行数:15,代码来源:tests.py


示例8: make_superpix_sopnetcomparison

def make_superpix_sopnetcomparison():
    """Compute stacked 2d distance-transform watershed superpixels for the
    sopnet-comparison probability map and write them to HDF5."""
    path_probs = "/home/constantin/Work/data_ssd/data_110915/sopnet_comparison/pixel_probabilities/probs-final_autocontext.h5"
    key_probs = "data"

    probs = vigra.readHDF5(path_probs, key_probs)

    segmentation = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)
    seeds        = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)
    weights      = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) ,dtype = np.uint32)

    # offset keeps the superpixel ids of the individual layers separate
    # (NOTE: original had mixed tab/space indentation here, a TabError in Py3)
    offset = 0
    for layer in range(probs.shape[2]):
        if layer != 0:
            offset = np.max(segmentation[:, :, layer - 1])
        res_wsdt = watershed_distancetransform_2d(probs[:, :, layer], offset)
        segmentation[:, :, layer] = res_wsdt[0]
        seeds[:, :, layer] = res_wsdt[1]
        weights[:, :, layer] = res_wsdt[2]

    print("Number of superpixels:", segmentation.max())

    path = "/home/constantin/Work/data_ssd/data_110915/sopnet_comparison/superpixel/"
    name = "watershed_dt_mitooff"
    fpath = path + name + ".h5"

    vigra.impex.writeHDF5(segmentation, fpath, "superpixel" )
开发者ID:constantinpape,项目名称:stuff_master,代码行数:30,代码来源:superpix.py


示例9: label_names

    def label_names(self):
        """Returns the names of the labels of the dataset.

        Read directly from the ilastik project file at the path given by
        ``const.label_names()``.

        :return: names of the labels of the dataset
        :rtype: numpy.ndarray
        """
        return vigra.readHDF5(self.project_filename, const.label_names())
开发者ID:dagophil,项目名称:autocontext,代码行数:7,代码来源:ilp.py


示例10: make_superpix_from_intepolation

def make_superpix_from_intepolation(prob_path, prob_key, save_path, anisotropy):
    """Upsample anisotropic membrane probabilities in z via spline
    interpolation, run the distance-transform watershed and save the
    (re-downsampled) superpixels.

    :param prob_path: HDF5 file containing the membrane probability map
    :param prob_key: HDF5 key of the probability map
    :param save_path: HDF5 output file; result is written to key "superpixel"
    :param anisotropy: integer upsampling factor for the z-axis
    """
    from wsDtSegmentation import wsDtSegmentation

    pmem = vigra.readHDF5(prob_path, prob_key)

    print(pmem.shape)
    print(anisotropy)

    # for some datasets, we have to invert the probabilities
    #probs = 1. - probs

    # interpolate the probability in z - direction
    print("doing spline interpolation")
    pmem_interpol = vigra.sampling.resize(pmem, shape=(pmem.shape[0], pmem.shape[1], anisotropy* pmem.shape[2]))
    pmem_interpol = np.array(pmem_interpol)
    print("Finished interpolation")

    superpix = wsDtSegmentation(pmem_interpol, 0.45, 20, 100, 1.6, 2.)[0]

    # downsample back to the original z-resolution
    superpix = superpix[:,:,::anisotropy]

    assert superpix.shape == pmem.shape

    vigra.writeHDF5(superpix, save_path, "superpixel")
开发者ID:constantinpape,项目名称:stuff_master,代码行数:26,代码来源:superpix.py


示例11: project_gt_isbi2012

def project_gt_isbi2012():
    """Project the isbi2012 ground truth onto the train labels,
    visualize the result and save it."""
    labels_path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-labels.h5"
    gt_path     = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc.h5"
    raw_path    = "/home/constantin/Work/data_ssd/data_090615/isbi2012/train-volume.h5"

    # load labels, ground truth and raw data
    labels, gt, raw = (
        vigra.readHDF5(labels_path, "labels"),
        vigra.readHDF5(gt_path, "gt"),
        vigra.readHDF5(raw_path, "data"),
    )

    gt = project_gt(labels, gt)

    save_path     = "/home/constantin/Work/data_ssd/data_090615/isbi2012/groundtruth/gt_mc_bkg.h5"

    # visual check of the projection before saving
    volumina_n_layer( (raw, gt, labels) )

    vigra.writeHDF5(gt, save_path, "gt")
开发者ID:constantinpape,项目名称:stuff_master,代码行数:16,代码来源:project_gt.py


示例12: compare_rags_from_files

    def compare_rags_from_files(labels_file, labels_key):
        """Build the region adjacency graph of a label volume with three
        implementations (chunked nifty, in-memory nifty, vigra) and assert
        that their node and edge counts agree.

        :param labels_file: HDF5 file with the label volume
        :param labels_key: HDF5 key of the label volume
        """
        with vigra.Timer("Chunked Nifty Rag"):
            rag_c = chunked_rag(labels_file, labels_key, numberOfThreads = 1)
        edges_c = rag_c.numberOfEdges
        print(edges_c)
        nodes_c = rag_c.numberOfNodes
        # free the chunked rag before building the in-memory ones
        del rag_c


        labels = vigra.readHDF5(labels_file, labels_key).astype('uint32')

        with vigra.Timer("Nifty Rag"):
            rag_n = normal_rag(labels, numberOfThreads = -1 )
        edges_n = rag_n.numberOfEdges
        nodes_n = rag_n.numberOfNodes

        with vigra.Timer("Vigra Rag"):
            rag_v = vigra.graphs.regionAdjacencyGraph(vigra.graphs.gridGraph(labels.shape), labels)
        nodes_v = rag_v.nodeNum
        edges_v = rag_v.edgeNum

        assert nodes_c == nodes_n, str(nodes_c) + " , " + str(nodes_n)
        # vigra counts nodes differently (e.g. includes unused ids), so the
        # node comparison against vigra stays disabled
        #assert nodes_v == nodes_n, str(nodes_v) + " , " + str(nodes_n)

        assert edges_c == edges_n, str(edges_c) + " , " + str(edges_n)
        assert edges_v == edges_n, str(edges_v) + " , " + str(edges_n)

        print("Checks out")
开发者ID:hanslovsky,项目名称:nifty,代码行数:29,代码来源:test_rag_sliced.py


示例13: get_train_data

def get_train_data(path):
    """Read one dataset per class from *path*, flatten every sample,
    attach numeric labels and return both arrays shuffled in unison.

    :param path: HDF5 file with one dataset per class label
    :return: (data, labels) as 2d / 1d numpy arrays
    """
    label_to_num, num_to_label = get_label_dictionary(path)

    data_parts   = []
    label_parts  = []

    for key in label_to_num.keys():
        samples = vigra.readHDF5(path, key)
        flat = np.array([sample.flatten() for sample in samples])
        num = label_to_num[key]
        data_parts.append(flat)
        label_parts.append(num * np.ones(flat.shape[0]))

    # merge per-class chunks into single arrays
    data   = np.concatenate(data_parts)
    labels = np.concatenate(label_parts)

    assert len(data.shape) == 2
    assert len(labels.shape) == 1
    assert labels.shape[0] == data.shape[0]

    # shuffle data and labels with one shared permutation
    perm = np.random.permutation(data.shape[0])
    return data[perm], labels[perm]
开发者ID:constantinpape,项目名称:fooling,代码行数:32,代码来源:train_imagenet.py


示例14: load_feats_and_gt_pedunculus

def load_feats_and_gt_pedunculus():
    """Load precomputed features and the membrane ground truth for the
    pedunculus dataset.

    :return: (feats, gt) feature matrix and uint32 label volume
    """
    # raw data would be read from
    # "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401pedunculus_middle_512x512_first30_sliced.h5"
    # (key "data") if the features had to be recomputed.

    gt = vigra.readVolume(
            "/home/constantin/Work/data_ssd/data_080515/pedunculus/150401_pedunculus_membrane_labeling.tif")
    gt = np.squeeze(gt)

    # slice 6 is an all-black slice -> remove it
    gt = np.delete(gt, 6, axis = 2)

    # remap the membrane labeling (0 -> 1, 255 -> 0) and cast to int labels
    gt[gt == 0.] = 1
    gt[gt == 255.] = 0
    gt = gt.astype(np.uint32)

    save_path = "/home/constantin/Work/data_ssd/data_080515/pedunculus/features"
    # features must already be computed and cached at this location;
    # see compute_ilastik_2dfeatures / load_precomputed_feats for the
    # (disabled) recomputation path.
    feats_path = os.path.join( save_path, "all_features.h5")
    feats = vigra.readHDF5(feats_path, "data")

    return (feats, gt)
开发者ID:constantinpape,项目名称:stuff_master,代码行数:29,代码来源:compare_rfs.py


示例15: get_output_data

    def get_output_data(self, data_nr):
        """Returns the dataset that was produced by ilastik.

        Reads the exported dataset from the output file for *data_nr*
        under the default export key.

        :param data_nr: number of dataset
        :return: output dataset of ilastik
        """
        return vigra.readHDF5(self._get_output_data_path(data_nr), const.default_export_key())
开发者ID:dagophil,项目名称:autocontext,代码行数:7,代码来源:ilp.py


示例16: make_superpix_isbi2012

def make_superpix_isbi2012():
    """Compute stacked 2d distance-transform watershed superpixels for the
    isbi2012 training probability map and write them to HDF5."""
    path_probs = "/home/constantin/Work/data_ssd/data_090615/isbi2012/pixel_probabilities/probs_train_final.h5"
    #path_unet = "/home/constantin/Work/data_ssd/data_090615/isbi2012/u-net_probs/u-net_probs_test.h5"
    key_probs  = "exported_data"

    probs = vigra.readHDF5(path_probs, key_probs)
    probs = np.squeeze(probs)
    #probs  = 1. - probs

    # segment each xy-slice independently and stack the 2d results
    segmentation = np.zeros( (probs.shape[0], probs.shape[1], probs.shape[2]) )

    # offset keeps the superpixel ids of the individual layers separate
    # (NOTE: original had mixed tab/space indentation here, a TabError in Py3)
    offset = 0

    for layer in range(probs.shape[2]):
        if layer != 0:
            offset = np.max(segmentation[:, :, layer - 1])

        res_wsdt = watershed_distancetransform_2d(probs[:, :, layer], offset)
        segmentation[:, :, layer] = res_wsdt[0]

    #volumina_double_layer(probs,segmentation)
    #quit()

    path = "/home/constantin/Work/data_ssd/data_090615/isbi2012/superpixel/"

    name = "watershed_dt_train"

    fpath = path + name + ".h5"
    vigra.impex.writeHDF5(segmentation, fpath, "superpixel" )
开发者ID:constantinpape,项目名称:stuff_master,代码行数:32,代码来源:superpix.py


示例17: get_axisorder

    def get_axisorder(self, data_nr):
        """Returns the axisorder of the dataset.

        Read from the project file under the path given by
        ``const.axisorder(data_nr)``.

        :param data_nr: number of dataset
        :return: axisorder of dataset
        :rtype: str
        """
        return vigra.readHDF5(self.project_filename, const.axisorder(data_nr))
开发者ID:dagophil,项目名称:autocontext,代码行数:8,代码来源:ilp.py


示例18: get_axistags

    def get_axistags(self, data_nr):
        """Returns the axistags of the dataset as they are in the project file.

        Read from the project file under the path given by
        ``const.axistags(data_nr)``; no parsing or validation is done here.

        :param data_nr: number of dataset
        :return: axistags of dataset
        :rtype: str
        """
        return vigra.readHDF5(self.project_filename, const.axistags(data_nr))
开发者ID:dagophil,项目名称:autocontext,代码行数:8,代码来源:ilp.py


示例19: crossvalidation_mnist

def crossvalidation_mnist():
    """Grid-search random-forest hyperparameters on MNIST.

    For every (n_estimators, min_samples) combination the forest is trained
    and evaluated 5 times to average out the RF's randomness; mean test
    error and timings are appended to 'crossvalidation_mnist.txt'.

    :return: (best_n_estimators, best_min_samples) with the lowest mean
        test error
    """
    dat_train = vigra.readHDF5("../data/mnist/mnist-train.h5", "data")
    dat_train = dat_train.reshape( (dat_train.shape[0], dat_train.shape[1]*dat_train.shape[2]) )
    lbl_train = vigra.readHDF5("../data/mnist/mnist-train.h5", "label")

    dat_test = vigra.readHDF5("../data/mnist/mnist-test.h5", "data")
    dat_test = dat_test.reshape( (dat_test.shape[0], dat_test.shape[1]*dat_test.shape[2]) )
    lbl_test = vigra.readHDF5("../data/mnist/mnist-test.h5", "label")

    min_error = 1.
    best_params = (0,0)

    # context manager guarantees the result file is flushed and closed
    with open('crossvalidation_mnist.txt','w') as out:
        out.write("Estimators, min_samples, test_error, train_time, test_time")
        out.write('\n')
        for estimators in (100,255,500,750):
            for min_samples in (1,5,10,20):
                print("Start run with n_estimators =", estimators, "min_samples =", min_samples)

                # 5 runs to account for randomness
                errors = []
                times_train = []
                times_test  = []
                for _ in range(5):

                    t_0 = t.time()
                    rf = learn_rf(dat_train, lbl_train, estimators, min_samples)
                    times_train.append(t.time() - t_0)

                    t_1 = t.time()
                    errors.append(evaluate_rf(rf, dat_test, lbl_test))
                    # BUG fix: the original assigned here, overwriting the
                    # list, so only the last run's time was averaged
                    times_test.append(t.time() - t_1)

                err = np.mean(errors)
                t_train = np.mean(times_train)
                t_test  = np.mean(times_test)

                res = str(estimators) + '\t' + str(min_samples) + '\t ' + str(err) + '\t' + str(t_train) + '\t ' + str(t_test) + '\n'
                out.write(res)

                if err < min_error:
                    min_error = err
                    best_params = (estimators, min_samples)

    print("Cross Validation found best test_error:", min_error, "for", best_params)
    return best_params
开发者ID:constantinpape,项目名称:fooling,代码行数:46,代码来源:train_mnist.py


示例20: train_mnist

def train_mnist(estimators, min_samples):
    """Train a random forest on MNIST, report the test error and pickle
    the trained model.

    :param estimators: number of trees in the forest
    :param min_samples: minimum number of samples per leaf/split
    """
    dat_train = vigra.readHDF5("../data/mnist/mnist-train.h5", "data")
    dat_train = dat_train.reshape( (dat_train.shape[0], dat_train.shape[1]*dat_train.shape[2]) )
    lbl_train = vigra.readHDF5("../data/mnist/mnist-train.h5", "label")

    dat_test = vigra.readHDF5("../data/mnist/mnist-test.h5", "data")
    dat_test = dat_test.reshape( (dat_test.shape[0], dat_test.shape[1]*dat_test.shape[2]) )
    lbl_test = vigra.readHDF5("../data/mnist/mnist-test.h5", "label")

    print("Training sklearn on MNIST")
    rf_mnist = learn_rf(dat_train, lbl_train,estimators,min_samples)
    print("Finished Training")
    err = evaluate_rf(rf_mnist, dat_test, lbl_test)
    # message fix: the error is computed on the test set, not the train set
    print("Error on test set:", err)
    save_path ='../data/rf/rf_mnist.pkl'
    print("Saving RF to", save_path)
    with open(save_path, 'wb') as f:
        cPickle.dump(rf_mnist,f)
开发者ID:constantinpape,项目名称:fooling,代码行数:18,代码来源:train_mnist.py



注:本文中的vigra.readHDF5函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python vigra.readImage函数代码示例发布时间:2022-05-26
下一篇:
Python vigra.defaultAxistags函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap