
Python mlab.prctile Function Code Examples


This article collects typical usage examples of the Python function matplotlib.mlab.prctile. If you are wondering how prctile is used in practice, what arguments it takes, or simply want to see it in real code, the curated examples below should help.



Twenty code examples of the prctile function are shown below, ordered by popularity by default.
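
Before the collected examples, here is a minimal sketch of how prctile is typically called. The variable names are illustrative; note that mlab.prctile only exists in older matplotlib releases, and np.percentile is the usual modern replacement (the last lines show that assumption):

import numpy as np
from matplotlib import mlab  # mlab.prctile is only available in older matplotlib releases

data = np.random.randn(1000)

# a single percentile (here the median) comes back as a scalar
median = mlab.prctile(data, 50)

# a sequence of percentiles comes back as an array: quartiles plus a 95% interval
q = mlab.prctile(data, (2.5, 25, 75, 97.5))

# on matplotlib versions without mlab.prctile, np.percentile covers the same use case
q_np = np.percentile(data, (2.5, 25, 75, 97.5))
print(median, q, q_np)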

Example 1: bootstrapMedian

import numpy as np
import matplotlib.mlab as mlab

def bootstrapMedian(data, N=5000):
    # determine 95% confidence intervals of the median via bootstrap resampling
    M = len(data)
    percentile = [2.5, 97.5]
    estimate = np.zeros(N)
    for n in range(N):
        # resample with replacement (random_integers is inclusive of both ends;
        # np.random.randint(0, M, M) is the modern equivalent)
        bsIndex = np.random.random_integers(0, M-1, M)
        bsData = data[bsIndex]
        estimate[n] = mlab.prctile(bsData, 50)
    # percentiles of the bootstrap distribution give the confidence interval
    CI = mlab.prctile(estimate, percentile)
    return CI
Developer: amuxz | Project: prettyplotlib | Lines: 11 | Source: _beeswarm.py
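
A hypothetical call of the function above on synthetic data (the normal sample is illustrative only, not from the original project):

# hypothetical usage of bootstrapMedian on synthetic data
data = np.random.randn(200)
CI = bootstrapMedian(data, N=5000)
print("bootstrapped 95% CI on the median:", CI)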


Example 2: execute

 def execute(self, seed=0):
     """Test the difference in means with bootstrapping.
     
     Data is drawn randomly from group1 and group2, with resampling.
     From these bootstraps, estimates with confidence intervals are 
     calculated for the mean of each group and the difference in means.
     
     The estimated difference is positive if group2 > group1.
     
     Sets: means1, CI_1, means2, CI_2, diffs, CI_diff, p_from_dist
     
     p_from_dist is the p-value estimated from the distribution of
     bootstrapped differences (via pvalue_of_distribution).
     """
     if len(self.data1) < self.min_bucket or len(self.data2) < self.min_bucket:
         #~ raise BootstrapError(
             #~ 'insufficient data in bucket in bootstrap_two_groups')
         raise ValueError(
             'insufficient data in bucket in bootstrap_two_groups')
     
     if seed is not None:
         np.random.seed(seed)
     
     # Generate random samples, shape (n_boots, len(group))
     self.idxs1 = np.random.randint(0, len(self.data1), 
         (self.n_boots, len(self.data1)))
     self.idxs2 = np.random.randint(0, len(self.data2), 
         (self.n_boots, len(self.data2)))
     
     # Draw from the data
     self.draws1 = self.data1[self.idxs1]
     self.draws2 = self.data2[self.idxs2]
     
     # Bootstrapped means of each group
     self.means1 = self.draws1.mean(axis=1)
     self.means2 = self.draws2.mean(axis=1)
     
     # CIs on group means
     self.CI_1 = mlab.prctile(self.means1, (2.5, 97.5))
     self.CI_2 = mlab.prctile(self.means2, (2.5, 97.5))
     
     # Bootstrapped difference between the groups
     self.diffs = self.means2 - self.means1
     self.CI_diff = mlab.prctile(self.diffs, (2.5, 97.5))
     
     # p-value
     self.p_from_dist = pvalue_of_distribution(self.diffs, 0)
     
     # save memory
     del self.idxs1
     del self.idxs2
     del self.draws1
     del self.draws2
Developer: NSalem | Project: Rodgers2014 | Lines: 53 | Source: bootstrap.py


Example 3: harmonize_clim_in_subplots

def harmonize_clim_in_subplots(fig=None, axa=None, clim=(None, None), 
    center_clim=False, trim=1):
    """Set clim to be the same in all subplots in figur
    
    fig : Figure to grab all axes from, or None
    axa : the list of subplots (if fig is None)
    clim : tuple of desired c-limits. If either or both values are
        unspecified, they are derived from the data.
    center_clim : if True, the mean of the new clim is always zero
        May overrule specified `clim`
    trim : does nothing if 1 or None
        otherwise, sets the clim to truncate extreme values
        for example, if .99, uses the 1% and 99% values of the data
    """
    # Which axes to operate on
    if axa is None:
        axa = fig.get_axes()
    axa = np.asarray(axa)

    # Two ways of getting new clim
    if trim is None or trim == 1:
        # Get all the clim
        all_clim = []        
        for ax in axa.flatten():
            for im in ax.get_images():
                all_clim.append(np.asarray(im.get_clim()))
        
        # Find covering clim and optionally center
        all_clim_a = np.array(all_clim)
        new_clim = [np.min(all_clim_a[:, 0]), np.max(all_clim_a[:, 1])]
    else:
        # Trim to specified prctile of the image data
        data_l = []
        for ax in axa.flatten():
            for im in ax.get_images():
                data_l.append(np.asarray(im.get_array()).flatten())
        data_a = np.concatenate(data_l)
        
        # New clim
        new_clim = list(mlab.prctile(data_a, (100.*(1-trim), 100.*trim)))
    
    # Take into account specified clim
    try:
        if clim[0] is not None:
            new_clim[0] = clim[0]
        if clim[1] is not None:
            new_clim[1] = clim[1]
    except IndexError:
        print "warning: problem with provided clim"
    
    # Optionally center
    if center_clim:
        new_clim = np.max(np.abs(new_clim)) * np.array([-1, 1])
    
    # Set to new value
    for ax in axa.flatten():
        for im in ax.get_images():
            im.set_clim(new_clim)
    
    return new_clim
Developer: cxrodgers | Project: my | Lines: 60 | Source: plot.py


Example 4: createAsaInfo

    def createAsaInfo(self):
        'Return True on error.'
        if self.showMessages:
            nTdebug( "Fetching WHATIF per-atom surface accessibility info..." )

        fileNames = glob.glob(os.path.join(self.whatIfDataDir, "wsvacc*.log"))

        self.allWhatIfInfo = {'chains': {}}
        for fileName in fileNames:
            if self.readWhatIfAsaInfoFile(fileName): # fills self.allWhatIfInfo
                nTerror("Failed %s when reading file." % (getCallerName()))
                return True
        # end for
        
        #
        # Now determine the median ASA for each
        #
        # whatIfInfo is used in super class whereas allWhatIfInfo was filled before. 
        self.whatIfInfo = self.allWhatIfInfo
        d = self.whatIfInfo['chains']
#        medianIndex = None
        for chainCode in d.keys():
            for seqKey in d[chainCode].keys():
                for atomName in d[chainCode][seqKey]['atoms'].keys():
                    asaList =   d[chainCode][seqKey]['atoms'][atomName]
                    asaList.sort()
#                    if not medianIndex:
#                    medianIndex = int((len(asaList) / 2.0) + 0.5) # fails with round off on single element lists.
                    ml = mlab.prctile(asaList,[50])                    
#                    if medianIndex < 0 or medianIndex >= len(asaList):
#                        nTerror("Found improper median index %s for %s" % (medianIndex, str(asaList)))
#                        return True
#                    d[chainCode][seqKey]['atoms'][atomName] = [asaList[medianIndex]] # Resetting list to only include median
                    d[chainCode][seqKey]['atoms'][atomName] = [ml[0]] 
Developer: VuisterLab | Project: cing | Lines: 34 | Source: vascoCingRefCheck.py


Example 5: bootstrapped_intercluster_mahalanobis

def bootstrapped_intercluster_mahalanobis(cluster1, cluster2, n_boots=1000,
    fix_covariances=True):
    """Bootstrap the intercluster distance.
    
    Returns:
        m - The mean distance
        CI - 95% confidence interval on the distance
        distances - an array of the distances measured on each boot
    """
    d_l = []
    
    # Determine the covariance matrices, or recalculate each time
    if fix_covariances:
        icov1 = np.linalg.inv(np.cov(cluster1, rowvar=0))
        icov2 = np.linalg.inv(np.cov(cluster2, rowvar=0))
    else:
        icov1, icov2 = None, None
    
    # Bootstrap
    for n_boot in range(n_boots):
        # Draw
        idxs1 = np.random.randint(0, len(cluster1), len(cluster1))
        idxs2 = np.random.randint(0, len(cluster2), len(cluster2))
        
        # Calculate and store
        d_l.append(intercluster_mahalanobis(
            cluster1[idxs1], cluster2[idxs2], icov1, icov2))
    
    # Statistics
    d_a = np.asarray(d_l)
    m = np.mean(d_a)
    CI = mlab.prctile(d_a, (2.5, 97.5))
    return m, CI, d_a
Developer: NSalem | Project: Rodgers2014 | Lines: 33 | Source: mahala.py


Example 6: get_sample_percentiles

    def get_sample_percentiles(self, percents):
        'It returns the percentiles given a percent list'
        if not self._sample:
            raise ValueError('No data to calculate percentiles')

        vect = numpy.ravel(self.sample)
        percentiles = mlab.prctile(vect, percents)
        return list(percentiles)
Developer: BioinformaticsArchive | Project: franklin | Lines: 8 | Source: statistics.py


Example 7: difference_CI_bootstrap_wrapper

def difference_CI_bootstrap_wrapper(data, **boot_kwargs):
    """Given parsed data from single ulabel, return difference CIs.
    
    data : same format as bootstrap_main_effect expects
    
    Will calculate the following statistics:
        means : mean of each condition, across draws
        CIs : confidence intervals on each condition
        mean_difference : mean difference between conditions
        difference_CI : confidence interval on difference between conditions
        p : two-tailed p-value of 'no difference'
    
    Returns:
        dict of those statistics
    """
    # Yields a 1000 x 2 x N_trials matrix:
    # 1000 draws from the original data, under both conditions.
    bh = bootstrap_main_effect(data, meth=keep, **boot_kwargs)

    # Find the distribution of means of each draw, across trials
    # This is 1000 x 2, one for each condition
    # hist(means_of_all_draws) shows the comparison across conditions
    means_of_all_draws = bh.mean(axis=2)

    # Confidence intervals across the draw means for each condition
    condition_CIs = np.array([
        mlab.prctile(dist, (2.5, 97.5)) for dist in means_of_all_draws.T])

    # Means of each ulabel (centers of the CIs, basically)
    condition_means = means_of_all_draws.mean(axis=0)

    # Now the CI on the *difference between conditions*
    difference_of_conditions = np.diff(means_of_all_draws).flatten()
    difference_CI = mlab.prctile(difference_of_conditions, (2.5, 97.5)) 

    # p-value of 0. in the difference distribution
    cdf_at_value = np.sum(difference_of_conditions < 0.) / \
        float(len(difference_of_conditions))
    p_at_value = 2 * np.min([cdf_at_value, 1 - cdf_at_value])
    
    # Should probably floor the p-value at 1/n_boots

    return {'p' : p_at_value, 
        'means' : condition_means, 'CIs': condition_CIs,
        'mean_difference': difference_of_conditions.mean(), 
        'difference_CI' : difference_CI}
Developer: NSalem | Project: Rodgers2014 | Lines: 46 | Source: bootstrap.py


Example 8: percentile_box_plot

def percentile_box_plot(ax, data, indexer=None, box_top=75, 
                        box_bottom=25,whisker_top=98,whisker_bottom=2):
    if indexer is None:
        indexed_data = zip(range(1,len(data)+1), data)
    else:
        indexed_data = [(indexer(datum), datum) for datum in data]

    for index, x in indexed_data:
        if whisker_top is not None and whisker_bottom is not None:
            bp = boxplotter(*(prctile(x,(50,box_top,box_bottom,whisker_top,whisker_bottom))))
            bp.draw_on(ax, index, data=x)

        elif whisker_top is None and whisker_bottom is None:
            bp = boxplotter(*(prctile(x,(50,box_top,box_bottom))))
            bp.draw_on(ax, index)
        else:
            raise Exception("Just one whisker? That's silly.")
Developer: blackoutjack | Project: jamweaver | Lines: 17 | Source: perc_box_plot.py


Example 9: identify_outliers

def identify_outliers(test, chains, x):
    """
    Determine which chains have converged on a local maximum much lower than
    the maximum likelihood.

    *test* is the name of the test to use (one of IQR, Grubbs, Mahal or none).
    *chains* is a set of log likelihood values of shape (chain len, num chains)
    *x* is the current population of shape (num vars, num chains)

    See :module:`outliers` for details.
    """
    # Determine the mean log density of the active chains
    v = mean(chains, axis=0)

    # Check whether any of these active chains are outlier chains
    test = test.lower()
    if test == 'iqr':
        # Derive the upper and lower quartile of the chain averages
        Q1,Q3 = prctile(v,[25,75])
        # Derive the Inter Quartile Range (IQR)
        IQR = Q3 - Q1
        # See whether there are any outlier chains
        outliers = where(v < Q1 - 2*IQR)[0]

    elif test == 'grubbs':
        # Compute zscore for chain averages
        zscore = (mean(v) - v) / std(v, ddof=1)
        # Determine t-value of one-sided interval
        N = len(v)
        t2 = tinv(1 - 0.01/N, N-2)**2  # 95% interval
        # Determine the critical value
        Gcrit = ((N - 1)/sqrt(N)) * sqrt(t2/(N-2 + t2))
        # Then check against this
        outliers = where(zscore > Gcrit)[0]

    elif test == 'mahal':
        # Use the Mahalanobis distance to find outliers in the population
        alpha = 0.01
        Npop, Nvar = x.shape
        Gcrit = ACR(Nvar,Npop-1,alpha)
        #print "alpha",alpha,"Nvar",Nvar,"Npop",Npop,"Gcrit",Gcrit
        # Find which chain has minimum log_density
        minidx = argmin(v)
        # Then check the Mahalanobis distance of the current point to other chains
        d1 = mahalanobis(x[minidx,:], x[minidx!=arange(Npop),:])
        #print "d1",d1,"minidx",minidx
        # and see if it is an outlier
        outliers = [minidx] if d1 > Gcrit else []

    elif test == 'none':
        outliers = []

    else:
        raise ValueError("Unknown outlier test "+test)

    return outliers
Developer: RONNCC | Project: bumps | Lines: 56 | Source: outliers.py


Example 10: prctile

 def prctile(self, p = (2.5, 97.5)):
     ''' Returns the standard percentiles of the bootstrapped statistic.
     
     Arguments
     ---------
     p :
         A sequence of percentile values or a scalar
     '''
     
     from matplotlib.mlab import prctile
     return prctile(self.dist, p = p)
Developer: mcleonard | Project: memory | Lines: 11 | Source: stats.py


Example 11: ThresCal

 def ThresCal(self):
     SampNum = 1000
     self.KL = []
     for i in range(0, SampNum):
         x = chain(self.mu_0, self.P, self.n)
         mu = np.reshape(self.mu, (self.N, self.N))
         self.KL.append(KL_est(x, mu))  # Get the actual relative entropy (K-L divergence)
     self.eta = prctile(self.KL, 100 * (1 - self.beta))
     KL = self.KL
     eta = self.eta
     return KL, eta
Developer: jingzbu | Project: ROCHM | Lines: 11 | Source: util.py


Example 12: modeifyer

def modeifyer(times,fluxes,window=500,p=20,minpoints=10):
    """Uses percentile p of points around each datapoint "flux",
    being within time window to detrend fluxes. Returns
    corrected fluxes. For now done with a slow loop..."""
    detrend = fluxes.copy()
    for i in range(len(times)):
        near_fluxes = fluxes[where((times<times[i]+window/2)*(
            times>times[i]-window/2))]
        trend = prctile(near_fluxes,p)
        detrend[i] = fluxes[i] - trend
    return detrend
Developer: martindurant | Project: misc | Lines: 11 | Source: lomb.py


Example 13: _calculate_percentiles

def _calculate_percentiles(numbers, percents):
    'It calculates the percentiles for some numbers'
    #we need a numpy array
    if 'any' not in dir(numbers):
        numbers = numpy.ravel(numbers)
    if not numbers.any():
        raise ValueError('No data to calculate percentiles')

    mlab = sys.modules['matplotlib.mlab']

    percentiles = mlab.prctile(numbers, percents)
    return list(percentiles)
Developer: BioinformaticsArchive | Project: franklin | Lines: 12 | Source: statistics.py


Example 14: test_prctile

def test_prctile():
    # test odd lengths
    x=[1,2,3]
    assert mlab.prctile(x,50)==np.median(x)

    # test even lengths
    x=[1,2,3,4]
    assert mlab.prctile(x,50)==np.median(x)

    # derived from email sent by jason-sage to MPL-user on 20090914
    ob1=[1,1,2,2,1,2,4,3,2,2,2,3,4,5,6,7,8,9,7,6,4,5,5]
    p        = [0,   75, 100]
    expected = [1,  5.5,   9]

    # test vectorized
    actual = mlab.prctile(ob1,p)
    assert np.allclose( expected, actual )

    # test scalar
    for pi, expectedi in zip(p,expected):
        actuali = mlab.prctile(ob1,pi)
        assert np.allclose( expectedi, actuali )
Developer: CTPUG | Project: matplotlib-py3 | Lines: 22 | Source: test_mlab.py


Example 15: bootstrap_regress

import numpy as np
import scipy.stats

def bootstrap_regress(x, y, n_boot=1000):
    from matplotlib import mlab

    x = np.asarray(x)
    y = np.asarray(y)

    m_l, b_l = [], []
    for n in range(n_boot):
        msk = np.random.randint(0, len(x), size=len(x))
        m, b, rval, pval, stderr = scipy.stats.stats.linregress(x[msk], y[msk])
        m_l.append(m)
        b_l.append(b)

    res = {
        "slope_m": np.mean(m_l),
        "slope_l": mlab.prctile(m_l, p=2.5),
        "slope_h": mlab.prctile(m_l, p=97.5),
        "intercept_m": np.mean(b_l),
        "intercept_l": mlab.prctile(b_l, p=2.5),
        "intercept_h": mlab.prctile(b_l, p=97.5),
    }
    return res
Developer: cxrodgers | Project: my | Lines: 22 | Source: stats.py
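
A hypothetical call on synthetic linear data (slope 2, intercept 1); the data below are illustrative, not part of the original project:

# hypothetical usage of bootstrap_regress on synthetic linear data
x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.randn(50)
res = bootstrap_regress(x, y, n_boot=1000)
# slope_l / slope_h bracket the bootstrapped 95% interval on the slope
print(res["slope_m"], (res["slope_l"], res["slope_h"]))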


Example 16: simple_bootstrap

def simple_bootstrap(data, n_boots=1000, min_bucket=20):
    if len(data) < min_bucket:
        raise BootstrapError("too few samples")
    
    res = []
    data = np.asarray(data)
    for boot in range(n_boots):
        idxs = np.random.randint(0, len(data), len(data))
        draw = data[idxs]
        res.append(np.mean(draw))
    res = np.asarray(res)
    CI = mlab.prctile(res, (2.5, 97.5))
    
    return res, res.mean(), CI
Developer: NSalem | Project: Rodgers2014 | Lines: 14 | Source: bootstrap.py
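
A hypothetical call, assuming np and mlab are imported as in the earlier examples and BootstrapError is defined in the project; the exponential sample is illustrative only:

# hypothetical usage of simple_bootstrap on synthetic data
sample = np.random.exponential(scale=2.0, size=100)
boot_means, boot_mean, CI = simple_bootstrap(sample, n_boots=1000)
print(boot_mean, CI)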


Example 17: HoeffdingRuleMarkov

def HoeffdingRuleMarkov(beta, rho, G, H, W, Chi, FlowNum):
    """
    Estimate the K-L divergence and the threshold by use of weak convergence
    ----------------
    beta: the false alarm rate
    mu: the stationary distribution 
    G: the gradient
    H: the Hessian
    Sigma: the covariance matrix
    W: a sample path of the Gaussian empirical measure
    Chi: a sample path of the "Chi-Square" estimation
    FlowNum: the number of flows
    ----------------
    """
    _, SampNum, N = W.shape  # Here, N equals the number of states in the new chain Z

    # Estimate K-L divergence using 2nd-order Taylor expansion
    KL_1 = []
    for j in range(0, SampNum):
        t = (1.0 / sqrt(FlowNum)) * np.dot(G, W[0, j, :]) + \
                (1.0 / 2) * (1.0 / FlowNum) * \
                    np.dot(np.dot(W[0, j, :], H), W[0, j, :])
        # print t.tolist()
        # break
        KL_1.append(np.array(t.real)[0])
    # Get the threshold
    eta1 = prctile(KL_1, 100 * (1 - beta))
    KL_2 = [Chi[idx] / (2 * FlowNum) for idx in range(len(Chi))]  # range, not Python-2 xrange
    # Using the simplified formula
    # eta2 = 1.0 / (2 * FlowNum) * rho * chi2.ppf(1 - beta, N)
    eta2 = prctile(KL_2, 100 * (1 - beta))
    # print N

    # print(KL)
    # assert(1 == 2)
    return KL_1, KL_2, eta1, eta2
Developer: jingzbu | Project: TAHTMA | Lines: 36 | Source: util.py


Example 18: bootstrapMedian

def bootstrapMedian(data, N=5000):
    '''Bootstraper to refine estimate of a percentile from data
    N = number of iterations for the bootstrapping
    M = number of data points
    output = MU.bootStrapper(data, 50, 10000)
    '''
    import numpy as np
    import matplotlib.mlab as mlab

    M = len(data)
    percentile = 50

    estimate = np.array([])
    for k in range(N):
        bsIndex = np.random.random_integers(0,M-1,M)
        bsData = data[bsIndex]
        tmp = mlab.prctile(bsData, percentile)
        estimate = np.hstack((estimate, tmp))


    CI = mlab.prctile(estimate, [2.5,97.5])
    med = np.mean(estimate)

    return med, CI, estimate
Developer: phobson | Project: bridgescour | Lines: 24 | Source: NumUtils.py


Example 19: test_Median

    def test_Median(self):
        'test median'
# Wiki: If there is an even number of observations, then there is no single middle value; the median is then usually defined to be the 
# mean of the two middle values.[1][2]      
        lol = [ 
#               [], # fails
               [1.2],
               [1.0, 2.0], # 1.5 with matplotlib 1.0.1; 2.0 with matplotlib 0.99.3
               [1.0, 2.0, 4.0],
               ]
        expectedMedianList              = [ 1.2, 1.5, 2.0] # matplotlib 1.0.1
        expectedMedianListOldMatplotlib = [ 1.2, 2.0, 2.0] # matplotlib 0.99.3
        for i,floatList in enumerate(lol):
            ml = mlab.prctile(floatList,[50])
            nTdebug("Found: %s and expected (by new matplotlib): %s" % (ml[0], expectedMedianList[i]))
            if ml[0] != expectedMedianList[i]:
                self.assertEqual(ml[0], expectedMedianListOldMatplotlib[i])
Developer: VuisterLab | Project: cing | Lines: 17 | Source: test_Utils.py


Example 20: boxpoints

import matplotlib.mlab as mlab
from numpy import r_

def boxpoints(d, outlier_distance=1.5):
    # implementation pretty much the same as matplotlib axes.boxplot

    # get median and quartiles
    q1, med, q3 = mlab.prctile(d, [25, 50, 75])
    # min(data), max(data)

    iq = q3 - q1
    hi_val = q3 + outlier_distance*iq
    lo_val = q1 - outlier_distance*iq

    print(iq, q1, q3, '---', hi_val, lo_val)
    # print (d > hi_val)
    # print (d < lo_val)
    outliers = r_[d[d > hi_val], d[d < lo_val]]
    # print 'outliers', outliers
    inliers = list(set(d) - set(outliers))  # note: the original used the undefined name `data` here
    # print 'inliers', inliers
    min_without_outliers = min(inliers)
    max_without_outliers = max(inliers)

    return outliers, min_without_outliers, q1, med, q3, max_without_outliers
Developer: dplass | Project: etframes | Lines: 22 | Source: demo_box.py



Note: The matplotlib.mlab.prctile examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.

