
Python pylab.gcf Function Code Examples


This article collects typical usage examples of Python's matplotlib.pylab.gcf function. If you are wondering what exactly gcf does, how to call it, or what real-world gcf code looks like, the hand-picked examples below may help.



The sections below present 20 code examples of the gcf function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
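Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: pylab.gcf() returns the currently active matplotlib Figure, and Figure methods such as set_size_inches, suptitle, and savefig are then called on it. The output file name gcf_example.png is just an illustrative placeholder.

import matplotlib
matplotlib.use("Agg")  # headless backend so the snippet runs without a display
from matplotlib import pylab

# Draw something so that a "current figure" exists.
pylab.plot([0, 1, 2, 3], [0, 1, 4, 9], label="y = x**2")
pylab.legend()

# gcf() hands back the active Figure object; the examples below chain
# Figure methods (set_size_inches, suptitle, savefig, ...) onto this call.
fig = pylab.gcf()
fig.set_size_inches(8, 4)
fig.suptitle("matplotlib.pylab.gcf example")
fig.savefig("gcf_example.png", dpi=150)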

Example 1: test_one_box

def test_one_box(box,tree,graphics=False,callback=None):#,f)
    print 'box',box[0],box[1],':',
    s = tree.search(box)
    print ""
    print "box search:", s
    print "len(s):", len( s )
    boxes = tree.boxes()
    if graphics:
        plt.close()
        gfx.show_uboxes(boxes)
        gfx.show_uboxes(boxes, S=s, col='r')
    if len(s) < ((tree.dim**tree.depth)/2): # dim^depth/2
        t = tree.insert(box)
        if graphics:
            boxes = tree.boxes()
            gfx.show_uboxes(boxes, S=t, col='c')
        print 'ins:',t
    else:
        t = tree.remove(s)
        print 'rem:',t

    if graphics:
        gfx.show_box(box,col='g',alpha=0.5)
        if callback:
            plt.gcf().canvas.mpl_connect('button_press_event', callback)
        plt.show()
Developer: caja-matematica, Project: climate_attractors, Lines: 26, Source: test_tree.py


Example 2: channel_transform

def channel_transform(fitsfiles, h5file, iref= None):
    """
    Channel Transformation

    Take a list of k2 pixel files (must be from the same
    channel). Find the centroids of each image and solve for the
    linear transformation that takes one scene to another
    """
    nstars = len(fitsfiles)

    # Pull the first file to get length and data type
    fitsfile0 = fitsfiles[0]
    cent0 = fits_to_chip_centroid(fitsfile0)
    channel = get_channel(fitsfile0)
    print "Using channel = %i" % channel

    # Determine the refence frame
    if iref==None:
        dfcent0 = pd.DataFrame(LE(cent0))
        ncad = len(dfcent0)
        med = dfcent0.median()
        dfcent0['dist'] = (
            (dfcent0['centx'] - med['centx'])**2 +
            (dfcent0['centy'] - med['centy'])**2
            )
        dfcent0 = dfcent0.iloc[ncad/4:-ncad/4]
        dfcent0 = dfcent0.dropna(subset=['centx','centy'])
        iref = dfcent0['dist'].idxmin()
    
    print "using reference frame %i" % iref
    assert np.isnan(cent0['centx'][iref])==False,\
        "Must select a valid reference cadence. No nans"

    cent = np.zeros((nstars,cent0.shape[0]), cent0.dtype)
    for i,fitsfile in enumerate(fitsfiles):
        if (i%10)==0:
            print i
        cent[i] = fits_to_chip_centroid(fitsfile)
        channel_i = get_channel(fitsfile)
        assert channel==channel_i,"%i != %i" % (channel, channel_i)

    trans,pnts = imtran.linear_transform(cent['centx'],cent['centy'],iref)
    trans = pd.DataFrame(trans)
    trans = pd.concat([trans,pd.DataFrame(LE(cent0))[['t','cad']]],axis=1)
    trans = trans.to_records(index=False)

    keys = cent.dtype.names
    pnts = mlab.rec_append_fields(pnts,keys,[cent[k] for k in keys])

    if h5file!=None:
        with h5plus.File(h5file) as h5:
            h5['trans'] = trans
            h5['pnts'] = pnts
            
    trans,pnts = read_channel_transform(h5file)
    plot_trans(trans, pnts)
    figpath = h5file[:-3] + '.png'
    plt.gcf().savefig(figpath)
    print "saving %s " % figpath
    return cent
Developer: petigura, Project: k2phot, Lines: 60, Source: channel_transform.py


Example 3: make_report

def make_report(event, dataframes, sequence, scores, part, n_iter, report_dir_base):

    # Run through the sequence of decisions.
    df = evaluate_sequence(sequence, dataframes)
    df = pd.concat([df, scores], axis=1)
    ns = ['a', 'b', 'c', 'd', 'e', 'f']
    l_ns = map(lambda x: "l_" + x, ns)
    o_ns = map(lambda x: "o_" + x, ns)

    cols = [u'acc', u'rec', u'avg. gain', u'action', u'gain', 
            u'max gain', #u'num nuggets', u'max nuggets',
            u'min select score', u'next score',] + l_ns + o_ns
    print df[cols]
                
    report_dir = os.path.join(
        report_dir_base, "iter-{}".format(n_iter + 1), part)
    if not os.path.exists(report_dir): os.makedirs(report_dir)

    results_path = os.path.join(report_dir, event.fs_name() + ".tsv")
    with open(results_path, "w") as f:
        df.to_csv(f, index=False, sep="\t")
    df["timestamp"] = df["timestamp"].apply(datetime.utcfromtimestamp)
    df.set_index("timestamp")[["acc", "rec", "avg. gain"]].plot()
    plt.gcf().suptitle(event.title+ " " + learner + " iter-{}".format(n_iter + 1))
    plt.gcf().savefig(os.path.join(report_dir, "{}.png".format(event.fs_name())))
Developer: kedz, Project: cuttsum, Lines: 25, Source: vwlearner.py


Example 4: set_axis_0

def set_axis_0():
    pylab.xlabel('time (days)')
    pylab.gcf().subplots_adjust(top=1.0-0.13, bottom=0.2, right=1-0.02,
                                left=0.2)
    a = list(pylab.axis())
    na = [a[0], a[1], 0, a[3]*1.05]
    pylab.axis(na)
Developer: AndreaCensi, Project: busymail, Lines: 7, Source: plot.py


Example 5: plotStateSeq

def plotStateSeq(jobname, showELBOInTitle=1, **kwargs):
  global dataName, StateColorMap
  if 'cmap' not in kwargs:
      kwargs['cmap'] = StateColorMap
  axes, zBySeq = bnpy.viz.SequenceViz.plotSingleJob(dataName, jobname,
      showELBOInTitle=showELBOInTitle, **kwargs)
  pylab.gcf().set_size_inches(ZW, ZH);
  return axes
Developer: dchouren, Project: thesis, Lines: 8, Source: PlotUtil.py


Example 6: plot_sun_image

def plot_sun_image(img, filename, wavelength=193, title = ''):
    #cmap = plt.get_cmap('sdoaia{}'.format(wavelength))
    cmap = plt.get_cmap('sohoeit195')
    plt.title(title)
    cax = plt.imshow(img,cmap=cmap,origin='lower',vmin=0, vmax=3000)#,vmin=vmin, vmax=vmax)
    plt.gcf().colorbar(cax)
    plt.savefig(filename)
    plt.close("all")
Developer: Yukorin5, Project: pythonscript, Lines: 8, Source: test-eit-plot.py


Example 7: XGB_native

def XGB_native(train,test,features,features_non_numeric):
    depth = 13
    eta = 0.01
    ntrees = 8000
    mcw = 3
    params = {"objective": "reg:linear",
              "booster": "gbtree",
              "eta": eta,
              "max_depth": depth,
              "min_child_weight": mcw,
              "subsample": 0.9,
              "colsample_bytree": 0.7,
              "silent": 1
              }
    print "Running with params: " + str(params)
    print "Running with ntrees: " + str(ntrees)
    print "Running with features: " + str(features)

    # Train model with local split
    tsize = 0.05
    X_train, X_test = cross_validation.train_test_split(train, test_size=tsize)
    dtrain = xgb.DMatrix(X_train[features], np.log(X_train[goal] + 1))
    dvalid = xgb.DMatrix(X_test[features], np.log(X_test[goal] + 1))
    watchlist = [(dvalid, 'eval'), (dtrain, 'train')]
    gbm = xgb.train(params, dtrain, ntrees, evals=watchlist, early_stopping_rounds=100, feval=rmspe_xg, verbose_eval=True)
    train_probs = gbm.predict(xgb.DMatrix(X_test[features]))
    indices = train_probs < 0
    train_probs[indices] = 0
    error = rmspe(np.exp(train_probs) - 1, X_test[goal].values)
    print error

    # Predict and Export
    test_probs = gbm.predict(xgb.DMatrix(test[features]))
    indices = test_probs < 0
    test_probs[indices] = 0
    submission = pd.DataFrame({myid: test[myid], goal: np.exp(test_probs) - 1})
    if not os.path.exists('result/'):
        os.makedirs('result/')
    submission.to_csv("./result/dat-xgb_d%s_eta%s_ntree%s_mcw%s_tsize%s.csv" % (str(depth),str(eta),str(ntrees),str(mcw),str(tsize)) , index=False)
    # Feature importance
    if plot:
      outfile = open('xgb.fmap', 'w')
      i = 0
      for feat in features:
          outfile.write('{0}\t{1}\tq\n'.format(i, feat))
          i = i + 1
      outfile.close()
      importance = gbm.get_fscore(fmap='xgb.fmap')
      importance = sorted(importance.items(), key=operator.itemgetter(1))
      df = pd.DataFrame(importance, columns=['feature', 'fscore'])
      df['fscore'] = df['fscore'] / df['fscore'].sum()
      # Plotitup
      plt.figure()
      df.plot()
      df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(25, 15))
      plt.title('XGBoost Feature Importance')
      plt.xlabel('relative importance')
      plt.gcf().savefig('Feature_Importance_xgb_d%s_eta%s_ntree%s_mcw%s_tsize%s.png' % (str(depth),str(eta),str(ntrees),str(mcw),str(tsize)))
Developer: AdityaRon, Project: kaggle-for-fun, Lines: 58, Source: rossmann-native-xgb-mine.py


Example 8: plot_series

def plot_series(x, y_array, labels):
    for y_arr, label in zip(y_array, labels):
        plt.plot(x, y_arr, label=label)
        plt.xlabel('Datetime')
        plt.ylabel('Demand')
        plt.title('Models of demand using trends and ARMA')
    plt.gcf().set_size_inches(26,20)
    plt.legend()
    plt.show()
Developer: racheltho, Project: YelpSysRec, Lines: 9, Source: demandpredict_main.py


Example 9: plotK

def plotK(JDict, xscale='linear', n='', **kwargs):
  paths = JDict.values()
  names = JDict.keys()
  bnpy.viz.PlotTrace.plotJobs(MakePaths(paths,n), names, MakeStyles(names),
                                     yvar='K', tickfontsize=tickfontsize,
                                     density=1, **kwargs)
  set_xscale(xscale)
  pylab.ylim(Klims); pylab.yticks(Kticks);
  pylab.gca().yaxis.grid() # horizontal lines
  pylab.gcf().set_size_inches(W, H);
Developer: dchouren, Project: thesis, Lines: 10, Source: PlotUtil.py


Example 10: plotHammingDistVsELBO

def plotHammingDistVsELBO(JDict, n='', **kwargs):
  names, paths = filterJDictForRunsWithELBO(JDict)
  bnpy.viz.PlotTrace.plotJobs(MakePaths(paths, n), names, MakeStyles(names),
                                     yvar='hamming-distance',
                                     xvar='evidence', 
                                     tickfontsize=tickfontsize, 
                                     density=1, **kwargs)
  pylab.ylim(Hlims); 
  pylab.yticks(Hticks);
  pylab.gcf().set_size_inches(W, H);
Developer: dchouren, Project: thesis, Lines: 10, Source: PlotUtil.py


Example 11: draw

def draw(x, y, title='K value for kNN'):
    plt.plot(x, y, label='k value')
    plt.title(title)
    plt.xlabel('k')
    plt.ylabel('Score')
    plt.grid(True)
    plt.legend(loc='best', framealpha=0.5, prop={'size':'small'})
    plt.tight_layout(pad=1)
    plt.gcf().set_size_inches(8,4)
    plt.show()
Developer: brenden17, Project: iris, Lines: 10, Source: iris_cv.py


Example 12: plotELBO

def plotELBO(JDict, xscale='linear', n='', **kwargs):
  names, paths = filterJDictForRunsWithELBO(JDict)
  bnpy.viz.PlotTrace.plotJobs(MakePaths(paths,n), names, MakeStyles(names),
                                     yvar='evidence', tickfontsize=tickfontsize,
                                     density=1, **kwargs)
  set_xscale(xscale)
  if ELBOlims is not None:
      pylab.ylim(ELBOlims);
  if ELBOticks is not None:
      pylab.yticks(ELBOticks);
  pylab.gca().yaxis.grid() # horizontal lines
  pylab.gcf().set_size_inches(W, H);
Developer: dchouren, Project: thesis, Lines: 12, Source: PlotUtil.py


Example 13: matplotlib_make_figure

def matplotlib_make_figure(figsize=(10,7), style='seaborn-dark'):
    try:
        plt.style.use(style)
    except ValueError:
        warning(" matplotlib style %s not found." % style)
        pass

    fig=plt.figure('scatter3d', figsize)
    plt.gcf().set_tight_layout(True)
    ax=fig.add_subplot(111,projection='3d')

    return fig, ax
Developer: vlas-sokolov, Project: pyscatter-3d, Lines: 12, Source: use_matplotlib.py


Example 14: plotStateSeq

def plotStateSeq(jobname, showELBOInTitle=1, xticks=None, **kwargs):
  global dataName, StateColorMap
  if 'cmap' not in kwargs:
      kwargs['cmap'] = StateColorMap
  axes, zBySeq = bnpy.viz.SequenceViz.plotSingleJob(dataName, jobname,
      showELBOInTitle=showELBOInTitle, **kwargs)
  pylab.subplots_adjust(top=0.85, bottom=0.1);
  axes[-1].tick_params(axis='both', which='major', labelsize=20)
  if xticks is not None:
      axes[-1].set_xticks(xticks);
  pylab.gcf().set_size_inches(ZW, ZH);
  pylab.draw();
  return axes
Developer: dchouren, Project: thesis, Lines: 13, Source: PlotUtil.py


Example 15: plot_episode

def plot_episode(args):
    """Plot an episode plucked from the large h5 database"""
    print "plot_episode"
    # load the data file
    tblfilename = "bf_optimize_mavlink.h5"
    h5file = tb.open_file(tblfilename, mode = "a")
    # get the table handle
    table = h5file.root.v2.evaluations

    # selected episode
    episode_row = table.read_coordinates([int(args.epinum)])
    # compare episodes
    episode_row_1 = table.read_coordinates([2, 3, 22, 46]) # bad episodes
    print "row_1", episode_row_1.shape
    # episode_row = table.read_coordinates([3, 87])
    episode_target = episode_row["alt_target"]
    episode_target_1 = [row["alt_target"] for row in episode_row_1]
    print "episode_target_1.shape", episode_target_1
    episode_timeseries = episode_row["timeseries"][0]
    episode_timeseries_1 = [row["timeseries"] for row in episode_row_1]
    print "row", episode_timeseries.shape
    print "row_1", episode_timeseries_1

    sl_start = 0
    sl_end = 2500
    sl_len = sl_end - sl_start
    sl = slice(sl_start, sl_end)
    pl.plot(episode_timeseries[sl,1], "k-", label="alt", lw=2.)
    print np.array(episode_timeseries_1)[:,:,1]
    pl.plot(np.array(episode_timeseries_1)[:,:,1].T, "k-", alpha=0.2)
    # alt_hold = episode_timeseries[:,0] > 4
    alt_hold_act = np.where(episode_timeseries[sl,0] == 11)
    print "alt_hold_act", alt_hold_act[0].shape, sl_len
    alt_hold_act_min = np.min(alt_hold_act)
    alt_hold_act_max = np.max(alt_hold_act)
    print "min, max", alt_hold_act_min, alt_hold_act_max, alt_hold_act_min/float(sl_len), alt_hold_act_max/float(sl_len),

    # pl.plot(episode_timeseries[sl,0] * 10, label="mode")
    pl.axhspan(-100., 1000,
               alt_hold_act_min/float(sl_len),
               alt_hold_act_max/float(sl_len),
               facecolor='0.5', alpha=0.25)
    pl.axhline(episode_target, label="target")
    pl.xlim((0, sl_len))
    pl.xlabel("Time steps [1/50 s]")
    pl.ylabel("Alt [cm]")
    pl.legend()
    if args.plotsave:
        pl.gcf().set_size_inches((10, 3))
        pl.gcf().savefig("%s.pdf" % (sys.argv[0][:-3]), dpi=300, bbox_inches="tight")
    pl.show()
Developer: koro, Project: python-multiwii, Lines: 51, Source: bf_optimize_mavlink_analyze.py


Example 16: _plot_eigenvalues

def _plot_eigenvalues(figure_id, model, figure_size, x_scale, y_scale):
    r"""
    Helper function that plots a model's eigenvalues.

    Parameters
    -----------
    figure_id : matplotlib.pyplot.Figure instance
        The handle of the figure to be saved.

    model : :map:`PCAModel` or subclass
       The model to be used.

    figure_size : (`int`, `int`)
        The size of the plotted figures.

    x_scale : `float`
        The scale of x axis.

    y_scale : `float`
        The scale of y axis.
    """
    # select figure
    figure_id = plt.figure(figure_id.number)

    # plot eigenvalues ratio
    plt.subplot(211)
    plt.bar(range(len(model.eigenvalues_ratio())),
            model.eigenvalues_ratio())
    plt.ylabel('Variance Ratio')
    plt.xlabel('Component Number')
    plt.title('Variance Ratio per Eigenvector')
    plt.grid("on")

    # plot eigenvalues cumulative ratio
    plt.subplot(212)
    plt.bar(range(len(model.eigenvalues_cumulative_ratio())),
            model.eigenvalues_cumulative_ratio())
    plt.ylim((0., 1.))
    plt.ylabel('Cumulative Variance Ratio')
    plt.xlabel('Component Number')
    plt.title('Cumulative Variance Ratio')
    plt.grid("on")

    # set figure size
    #plt.gcf().tight_layout()
    plt.gcf().set_size_inches([x_scale, y_scale] * np.asarray(figure_size))

    plt.show()

    return figure_id
Developer: csagonas, Project: menpo, Lines: 50, Source: base.py


Example 17: plot_p

def plot_p(frame,file_prefix='claw_p',path='./_output/_p',plot_slices=True,plot_pcolor=True,slices_limits=None,xshift=0.0,name='',title=True):
    sol_ref=Solution(frame+450,file_format='petsc',read_aux=False,path='_output/reference/_p/',file_prefix=file_prefix)
    sol=Solution(frame,file_format='petsc',read_aux=False,path=path,file_prefix=file_prefix)
    x=sol.state.grid.x.centers; y=sol.state.grid.y.centers
    x=x+xshift
    mx=len(x); my=len(y)    
    yy,xx = np.meshgrid(y,x)

    if frame < 10:
        str_frame = "00"+str(frame)
    elif frame < 100:
        str_frame = "0"+str(frame)
    else:
        str_frame = str(frame)

    p=sol.state.q[0,:,:]
    p_ref=sol_ref.state.q[0,:,:]

    if plot_pcolor:
        pl.pcolormesh(xx,yy,p,cmap=cm.OrRd)
        pl.title("t= "+str(sol.state.t),fontsize=20)
        pl.xlabel('x',fontsize=20); pl.ylabel('y',fontsize=20)
        pl.xticks(size=20); pl.yticks(size=20)
        cb = pl.colorbar();
        #pl.clim(colorbar_min,colorbar_max);
        imaxes = pl.gca(); pl.axes(cb.ax)
        pl.yticks(fontsize=20); pl.axes(imaxes)
        pl.axis([np.min(x),np.max(x),np.min(y),np.max(y)])
        #pl.axis([0.25,60.25,0.25,60.25])
        pl.savefig('./_plots_to_paper/co-interaction_'+str_frame+name+'.png')
        #pl.show()                            
        pl.close()
    if plot_slices:
        pl.figure(figsize=(8,3))
        pl.gcf().subplots_adjust(left=0.10)
        # plot reference
        pl.plot(x,p_ref[:,my/4.],'--b',linewidth=1)
        pl.plot(x,p_ref[:,3*my/4.],'--r',linewidth=1)
        # plot solution of interaction
        pl.plot(x,p[:,3*my/4.],'-r',linewidth=2)
        pl.plot(x,p[:,my/4.],'-b',linewidth=2)
        pl.title("t= "+str(sol.state.t),fontsize=20)
        pl.xlabel('x',fontsize=20)
        if title:
            pl.ylabel('Stress',fontsize=20)
        pl.xticks(size=20); pl.yticks(size=20)
        if slices_limits is not None:
            pl.axis([slices_limits[0]+xshift,slices_limits[1]+xshift,slices_limits[2],slices_limits[3]])
        pl.savefig('./_plots_to_paper/co-interaction_'+str_frame+name+'.eps')
        pl.close()
Developer: ketch, Project: diffractons_RR, Lines: 50, Source: plots_to_paper.py


Example 18: main

def main():
    # Process input data
    #json_data=open('C:/Users/rthomas/Documents/DemandPrediction/demand_prediction.json')
    json_data = open(sys.argv[1])
    x, y, last, total_hours = process_input(json_data)
    FUTURE_DAYS = 15  # will make prediciton 15 days into future

    # I looked at a few different regression families in sm.GLM but found very similar rms errors so I chose to use a simple linear regression
    trend_model = sm.OLS(y, sm.add_constant(range(len(x)), prepend=True)).fit()
    trend = trend_model.fittedvalues

    # y1 is y with the trend line (growth over time) removed
    y1 = y - trend
    
    # y2 is y1 with hour-of-week trends removed
    hours = [w.hour + 24*w.weekday() for w in x]
    hours_mean = [np.mean([y1[i] for i in range(len(y1)) if hours[i] == k]) for k in range(7*24)]
    y2 = [y1[i] - hours_mean[hours[i]] for i in range(len(y1))]

    trend_y = [hours_mean[hours[i]] + trend[i] for i in range(len(trend))]
    
    future_hours = FUTURE_DAYS*24 + total_hours
    future_trend = trend_model.predict(sm.add_constant(range(total_hours, future_hours), prepend=True))
    future_x = [last + datetime.timedelta(hours=k) for k in range(1,FUTURE_DAYS*24+1)]
    future_hours = [w.hour + 24*w.weekday() for w in future_x]
    future_hours_trend = [hours_mean[future_hours[i]] for i in range(len(future_x))]
    future_y = [sum(pair) for pair in zip(future_trend, future_hours_trend)] 
    
    plt.plot(x, y, label='Original Time Series')
    plt.plot(x + future_x, trend_y + future_y, label='Model')
    plt.xlabel('Datetime')
    plt.ylabel('Demand')
    plt.title('Original data and model of demand')
    plt.gcf().set_size_inches(26,20)
    plt.legend()
    plt.show()
            
    app = flask.Flask(__name__)
    app.run()
    
    @app.route('/api/prediction')
    def predictcsv():
        filename = 'prediction.csv'
        data = [[x, y] for (x,y) in zip(future_x, future_y)]
        lines = csv2string(data)
        resp = flask.Response(lines, status=200, mimetype='text/csv')
        resp.headers['Content-Disposition'] = 'attachment; filename=' + filename
        return resp 
Developer: racheltho, Project: YelpSysRec, Lines: 48, Source: demandpredict_main.py


Example 19: get_histogram_scale

def get_histogram_scale(distances_dict, nbins):
    """Draws histogram to outfile_name.
    """
    scale_dict = defaultdict(list)
    #draw histograms
    for d_dict in distances_dict.values():
        for i, (field, data) in enumerate(d_dict.items()):
            if len(data) < 1:
                continue
            histogram = hist(data,bins=nbins)
            
            fig  = gcf()
            axis = fig.gca()

            #get height scale: y/x
            ymin,ymax = axis.get_ylim()
        
            xmin,xmax = axis.get_xlim()
            scale_dict['ymin'].append(ymin)
            scale_dict['ymax'].append(ymax)
            scale_dict['xmin'].append(xmin)
            scale_dict['xmax'].append(xmax)

            clf()
    
    yscale = (min(scale_dict['ymin']),max(scale_dict['ymax']))
    xscale = (min(scale_dict['xmin']),max(scale_dict['xmax']))
    
    return xscale,yscale
Developer: cmhill, Project: qiime, Lines: 29, Source: make_distance_histograms.py


Example 20: SD_rule_of_thumb_skewed

def SD_rule_of_thumb_skewed(mult, ax=None, bins=30, regions=(), **opts):

   sample = np.random.exponential(size=15000) * 1.1 + np.random.uniform(size=15000) * 2.

   if ax is None:
      fig = plt.gcf()
      ax = fig.add_subplot(111)
      
   ax, density, CDF = sample_density(sample, bins=bins, **opts)
   SD = np.std(sample)
   ax.annotate('Average', xy=(np.mean(sample), 0),
              arrowprops=dict(facecolor='black'), xytext=(np.mean(sample),-0.1),
              fontsize=20,
              horizontalalignment='center')

   interval = np.linspace(np.mean(sample) - mult * SD,
                          np.mean(sample) + mult * SD,
                          500)
   ax.fill_between(interval, 0*interval, density(interval), 
                   hatch='/',
                   facecolor='yellow')

   standY = (sample - np.mean(sample)) / SD
   within = (np.fabs(standY) <= mult).sum() * 1. / sample.shape[0] * 100
   ax.set_title('Percentage within %0.1f SD: %d %%' % (mult, int(within)), fontsize=20, color='red')
   ax.set_yticks([])
   ax.set_xlim([-2,12])
   ax.set_ylim([0,ax.get_ylim()[1]])
   return ax
Developer: jonathan-taylor, Project: stats60, Lines: 29, Source: week1.py



Note: The matplotlib.pylab.gcf examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many different developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Please do not reproduce this compilation without permission.

