This article collects typical usage examples of the Python function matplotlib.pylab.hist. If you have been wondering what the hist function does, how to call it, or where to find real-world examples of it, the curated code samples below may help.
The sections below present 20 code examples of the hist function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
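Before the project-specific examples, here is a minimal, self-contained sketch of a typical hist call on synthetic data. Note that current matplotlib uses density=True where many of the older snippets below pass the long-removed normed keyword:

import numpy as np
import matplotlib.pylab as plt

samples = np.random.randn(1000)  # 1000 draws from a standard normal
# hist returns the bin counts, the bin edges, and the drawn patches
counts, bin_edges, patches = plt.hist(samples, bins=30, density=True,
                                      alpha=0.7, label='samples')
plt.legend()
plt.show()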
Example 1: plotHist
def plotHist(self, parsList=None):
    """
    Plots distributions for a number of traces.

    Parameters
    ----------
    parsList : string or list of strings, optional
        Refers to a parameter name or a list of parameter names.
        If None, all available parameters are plotted.
    """
    if not ic.check["matplotlib"]:
        PE.warn(PE.PyARequiredImport("To use 'plotHists', matplotlib has to be installed.", \
                                     solution="Install matplotlib."))
        return
    if isinstance(parsList, basestring):
        parsList = [parsList]
    tracesDic = {}
    if parsList is not None:
        for parm in parsList:
            self._parmCheck(parm)
            tracesDic[parm] = self[parm]
    else:
        # Use all available traces
        for parm in self.availableParameters():
            tracesDic[parm] = self[parm]
    cols, rows = self.__plotsizeHelper(len(tracesDic))
    for i, (pars, trace) in enumerate(tracesDic.items()):
        # test tracesDic, not parsList: parsList may still be None here
        if len(tracesDic) > 1:
            plt.subplot(rows, cols, i + 1)
        plt.hist(trace, label=pars + " hist")
        plt.legend()
Author: dhomeier, Project: PyAstronomy, Lines: 33, Source: anaMCMCTraces.py
Example 2: fit_plot
def fit_plot(self, data, topn=0, bins=20):
    """ Create a plot. """
    from matplotlib import pylab as pl
    distros = self.get_topn(topn)
    xx = numpy.linspace(data.min(), data.max(), 300)
    table = []
    nparms = max(len(x.parms) for x in distros)
    tcolours = []
    for dd in distros:
        patch = pl.plot(xx, [dd.pdf(p) for p in xx],
                        label='%10.2f%% %s' % (100.0 * dd.rss / dd.dss, dd.name))
        row = ['', dd.name, '%10.2f%%' % (100.0 * dd.rss / dd.dss,)] + \
              ['%0.2f' % x for x in dd.parms]
        while len(row) < 3 + nparms:
            row.append('')
        table.append(row)
        tcolours.append([patch[0].get_markerfacecolor()] + ['w'] * (2 + nparms))
    # add a histogram with the data
    pl.hist(data, bins=bins, normed=True)
    tab = pl.table(cellText=table, cellColours=tcolours,
                   colLabels=['', 'Distribution', 'Res. SS/Data SS'] +
                             ['P%d' % (x + 1,) for x in range(nparms)],
                   bbox=(0.0, 1.0, 1.0, 0.3))
    # pl.legend(loc=0)
    tab.auto_set_font_size(False)
    tab.set_fontsize(10.)
Author: BackupGGCode, Project: pywafo, Lines: 28, Source: twolumps.py
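A note that applies to this and several later examples: the normed keyword used above was deprecated in matplotlib 2.1 and removed in 3.1. On a current matplotlib the histogram line would be written as in this minimal sketch (synthetic data as a stand-in for the fitted data above):

import numpy as np
import matplotlib.pylab as pl

data = np.random.gamma(2.0, size=500)  # stand-in for the data being fitted
pl.hist(data, bins=20, density=True)   # density=True replaces normed=True
pl.show()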
Example 3: summary
def summary(self, file_idx=0, show_plot=False):
    print "Cluster output"
    s = self.cluster_membership.sum(0)
    nnz = (s > 0).sum()
    print "Number of non-empty clusters: " + str(nnz) + " (of " + str(s.size) + ")"
    si = (self.cluster_membership).sum(0)
    print
    print "Size: count"
    for i in np.arange(0, si.max() + 1):
        print str(i) + ": " + str((si == i).sum())
    t = (self.peak_data.possible.multiply(self.cluster_membership)).data
    t -= 1
    print
    print "Trans: count"
    for i in np.arange(len(self.peak_data.transformations)):
        print self.peak_data.transformations[i].name + ": " + str((t == i).sum())
    if show_plot:
        plt.figure()
        x = []
        cx = self.cluster_model.Z.tocoo()
        for i, j, v in itertools.izip(cx.row, cx.col, cx.data):
            x.append(v)
        x = np.array(x)
        # x = x[~np.isnan(x)]
        plt.hist(x, 20)
        plt.title('Precursor mass clustering -- Z for file ' + str(file_idx))
        plt.xlabel('Probabilities')
        plt.ylabel('Count')
        plt.show()
Author: sdrogers, Project: metabolomics_tools, Lines: 34, Source: plotting.py
Example 4: distance_to_purchase_histogram
def distance_to_purchase_histogram(purchases):
    distances = calculate_distance_to_purchase_histogram(purchases)
    log_distances = [np.log10(0.1 + d) for d in distances if d is not None]
    plt.hist(log_distances, 60, alpha=0.5)
    plt.xlabel('$log_{10}$ ( distances in miles )')
    plt.title('Distances between purchase and billing address')
    return distances
Author: dave31415, Project: mitch, Lines: 7, Source: plotting.py
Example 5: icsd_progress
def icsd_progress():
    n = 60
    tasks = Task.objects.filter(project_set='icsd', entry__natoms__lte=n)
    data = tasks.values_list('entry__natoms', 'state')
    done = []
    failed = []
    idle = []
    running = []
    for task in data:
        if task[1] == 2:
            done.append(task[0])
        elif task[1] == 1:
            running.append(task[0])
        elif task[1] == 0:
            idle.append(task[0])
        elif task[1] == -1:
            failed.append(task[0])
    plt.hist([done, running, failed, idle], histtype='barstacked',
             label=['done', 'running', 'failed', 'waiting'],
             bins=n)  # , cumulative=True
    plt.legend(loc='best')
    plt.xlabel('# of atoms in primitive cell')
    plt.ylabel('# of entries')
    img = StringIO.StringIO()
    plt.savefig(img, dpi=75, bbox_inches='tight')
    data_uri = 'data:image/jpg;base64,'
    data_uri += img.getvalue().encode('base64').replace('\n', '')
    plt.close()
    return data_uri
Author: PingjieTang, Project: qmpy, Lines: 33, Source: __init__.py
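The data-URI step in Example 5 is Python 2 only (StringIO plus str.encode('base64')), and it labels the payload image/jpg even though savefig writes PNG by default. Under Python 3 the same idea might look like the following sketch; the helper name figure_to_data_uri is made up for illustration, and the surrounding Django query is assumed unchanged:

import base64
import io

import matplotlib.pylab as plt

def figure_to_data_uri():
    # Render the current pylab figure into an in-memory buffer instead of a file.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', dpi=75, bbox_inches='tight')
    plt.close()
    # b64encode returns bytes with no embedded newlines; decode to ASCII for the URI.
    return 'data:image/png;base64,' + base64.b64encode(buf.getvalue()).decode('ascii')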
Example 6: plot_fitted_model
def plot_fitted_model(self, sample, data, fig=None, xmin=-1, xmax=12,
                      npoints=1000, nbins=100, epsilon=0.25):
    """Plot fitted model"""
    # fetch group
    group = [i for i, item in enumerate(data.groups.items()) if sample in item[1]][0]
    # fetch data
    counts = data.counts_norm[sample].values.astype('float')
    counts[counts < 1] = epsilon
    counts = np.log(counts)
    # compute fitted model
    x = np.reshape(np.linspace(xmin, xmax, npoints), (-1, 1))
    xx = np.exp(x)
    loglik = _compute_loglik(xx, self.log_phi, self.log_mu, self.beta[self.z[group]])
    y = xx * np.exp(loglik) / self.nfeatures
    # plot
    fig = pl.figure() if fig is None else fig
    pl.figure(fig.number)
    pl.hist(counts, nbins, histtype='stepfilled', linewidth=0, normed=True, color='gray')
    pl.plot(x, np.sum(y, 1), 'r')
    pl.grid()
    pl.xlabel('log counts')
    pl.ylabel('density')
    pl.legend(['model', 'data'], loc=0)
    pl.tight_layout()
Author: dvav, Project: dgeclust, Lines: 29, Source: nbinom.py
Example 7: scaleTestMinFinding
def scaleTestMinFinding():
    xs = range(10)
    distances = []
    noise = 3.5
    n = 1000000
    for i in range(n):
        a = random()
        b = random()
        c = random()
        ys = [x * x * a + x * b + c + random() * noise for x in xs]
        # print a, b, c, polynomialFit(xs, ys)[::-1]
        minExp, unc = polynomialFindMinimum(xs, ys, returnErrors=True)
        minCalc = -b / (2.0 * a)
        dist = (minCalc - minExp) / unc
        # print minCalc, minExp, unc, dist
        distances.append(dist)
    print 'mean: %f' % stats.mean(distances)
    print 'stdDev: %f' % stats.stdDev(distances)
    for sigma in [1, 2, 3]:
        print 'With %d sigma: %f%%' % (sigma, 100.0 * sum([int(abs(d) < sigma) for d in distances]) / n)
    pylab.hist(distances, bins=50, range=(-5, 5))
    pylab.show()
Author: NickStupich, Project: PythonDFT-Analysis, Lines: 25, Source: polyFitTest.py
Example 8: main
def main():
    criticidad = {'sup': 0, 'med': 0, 'inf': 0}
    promedios = []
    for i in range(EXPERIMENTOS):
        tiempo = 0
        for j in range(CORRIDAS):
            # crack the eggs + scramble the eggs
            sup = np.random.uniform(2, 4) + np.random.exponential(4)
            # add the time to finish cooking the eggs
            sup += np.random.uniform(2, 4)
            med = np.random.uniform(6, 12)  # make toast + butter the toast
            inf = np.random.uniform(6, 12)  # fry the bacon
            valores = {'sup': sup, 'med': med, 'inf': inf}
            maximo = max(valores.values())
            k_maximo = kmaximo(valores, maximo)
            valores[k_maximo] += 1
            criticidad[k_maximo] += 1
            tiempo += valores[k_maximo]
        promedios.append(tiempo / CORRIDAS)
    desv = np.std(promedios)
    promedio = np.average(promedios)
    print "Valores: ", valores.values()
    print "Desvio %s " % desv
    print "Promedio %s" % promedio
    print "Intervalos de confianza %.2f <= u <= %.2f , con un 99%% de confianza" \
        % (promedio - 2.57 * desv, promedio + 2.57 * desv)
    for k, v in criticidad.iteritems():
        print "criticidad %s, %.2f %%" % (k, v * 100.00 / (CORRIDAS * EXPERIMENTOS))
    hist(promedios, 6)
    show()
Author: Pazitos10, Project: Modelos-y-simulacion, Lines: 30, Source: tp6.py
Example 9: mood_hist
def mood_hist(index):
    n_bins = 10
    data1 = pd.read_csv('data/split_class/large_IGNORE_406_mood_+1.txt', sep=' ', header=None)
    data2 = pd.read_csv('data/split_class/large_IGNORE_406_mood_-1.txt', sep=' ', header=None)
    mood_sum1 = pd.Series([0] * data1.shape[0])
    mood_sum2 = pd.Series([0] * data2.shape[0])
    # for i in np.arange(1, 7):
    for i in np.arange(1, 6):
        print(i)
        mood_sum1 += data1[i]
        mood_sum2 += data2[i]
    col1 = data1[index] / mood_sum1
    col2 = data2[index] / mood_sum2
    print(col1, col2)
    print(col1.mean())
    # print(col1.describe())
    print(col2.mean())
    # print(col2.describe())
    plt.subplot(1, 2, 1)
    plt.hist(col1, n_bins, alpha=0.8, color='r', linewidth=1.5)
    # plt.xlim(0, 0.5)
    plt.ylabel("frequency")
    plt.subplot(1, 2, 2)
    plt.hist(col2, n_bins, alpha=0.8, color='b', linewidth=1.5)
    # plt.xlim(0, 0.5)
    # plt.ylabel("frequency")
    plt.show()
Author: kayzhou, Project: character_analysis, Lines: 30, Source: paint.py
Example 10: shopping_hist
def shopping_hist():
    n_bins = 100
    data1 = pd.read_csv('data/split_class/large_IGNORE_404_shopping_+1.txt', sep=' ', header=None)
    data2 = pd.read_csv('data/split_class/large_IGNORE_404_shopping_-1.txt', sep=' ', header=None)
    shopping1 = data1[2]
    shopping2 = data2[2]
    for i in np.arange(3, 17):
        shopping1 += data1[i]
        shopping2 += data2[i]
    col1 = shopping1 / data1[1]
    print(col1.describe())
    col2 = shopping2 / data2[1]
    print(col2.describe())
    plt.subplot(1, 2, 1)
    plt.hist(col1, n_bins, normed=True, stacked=True, alpha=0.8, color='r', linewidth=1.5)
    plt.xlim(0, 0.5)
    plt.ylabel("frequency")
    plt.subplot(1, 2, 2)
    plt.hist(col2, n_bins, normed=True, stacked=True, alpha=0.8, color='b', linewidth=1.5)
    plt.xlim(0, 0.5)
    plt.ylabel("frequency")
    # plt.hist(data1[1], n_bins, normed=1, alpha=0.6, color='b', cumulative=True)
    # plt.hist(data2[1], alpha=0.6, color='r')
    plt.show()
Author: kayzhou, Project: character_analysis, Lines: 28, Source: paint.py
Example 11: hist_extraversion
def hist_extraversion():
    '''
    Distribution of extraversion scores, with a fitted normal curve.
    :return:
    '''
    n_bins = 10
    data = pd.read_csv('data/regress_train_data.txt', sep=' ', header=None)
    ext = data[1]
    mu = ext.mean()
    sigma = ext.std()
    print(mu, sigma)
    fig = plt.figure(figsize=(10, 8))
    # --- for *.eps --- #
    fig.set_rasterized(True)
    # plt.title("The distribution of score on extraversion")
    plt.xlabel("$Score\ on\ extraversion$", fontsize=20)
    plt.ylabel("$Probability$", fontsize=20)
    plt.grid(True)
    plt.hist(ext, n_bins, normed=1, alpha=0.8, rwidth=0.85)
    x = np.linspace(0, 60, 100)
    y = mlab.normpdf(x, mu, sigma)
    plt.xlim(0, 60)
    plt.ylim(0, 0.055)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    plt.plot(x, y, 'r--')
    # plt.tight_layout()
    plt.savefig('figure/ext_dist.eps', dpi=300)
    plt.show()
Author: kayzhou, Project: character_analysis, Lines: 29, Source: paint.py
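Two parts of Example 11 no longer exist on current stacks: matplotlib.mlab.normpdf was removed in matplotlib 3.1, and normed=1 became density=True. A minimal modern sketch of the same histogram-plus-normal-curve idea, using scipy.stats.norm and synthetic scores as a stand-in for the CSV column above:

import numpy as np
import matplotlib.pylab as plt
from scipy.stats import norm

scores = np.random.normal(30, 8, size=400)  # stand-in for the extraversion column
mu, sigma = scores.mean(), scores.std()
plt.hist(scores, bins=10, density=True, alpha=0.8, rwidth=0.85)
x = np.linspace(scores.min(), scores.max(), 100)
plt.plot(x, norm.pdf(x, mu, sigma), 'r--')  # replaces mlab.normpdf(x, mu, sigma)
plt.show()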
Example 12: hist_shortest_path
def hist_shortest_path(g, filename, show=0):
    g.delete_vertices(
        [i for i, degree in enumerate(g.degree()) if degree == 0])
    # print g.degree()
    shortest_paths = g.shortest_paths_dijkstra(mode='all')
    # print shortest_paths
    # ig.plot(g)
    plt.hist(
        np.hstack(shortest_paths),
        range=[0, 5],
        bins=5,
        rwidth=1.,
        align='left',
        normed=True,
    )
    plt.xlabel('Number of steps')
    plt.ylabel('Proportion')
    plt.title(
        'Number of steps to each member (mean: %.2f)'
        % np.mean(shortest_paths))
    if show:
        plt.show()
    else:
        plt.savefig(filename)
Author: keldLundgaard, Project: ThousandNetwork_simulation, Lines: 27, Source: simulation_stat_plots.py
Example 13: plot_age_distribution_over_time
def plot_age_distribution_over_time(g_states, filename=None):
    num_plots = len(g_states)
    fig = plt.gcf()
    max_cols = 5
    if int(np.ceil(np.sqrt(num_plots))) >= max_cols:
        cols = max_cols
    else:
        cols = int(np.ceil(np.sqrt(num_plots)))
    # float division so the ceiling actually rounds up under Python 2
    rows = int(np.ceil(num_plots / float(cols)))
    fig.set_size_inches(14, 5 * rows)
    for i, g in enumerate(g_states):
        plt.subplot(rows, int(np.ceil(num_plots / float(rows))), i + 1)
        plt.hist(
            g.vs['age'],
            bins=27,
            range=[18, 45],
            normed=True,
            label='t: %i' % i,
        )
        plt.legend()
    if filename:
        plt.savefig(filename)
        fig.clf()
Author: keldLundgaard, Project: ThousandNetwork_simulation, Lines: 30, Source: simulation_stat_plots.py
Example 14: compareHist
def compareHist(data1, data2, _title, tag1='data1', tag2='data2'):
    pl.figure()
    pl.hist(data1, normed=True, alpha=0.5, color='b')
    pl.hist(data2, normed=True, alpha=0.5, color='r')
    # Fit a normal distribution to the first dataset and overlay its pdf:
    mu1, std1 = stats.norm.fit(data1)
    xmin, xmax = pl.xlim()
    x = np.linspace(xmin, xmax, 100)
    p = stats.norm.pdf(x, mu1, std1)
    pl.plot(x, p, 'k', linewidth=2, color='b')
    # Fit a normal distribution to the second dataset:
    mu2, std2 = stats.norm.fit(data2)
    xmin, xmax = pl.xlim()
    x = np.linspace(xmin, xmax, 100)
    p = stats.norm.pdf(x, mu2, std2)
    pl.plot(x, p, 'k', linewidth=2, color='r')
    pl.title(_title)
    pl.savefig(data_DIR + '/' + _title + '.png', bbox_inches='tight')
    pl.close()
    return
Author: XiaoxiaoLiu, Project: morphology_analysis, Lines: 25, Source: visualizeData.py.bk.py
Example 15: tst_for_dataset
def tst_for_dataset(self, creator, filename):
    from dials.array_family import flex
    from dials.algorithms.shoebox import MaskCode
    print filename
    rlist = flex.reflection_table.from_pickle(filename)
    shoebox = rlist['shoebox']
    background = [sb.background.deep_copy() for sb in shoebox]
    success = creator(shoebox)
    assert(success.count(True) == len(success))
    diff = []
    for i in range(len(rlist)):
        mask = flex.bool([(m & MaskCode.Foreground) != 0 for m in shoebox[i].mask])
        px1 = background[i].select(mask)
        px2 = shoebox[i].background.select(mask)
        den = max([flex.mean(px1), 1.0])
        diff.append(flex.mean(px2 - px1) / den)
    diff = flex.double(diff)
    mv = flex.mean_and_variance(flex.double(diff))
    mean = mv.mean()
    sdev = mv.unweighted_sample_standard_deviation()
    try:
        assert(abs(mean) < 0.01)
    except Exception:
        print "Mean: %f, Sdev: %f" % (mean, sdev)
        from matplotlib import pylab
        pylab.hist(diff)
        pylab.show()
        raise
Author: biochem-fan, Project: dials, Lines: 28, Source: tst_creator.py
Example 16: study_redmapper_2d
def study_redmapper_2d():
    # I just want to know the typical angular separation for RM clusters.
    # I'm going to do this in a lazy way.
    hemi = 'north'
    rm = load_redmapper(hemi=hemi)
    ra = rm['ra']
    dec = rm['dec']
    ncl = len(ra)
    dist = np.zeros((ncl, ncl))
    for i in range(ncl):
        this_ra = ra[i]
        this_dec = dec[i]
        dra = this_ra - ra
        ddec = this_dec - dec
        dxdec = dra * np.cos(this_dec * np.pi / 180.)
        dd = np.sqrt(dxdec**2. + ddec**2.)
        dist[i, :] = dd
        dist[i, i] = 99999999.
    d_near_arcmin = dist.min(0) * 60.
    pl.clf()
    pl.hist(d_near_arcmin, bins=100)
    pl.title('Distance to Nearest Neighbor for RM clusters')
    pl.xlabel('Distance (arcmin)')
    pl.ylabel('N')
    fwhm_planck_217 = 5.5  # arcmin
    sigma = fwhm_planck_217 / 2.355
    frac_2sigma = 1. * len(np.where(d_near_arcmin > 2. * sigma)[0]) / len(d_near_arcmin)
    frac_3sigma = 1. * len(np.where(d_near_arcmin > 3. * sigma)[0]) / len(d_near_arcmin)
    print '%0.3f percent of RM clusters are separated by 2-sigma_planck_beam' % (100. * frac_2sigma)
    print '%0.3f percent of RM clusters are separated by 3-sigma_planck_beam' % (100. * frac_3sigma)
    ipdb.set_trace()
Author: amanzotti, Project: vksz, Lines: 30, Source: vksz.py
Example 17: test_flux
def test_flux(self):
    tol = 150.
    inputcat = catalog.read(os.path.join(self.args.tmp_path, 'ccd_1.cat'))
    pixradius = 3 * self.target["psf"] / self.instrument["PIXEL_SCALE"]
    positions = list(zip(inputcat["X_IMAGE"] - 1, inputcat["Y_IMAGE"] - 1))
    fluxes = image.simple_aper_phot(self.im[1], positions, pixradius)
    sky_background = image.annulus_photometry(self.im[1], positions,
                                              pixradius + 5, pixradius + 8)
    total_bg_pixels = np.shape(image.build_annulus_mask(pixradius + 5, pixradius + 8, positions[0]))[1]
    total_source_pixels = np.shape(image.build_circle_mask(pixradius, positions[0]))[1]
    estimated_fluxes = fluxes - sky_background * 1. / total_bg_pixels * total_source_pixels
    estimated_magnitude = image.flux2mag(estimated_fluxes,
                                         self.im[1].header['SIMMAGZP'],
                                         self.target["exptime"])
    expected_flux = image.mag2adu(17.5, self.target["zeropoint"][0],
                                  exptime=self.target["exptime"])
    p.figure()
    p.hist(fluxes, bins=50)
    p.title('Expected flux: {:0.2f}, mean flux: {:1.2f}'.format(expected_flux, np.mean(estimated_fluxes)))
    p.savefig(os.path.join(self.figdir, 'Fluxes.png'))
    assert np.all(np.abs(fluxes - expected_flux) < tol)
Author: rfahed, Project: extProcess, Lines: 27, Source: photometry_test.py
Example 18: study_redmapper_lrg_3d
def study_redmapper_lrg_3d(hemi='north'):
    # create 3d grid object
    grid = grid3d(hemi=hemi)
    # load SDSS data
    sdss = load_sdss_data_both_catalogs(hemi)
    # load redmapper catalog
    rm = load_redmapper(hemi=hemi)
    # get XYZ positions (Mpc) of both datasets
    x_sdss, y_sdss, z_sdss = grid.xyz_from_radecz(sdss['ra'], sdss['dec'], sdss['z'], applyzcut=False)
    x_rm, y_rm, z_rm = grid.xyz_from_radecz(rm['ra'], rm['dec'], rm['z_spec'], applyzcut=False)
    pos_sdss = np.vstack([x_sdss, y_sdss, z_sdss]).T
    pos_rm = np.vstack([x_rm, y_rm, z_rm]).T
    # build a couple of KDTrees, one for SDSS, one for RM.
    from sklearn.neighbors import KDTree
    tree_sdss = KDTree(pos_sdss, leaf_size=30)
    tree_rm = KDTree(pos_rm, leaf_size=30)
    lrg_counts = tree_sdss.query_radius(pos_rm, 100., count_only=True)
    pl.clf()
    pl.hist(lrg_counts, bins=50)
    ipdb.set_trace()
Author: amanzotti, Project: vksz, Lines: 27, Source: vksz.py
Example 19: plotMassFunction
def plotMassFunction(im, pm, outbase, mmin=9, mmax=13, mstep=0.05):
    """
    Make a comparison plot between the input mass function and the
    predicted projected correlation function
    """
    plt.clf()
    # np.logspace needs an integer sample count
    nmbins = int((mmax - mmin) / mstep)
    mbins = np.logspace(mmin, mmax, nmbins)
    mcen = (mbins[:-1] + mbins[1:]) / 2
    plt.xscale('log', nonposx='clip')
    plt.yscale('log', nonposy='clip')
    ic, e, p = plt.hist(im, mbins, label='Original Halos', alpha=0.5, normed=True)
    pc, e, p = plt.hist(pm, mbins, label='Added Halos', alpha=0.5, normed=True)
    plt.legend()
    plt.xlabel(r'$M_{vir}$')
    plt.ylabel(r'$\frac{dN}{dM}$')
    # plt.tight_layout()
    plt.savefig(outbase + '_mfcn.png')
    mdtype = np.dtype([('mcen', float), ('imcounts', float), ('pmcounts', float)])
    mf = np.ndarray(len(mcen), dtype=mdtype)
    mf['mcen'] = mcen
    mf['imcounts'] = ic
    mf['pmcounts'] = pc
    fitsio.write(outbase + '_mfcn.fit', mf)
Author: j-dr, Project: ADDHALOS, Lines: 30, Source: validation.py
Example 20: EstimateDensity
def EstimateDensity(self, name, df, histogram, f, s, ax):
    # if the desired output is in histogram format
    if histogram:
        finRes = []
        lab = []
        for i in xrange(5):
            res = np.array(df[df[f] == i][s])
            if res.shape[0] > 0:
                finRes.append(res)
                lab.append(name[0] + ' = ' + str(i))
        pl.hist(finRes, bins=2, normed=True, histtype='bar', label=lab)
    # if the desired output is a simple plot
    else:
        for i in xrange(5):
            res = np.array(df[df[f] == i][s])
            if res.shape[0] > 0:
                res = res.reshape(res.shape[0], 1)
                X_plot = np.array(np.linspace(-1, 5, 20)).reshape(20, 1)
                kde = KernelDensity(kernel='exponential', bandwidth=0.05)
                kde.fit(res)
                log_dens = kde.score_samples(X_plot)
                ax.plot(X_plot, np.exp(log_dens), label=name[0] + ' = ' + str(i))
    ax.legend()
    ax.set_title(name[1] + " distribution for changing " + name[0])
Author: ugur47, Project: AllState_Purchase_Prediction_Kaggle_Challange, Lines: 25, Source: reporting.py
Note: The matplotlib.pylab.hist examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not republish without permission.