This article collects typical usage examples of the pylab.load function in Python. If you are unsure exactly how pylab.load is used, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Twenty code examples of the load function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
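Before the examples, here is a minimal sketch of the pattern they all share. Note that pylab.load only exists in legacy matplotlib releases: it was deprecated and later removed, and numpy.loadtxt is the usual drop-in replacement, accepting the same core keywords seen throughout this page (delimiter, converters, skiprows, usecols, unpack). The file name data.txt is a hypothetical placeholder.

import numpy as np

try:
    from pylab import load          # only present in legacy matplotlib releases
except ImportError:
    load = np.loadtxt               # modern drop-in for plain-text numeric tables

data = load('data.txt')             # hypothetical whitespace-delimited file -> 2-D float array
x, y = data[:, 0], data[:, 1]       # columns index like any NumPy array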
Example 1: doit
def doit():
    map = Basemap(projection='lcc',
                  llcrnrlon=80,
                  urcrnrlon=160,
                  llcrnrlat=-50,
                  urcrnrlat=-8,
                  #lat_ts=-35,
                  lat_0=-35,
                  lon_0=120,
                  resolution='c',
                  area_thresh=1000.)
    p.clf()
    map.drawcoastlines()
    # map.drawcountries()
    # map.drawrivers()
    map.drawmeridians(p.arange(0,360,10),labels=[0,0,1,0])
    map.drawparallels(p.arange(-90,0,10),labels=[1,0,0,0])
    # load whitespace-delimited text data with pylab.load
    traj = p.load('example_traj.dat')
    coast = p.load('/media/sda4/map-data/aust-coast-noaa-2000000-1.dat')
    traj_x,traj_y = map(traj[:,1],traj[:,0])
    coast_x,coast_y = map(coast[:,0],coast[:,1])  # needed for the coastline plot below
    p.plot(traj_x,traj_y)
    p.plot(coast_x,coast_y,color='black')
    map.drawmapboundary()
    p.show()
    return map
Author: citterio, Project: physplit, Lines: 32, Source: plotcoast.py
Example 2: InvokeMap
def InvokeMap(coastfile='/media/sda4/map-data/aust-coast-noaa-2000000-1.dat',
              lllon=80,
              urlon=166,
              lllat=-47,
              urlat=-9,
              draw_map=True):
    global PYLIB_PATH
    map = Basemap(projection='cyl',
                  llcrnrlon=lllon,
                  urcrnrlon=urlon,
                  llcrnrlat=lllat,
                  urcrnrlat=urlat,
                  #lat_ts=-35,
                  lat_0=-35,
                  lon_0=120,
                  resolution='l',
                  area_thresh=1000.)
    try:
        coast = p.load(coastfile)
        coast_x,coast_y = map(coast[:,0],coast[:,1])
        p.plot(coast_x,coast_y,color='black')
    except IOError:
        # fall back to Basemap's built-in coastlines if the data file is missing
        map.drawcoastlines()
    map.drawmapboundary()
    map.drawmeridians(p.arange(0,360,10),labels=[0,0,1,0])
    map.drawparallels(p.arange(-90,0,10),labels=[1,0,0,0])
    return map
Author: citterio, Project: physplit, Lines: 33, Source: hplot.py
Example 3: load_csv
def load_csv(self,f):
    """
    Loading data from a csv file. Uses pylab's load function. Seems much faster
    than scipy.io.read_array.
    """
    varnm = f.readline().split(',')
    # what is the date variable's key, if any, based on the index passed as argument
    if self.date_key != '':
        try:
            rawdata = pylab.load(f, delimiter=',', converters={self.date_key:pylab.datestr2num})  # no need for 'skiprows' here
        except ValueError:  # if loading via pylab doesn't work, use csv
            rawdata = self.load_csv_nf(f)
            # converting the dates column to a date-number
            rawdata[self.date_key] = pylab.datestr2num(rawdata[self.date_key])
        self.date_key = varnm[self.date_key]
    else:
        try:
            rawdata = pylab.load(f, delimiter=',')  # no need for 'skiprows' here
        except ValueError:  # if loading via pylab doesn't work, use csv
            rawdata = self.load_csv_nf(f)
    # making sure that the variable names contain no leading or trailing spaces
    varnm = [i.strip() for i in varnm]
    # transforming the data into a dictionary
    if type(rawdata) == list:
        # if the csv module was used
        self.data = dict(zip(varnm,rawdata))
    else:
        # if pylab.load was used
        self.data = dict(zip(varnm,rawdata.T))
Author: BKJackson, Project: SciPy-CookBook, Lines: 34, Source: dbase.0.3.py
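For comparison, the delimiter/converters pattern of Example 3 carries over to numpy.loadtxt, the usual replacement for pylab.load in current matplotlib. Below is a minimal sketch of the same idea outside the class; prices.csv is a hypothetical file whose first column holds date strings, and todate is a helper added here only for illustration.

import numpy as np
from matplotlib.dates import datestr2num    # the converter behind pylab.datestr2num

def todate(s):
    # some NumPy versions hand bytes to converter functions, hence the guard
    return datestr2num(s.decode() if isinstance(s, bytes) else s)

with open('prices.csv') as f:                                     # hypothetical input file
    varnm = [name.strip() for name in f.readline().split(',')]    # header row, as in the example
    rawdata = np.loadtxt(f, delimiter=',', converters={0: todate})

data = dict(zip(varnm, rawdata.T))            # one column array per variable name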
Example 4: __init__
def __init__(self,xlmrpath,xlmipath,maptype):
    xlmr = pylab.load(xlmrpath)
    xlmi = pylab.load(xlmipath)
    xlmrname = os.path.basename(xlmrpath)
    self.outfileprefix = '-'.join( re.split( '-' , xlmrname )[:-1] )
    try:
        assert numpy.shape(xlmr) == numpy.shape(xlmi)
        self.xlmr , self.xlmi = xlmr , xlmi
    except AssertionError:
        print 'The two arrays loaded must have the same dimensions.'
    self.xlm = self.xlmr + 1j*self.xlmi
    print 'Multiple moments files read successfully!'
    ntrunc = int( numpy.sqrt( numpy.shape(self.xlm)[0] ) - 1 )
    self.ntrunc = ntrunc
    self.gotpix = False
    self.maptype = maptype
    # print 'Applying quick-fix I to multiple moments...'
    # indxpn = LISAresponse.getMLvec( self.ntrunc , 'pn' )
    # xlm = numpy.zeros( numpy.shape(self.xlm) , dtype=complex )
    # for i,ml in enumerate(indxpn):
    #     m , l = ml[0] , ml[1]
    #     k = indxpn.index( ( -m,l ) )
    #     xlm[i] = (-1)**m*self.xlm[k]
    # self.xlm = xlm
    return
Author: qAp, Project: LisaMapp, Lines: 25, Source: Utilities2.py
Example 5: plot_covs
def plot_covs(filein, fileout):
    import pylab as P
    data1 = P.load(filein)
    data2 = P.load(fileout)
    P.plot(data1[:,0], data1[:,1], 'o')
    P.plot(data2[:,0], data2[:,1])
    P.grid(True)
    P.show()
Author: fspaolo, Project: code, Lines: 8, Source: covfit.py
Example 6: compute_wishart_A
def compute_wishart_A(p):
    g = pylab.load('81vectors.txt')
    B = prepareB(math.sqrt(1500.0)*g)
    ew = [0.0015,0.0004,0.0004]
    ev1 = pylab.load('81vectors.txt')
    ev2 = pylab.load('321vectors.txt')
    A1 = assemble_wishart_matrix(B,ev1,ew,p)
    A2 = assemble_wishart_matrix(B,ev2,ew,p)
    return A1,A2
Author: matthew-brett, Project: diffusion_mri, Lines: 9, Source: test_condition_number.py
Example 7: getParamCovMat
def getParamCovMat(prefix,dlogpower = 2, theoconstmult = 1.,dlogfilenames = ['dlogpnldloga.dat'],volume=256.**3,startki = 0, endki = 0, veff = [0.]):
    """
    Calculates the parameter covariance matrix from the power spectrum covariance
    matrix and the derivative terms stored in the prefix directory.
    """
    nparams = len(dlogfilenames)
    kpnl = M.load(prefix+'pnl.dat')
    k = kpnl[startki:,0]
    nk = len(k)
    if (endki == 0):
        endki = nk
    pnl = M.array(kpnl[startki:,1],M.Float64)
    covarwhole = M.load(prefix+'covar.dat')
    covar = covarwhole[startki:,startki:]
    if len(veff) > 1:
        sqrt_veff = M.sqrt(veff[startki:])
    else:
        sqrt_veff = M.sqrt(volume*M.ones(nk))
    dlogs = M.reshape(M.ones(nparams*nk,M.Float64),(nparams,nk))
    paramFishMat = M.reshape(M.zeros(nparams*nparams*(endki-startki),M.Float64),(nparams,nparams,endki-startki))
    paramCovMat = paramFishMat * 0.
    # Covariance matrices of dlog's
    for param in range(nparams):
        if len(dlogfilenames[param]) > 0:
            dlogs[param,:] = M.load(prefix+dlogfilenames[param])[startki:,1]
    normcovar = M.zeros(M.shape(covar),M.Float64)
    for i in range(nk):
        normcovar[i,:] = covar[i,:]/(pnl*pnl[i])
    M.save(prefix+'normcovar.dat',normcovar)
    f = k[1]/k[0]
    if (volume == -1.):
        volume = (M.pi/k[0])**3
    #theoconst = volume * k[1]**3 * f**(-1.5)/(12.*M.pi**2) #1 not 0 since we're starting at 1
    for ki in range(1,endki-startki):
        for p1 in range(nparams):
            for p2 in range(nparams):
                paramFishMat[p1,p2,ki] = M.sum(M.sum(
                    M.inverse(normcovar[:ki+1,:ki+1]) *
                    M.outerproduct(dlogs[p1,:ki+1]*sqrt_veff[:ki+1],
                                   dlogs[p2,:ki+1]*sqrt_veff[:ki+1])))
        paramCovMat[:,:,ki] = M.inverse(paramFishMat[:,:,ki])
    return k[1:],paramCovMat[:,:,1:]
Author: JohanComparat, Project: pyLPT, Lines: 55, Source: info.py
Example 8: test_mrf_EM
def test_mrf_EM():
    """EXAMPLE: EM learning on a MRF"""
    """Define MRF graph structure"""
    C = 0
    S = 1
    R = 2
    W = 3
    nodes = 4
    adj_mat = sparse.lil_matrix((nodes, nodes), dtype=int)
    adj_mat[C, [R, S]] = 1
    adj_mat[R, W] = 1
    adj_mat[S, W] = 1
    adj_mat[R, S] = 1
    """Define clique domains and node sizes"""
    ns = 2 * np.ones((1, nodes))
    clq_doms = [[0], [0, 1], [0, 2], [1, 2, 3]]
    """Define cliques and potentials"""
    clqs = []
    clqs.append(cliques.discrete_clique(0, clq_doms[0], np.array([2])))
    clqs.append(cliques.discrete_clique(1, clq_doms[1], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(2, clq_doms[2], np.array([2, 2])))
    clqs.append(cliques.discrete_clique(3, clq_doms[3], np.array([2, 2, 2])))
    """Create the MRF"""
    net = models.mrf(adj_mat, ns, clqs)
    """
    Load the samples, and set one sample of one node to be unobserved; this
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/mrf_em_exact_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Author: bhrzslm, Project: uncertainty-reasoning, Lines: 54, Source: test_learning.py
Example 9: degraderesolution
def degraderesolution(prefix,factor,dlogstring):
    covar = M.load(prefix+'covar.dat')
    pnl = M.load(prefix+'pnl.dat')
    dlog = M.load(prefix+dlogstring)[:,1]
    k = pnl[:,0]*1.
    p = pnl[:,1]*1.
    gausspart = M.load(prefix+'gausspart.dat')
    nbins = len(k)
    nongausspart = covar - gausspart
    nongausspartnew = nongausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    knew = k[:nbins-factor:factor]*0.
    pnew = p[:nbins-factor:factor]*0.
    gausspartnew = gausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    nbinsnew = len(knew)
    dlognew = dlog[:nbins-factor:factor]*0.
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        print i1,i1+factor-1,nbins
        print i1new,nbinsnew
        weights = k[i1:i1+factor-1]**3
        sumweights = M.sum(weights)
        pnew[i1new] = M.sum(p[i1:i1+factor-1]*weights)/sumweights
        knew[i1new] = M.sum(k[i1:i1+factor-1]*weights)/sumweights
        dlognew[i1new] = M.sum(dlog[i1:i1+factor-1]*weights)/sumweights
    sqrtkfact = M.sqrt(k[1]/k[0])
    for i1 in range(0,nbins-factor,factor):
        i1new = i1/factor
        for i2 in range(0,nbins-factor,factor):
            i2new = i2/factor
            weights2 = M.outer(k[i1:i1+factor-1]**3,k[i2:i2+factor-1]**3)
            sumweights2 = M.sum(M.sum(weights2))
            nongausspartnew[i1new,i2new] = M.sum(M.sum(nongausspart[i1:i1+factor-1,i2:i2+factor-1]*weights2))/sumweights2
            if i1new == i2new:
                vk = (4.*M.pi/3.)*((k[i1+factor-1]*sqrtkfact)**3 - (k[i1]/sqrtkfact)**3)
                gausspartnew[i1new,i2new] = (2.*M.pi)**3 * 2.*(pnew[i1new]**2)/vk
    covarnew = gausspartnew + nongausspartnew
    prefixnew = prefix+'degrade'+str(factor)+'/'
    os.system('mkdir '+prefixnew)
    M.save(prefixnew+'pnl.dat',M.transpose([knew,pnew]), fmt = '%18.16e')
    M.save(prefixnew+'covar.dat',covarnew, fmt = '%18.16e')
    M.save(prefixnew+'gausspart.dat',gausspartnew, fmt = '%18.16e')
    M.save(prefixnew+dlogstring,M.transpose([knew,dlognew]), fmt = '%18.16e')
    M.save(prefix+'nbins.dat',M.array([nbinsnew],shape=(1,1,)), fmt = '%d')
Author: astrofanlee, Project: project_TL, Lines: 52, Source: halo.py
Example 10: LoadColormapMirrored
def LoadColormapMirrored(filename):
    data = pylab.load(filename)
    samples = len(data)/2
    t = linspace(0,1,samples)
    r = list(data[0::4])
    g = list(data[1::4])
    b = list(data[2::4])
    r.reverse()
    g.reverse()
    b.reverse()
    r = list(reversed(b)) + r
    g = list(reversed(g)) + g
    b = list(reversed(r)) + b
    red = []
    green = []
    blue = []
    for i in range(samples):
        red.append((t[i], r[i], r[i]))
        green.append((t[i], g[i], g[i]))
        blue.append((t[i], b[i], b[i]))
    cdict = { "red": red, "green": green, "blue": blue }
    cmap = matplotlib.colors.LinearSegmentedColormap("my_colors", cdict, 1024)
    return cmap
Author: AtomAleks, Project: PyProp, Lines: 31, Source: load_cmap.py
Example 11: readBinnedPower
def readBinnedPower(file):
    """
    @brief reads in a binned power spectrum from a file
    The file must have columns specified as: binLeft, binRight, l, cl
    """
    binLeft,binRight,l,cl = pylab.load(file,skiprows=50,unpack=True,usecols=[0,1,2,3])
    return l,cl
Author: msyriac, Project: flipper, Lines: 7, Source: fftTools.py
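The keywords used in Example 11 (skiprows, unpack, usecols) have the same meaning in numpy.loadtxt, so the call ports over unchanged when pylab.load is unavailable. A minimal sketch, with spectrum.dat standing in for a hypothetical binned power-spectrum file with 50 header lines and at least four columns:

import numpy as np

# unpack=True returns one array per requested column instead of a single 2-D array
binLeft, binRight, l, cl = np.loadtxt('spectrum.dat', skiprows=50,
                                      unpack=True, usecols=[0, 1, 2, 3])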
Example 12: GetSparseMatrix
def GetSparseMatrix(psi, config):
    matrix = pylab.load("d130_50stk-matel")
    row = array(matrix[:,0], dtype=int) - 1
    col = array(matrix[:,1], dtype=int) - 1
    matelem = array(matrix[:,2], dtype=complex)
    return row, col, matelem
Author: AtomAleks, Project: PyProp, Lines: 7, Source: example.py
Example 13: makeplot
def makeplot(filename):
    T0 = 2452525.374416
    P = 0.154525
    X = pl.load(filename)
    x = X[:,0]
    y = X[:,1]
    print x[0]  # check for HJD faults
    # orbital phase
    p = (x-T0)/P
    pl.figure(figsize=(6,4))
    pl.subplots_adjust(hspace=0.47,left=0.16)
    pl.subplot(211)
    pl.scatter(p,y,marker='o',s=0.1,color='k')
    pl.ylim(-0.06,0.06)
    pl.xlim(pl.average(p)-1.25,pl.average(p)+1.25)
    pl.ylabel('Intensity')
    pl.xlabel('Orbital Phase')
    pl.subplot(212)
    f,a = ast.signal.dft(x,y,0,4000,1)
    pl.plot(f,a,'k')
    pl.ylabel('Amplitude')
    pl.xlabel('Frequency (c/d)')
    #pl.ylim(yl[0],yl[1])
    #pl.vlines(3636,0.002,0.0025,color='k',linestyle='solid')
    #pl.vlines(829,0.002,0.0025,color='k',linestyle='solid')
    #pl.text(3500,0.00255,'DNO',fontsize=11)
    #pl.text(700,0.00255,'lpDNO',fontsize=11)
    pl.ylim(0.0,0.004)
    pl.savefig('%spng'%filename[:-3])
Author: ezietsman, Project: msc-thesis, Lines: 35, Source: make_archive_plots.py
Example 14: load_default
def load_default(path, closure):
    from pylab import load, save
    try:
        return load(path)
    except IOError:
        # cache miss: compute the value and save it (pylab.save takes the file name first)
        obj = closure()
        save(path, obj)
        return obj
Author: barapa, Project: HF-RNN, Lines: 8, Source: persistence.py
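Example 14's cache-or-compute pattern maps directly onto NumPy's text I/O, the usual substitute now that pylab.load and pylab.save are gone; both savers take the file name first. A minimal sketch under those assumptions; load_or_compute, cache.txt, and the lambda are illustrative placeholders:

import numpy as np

def load_or_compute(path, closure):
    """Return the array cached at `path`, computing and caching it on a miss."""
    try:
        return np.loadtxt(path)
    except OSError:                  # covers a missing or unreadable cache file
        obj = closure()
        np.savetxt(path, obj)        # file name first, then the array
        return obj

table = load_or_compute('cache.txt', lambda: np.random.rand(10, 3))   # hypothetical usage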
Example 15: rejuice
def rejuice(d63,d63_2,d63_4,d63_8):
    #pinit = M.load('mill/s63/pm.pnl.dat')
    p1 = M.load('mill/s63/pm.pnl.dat')
    plog1 = M.load('mill/s63/plogm.pnl.dat')
    p2 = M.load('mill/s63r2/pm.pnl.dat')
    plog2 = M.load('mill/s63r2/plogm.pnl.dat')
    p4 = M.load('mill/s63r4/pm.pnl.dat')
    plog4 = M.load('mill/s63r4/plogm.pnl.dat')
    p8 = M.load('mill/s63r8/pm.pnl.dat')
    plog8 = M.load('mill/s63r8/plogm.pnl.dat')
    f63 = N.exp(-N.mean(N.log(d63.flatten())))
    f63_2 = N.exp(-N.mean(N.log(d63_2.flatten())))
    f63_4 = N.exp(-N.mean(N.log(d63_4.flatten())))
    f63_8 = N.exp(-N.mean(N.log(d63_8.flatten())))
    #M.loglog(p1[:,0],p1[:,1]/(plog1[:,1]*f63),'b--')
    #M.loglog(p2[:,0],p2[:,1]/(plog2[:,1]*f63_2),'g--')
    #M.loglog(p4[:,0],p4[:,1]/(plog4[:,1]*f63_4),'r--')
    #M.loglog(p8[:,0],p8[:,1]/(plog8[:,1]*f63_8),'y--')
    #xis = N.mean(d63.flatten()**2)
    #xis_2 = N.mean(d63_2.flatten()**2)
    #xis_4 = N.mean(d63_4.flatten()**2)
    #xis_8 = N.mean(d63_8.flatten()**2)
    xis = (1.+ 0.5*N.sqrt(N.var(d63.flatten())))
    xis_2 = (1.+0.5*N.sqrt(N.var(d63_2.flatten())))
    xis_4 = (1.+0.5*N.sqrt(N.var(d63_4.flatten())))
    xis_8 = 1.+0.5*N.sqrt(N.var(d63_8.flatten()))
    print 'exps:',f63,f63_2,f63_4,f63_8
    print 'xis:',xis, xis_2,xis_4,xis_8
    M.loglog(plog1[:,0],p1[:,1]/(plog1[:,1]*f63)*(1.+2.*xis**2),'b')
    M.loglog(plog2[:,0],p2[:,1]/(plog2[:,1]*f63_2)*(1.+2.*xis_2**2),'g')
    M.loglog(plog4[:,0],p4[:,1]/(plog4[:,1]*f63_4)*(1.+2.*xis_4**2),'r')
    M.loglog(plog8[:,0],p8[:,1]/(plog8[:,1]*f63_8)*(1.+2.*xis_8**2),'y')
    M.loglog(plog1[:,0],p1[:,1]/(plog1[:,1]*xis),'b')
    M.loglog(plog2[:,0],p2[:,1]/(plog2[:,1]*xis_2),'g')
    M.loglog(plog4[:,0],p4[:,1]/(plog4[:,1]*xis_4),'r')
    M.loglog(plog8[:,0],p8[:,1]/(plog8[:,1]*xis_8),'y')
    M.xlabel(r'$k\ [\rm{Mpc}/h]$',fontsize=20)
    M.ylabel(r'$P_\delta(k)/P_{\log (1+\delta)}(k)$',fontsize=20)
    bias1 = N.sum(p1[:5,1]*p1[:5,2])/N.sum(plog1[:5,1]*plog1[:5,2])
    bias2 = N.sum(p2[:5,1]*p2[:5,2])/N.sum(plog2[:5,1]*plog2[:5,2])
    bias4 = N.sum(p4[:5,1]*p4[:5,2])/N.sum(plog4[:5,1]*plog4[:5,2])
    bias8 = N.sum(p8[:5,1]*p8[:5,2])/N.sum(plog8[:5,1]*plog8[:5,2])
    print bias1,bias2,bias4,bias8  #, N.log(bias1),N.log(bias2),N.log(bias4)
    M.show()
Author: astrofanlee, Project: project_TL, Lines: 56, Source: distrib.py
Example 16: load_from_pylab
def load_from_pylab(cls, pl, r0, r1):
    import pylab
    raw_beads = pylab.load(pl)
    span = r1 - r0
    table = [set() for i in range(span)]
    for r,c in raw_beads:
        table[int(r)-r0].add(int(c))
    return len(raw_beads), table
Author: Jorges1000, Project: TS, Lines: 8, Source: beadmask.py
Example 17: test_bnet_EM
def test_bnet_EM():
    """EXAMPLE: EM learning on a BNET"""
    """Create all data required to instantiate the bnet object"""
    nodes = 4
    dag = np.zeros((nodes, nodes))
    C = 0
    S = 1
    R = 2
    W = 3
    dag[C, [R, S]] = 1
    dag[R, W] = 1
    dag[S, W] = 1
    ns = 2 * np.ones((1, nodes))
    """Instantiate the model"""
    net = models.bnet(dag, ns, [])
    """
    Load the samples, and set one sample of one node to be unobserved; this
    should not affect the learnt parameters much, and demonstrates that
    the algorithm can handle unobserved samples.
    """
    samples = (np.array(pylab.load('./Data/lawn_samples.txt')) - 1).tolist()
    samples[0][0] = []
    """Learn the parameters"""
    net.learn_params_EM(samples[:])
    """Initialize the inference engine"""
    net.init_inference_engine(exact=True)
    """Create and enter evidence"""
    evidences = create_all_evidence(4, 2)
    mlcs = np.array([[0, 0, 0, 0]])
    for evidence in evidences:
        mlc = net.max_sum(evidence)
        mlcs = np.vstack((mlcs, mlc))
    """Read in expected values"""
    exp_mlcs = np.array(pylab.load('./Data/bnet_mle_exact_max_sum_res.txt'))
    """Assert that the output matched the expected values"""
    assert_array_equal(mlcs, exp_mlcs)
Author: bhrzslm, Project: uncertainty-reasoning, Lines: 44, Source: test_learning.py
Example 18: OnButton
def OnButton(self, evt):
    '''Handle button click event'''
    # Get title of clicked button
    label = evt.GetEventObject().GetLabel()
    if label == "Get Atmospheric Factors":  # Calculate
        try:
            sampleLat = float(self.lat.GetValue())
            sampleLon = float(self.lon.GetValue())
            NCEP = load(self.repo.GetClimateDataPath())
            Temperature = NCEP[0:73,:]; seaLevelPress = NCEP[73:146,:]
            LapseRate = NCEP[146:219,:]; topo = NCEP[219:292,:]
            Temperature = NCEP[73:0:-1,:]; seaLevelPress = NCEP[146:73:-1,:]
            LapseRate = NCEP[219:146:-1,:]; topo = NCEP[292:73:-1,:]
            lat = arange(90,-91,-2.5); lon = arange(0, 361,2.5)
            # localCoords is the site coordinates relative to the NCEP data coords
            # For interpolation the field is considered to bound 1 -> nx-1 , 1 -> ny-1
            xfac = len(lat) - 1
            yfac = len(lon) - 1
            localX = (max(lat) - sampleLat) * xfac / (max(lat) - min(lat)) + 1
            localY = sampleLon / max(lon) * yfac + 1
            localCoords = array([[ localX],[ localY ]])
            AnnualMeanSLP = ndimage.map_coordinates(seaLevelPress, localCoords)
            AnnualMeanTemp = ndimage.map_coordinates(Temperature, localCoords)
            AnnualMeanLapse = ndimage.map_coordinates(LapseRate, localCoords)
            sltempVal = "%3.1f" % (float(AnnualMeanTemp))
            slprecVal = "%3.1f" % (float(AnnualMeanSLP))
            LapseRate = "%3.1f" % (float(AnnualMeanLapse*-1))
            # Ignore empty calculation
            #if not compute.strip():
            if not sltempVal:
                return
            # Calculate result
            # result = eval(compute)
            # Add to history
            self.sltemp.Insert(str(sltempVal), 0)
            self.slprec.Insert(str(slprecVal), 0)
            self.lapse.Insert(str(LapseRate), 0)
            # Show result
            #self.display.SetValue(str(result))
            self.sltemp.SetValue(str(sltempVal))
            self.slprec.SetValue(str(slprecVal))
            self.lapse.SetValue(str(LapseRate))
            #self.slprec.SetValue(str(slprecVal))
        except Exception, e:
            wx.LogError(str(e))
            return
Author: Rhombus13, Project: Calvin, Lines: 56, Source: AtmosphericFactors.py
Example 19: makeplot
def makeplot(X,hjd,filename,xlo,xhi):
    # archive ephem
    T0 = 2452525.374416
    # august ephem
    #T0 = 2453964.330709
    P = 0.154525
    # set some lower and upper time axis limits. set xlo to None for auto limits
    xlo = xlo
    xhi = xhi
    X = pl.load(filename)
    a = X[:,0][:-1]
    p = X[:,1][:-1]
    x = (X[:,2][:-1]+hjd-T0)/P - int(((X[:,2][:-1]+hjd-T0)/P)[0])
    #x = X[:,2][:-1]
    siga = X[:,3][:-1]
    sigp = X[:,4][:-1]
    pl.figure(figsize=(6,4))
    pl.subplots_adjust(left=0.14,hspace=0.001)
    # plot the amplitude
    ax1 = pl.subplot(211)
    pl.errorbar(x,a,siga,fmt='ro')
    pl.xlabel('Orbital Phase')
    pl.ylabel('Amplitude')
    yt = pl.yticks()
    ax1.set_yticks(yt[0][1:-1])
    if xlo != None:
        pl.xlim(xlo,xhi)
    else:
        pl.xlim(min(x)-0.02, max(x)+0.02)
    pl.grid()
    # plot the phase
    ax2 = pl.subplot(212)
    pl.errorbar(x,p,sigp,fmt='go')
    pl.xlabel('Orbital Phase')
    pl.ylabel('Phase (O-C)')
    yt = pl.yticks()
    ax2.set_yticks(yt[0][1:-1])
    if xlo != None:
        pl.xlim(xlo,xhi)
    else:
        pl.xlim(min(x)-0.02, max(x)+0.02)
    pl.grid()
    #pl.ylim(-1.0,0.5)
    # remove the amplitude graph's x-axis
    pl.setp(ax1.get_xticklabels() , visible=False)
    #pl.savefig(filename[:-3]+'png')
    pl.show()
Author: ezietsman, Project: msc-thesis, Lines: 56, Source: plotoc_archive.py
Example 20: load_csv
def load_csv(self,f):
    """
    Loading data from a csv file. Uses pylab's load function. Seems much faster
    than scipy.io.read_array.
    """
    varnm = f.readline().split(',')
    # what is the date variable's key, if any, based on the index passed as argument
    if self.date_key != []:
        rawdata = pylab.load(f, delimiter=',', converters={self.date_key[0]:pylab.datestr2num})  # no need for 'skiprows' here
        self.date_key = varnm[self.date_key[0]]
    else:
        rawdata = pylab.load(f, delimiter=',')  # no need for 'skiprows' here
    # making sure that the variable names contain no leading or trailing spaces
    varnm = [i.strip() for i in varnm]
    # transforming the data into a dictionary
    self.data = dict(zip(varnm,rawdata.T))
Author: BKJackson, Project: SciPy-CookBook, Lines: 19, Source: dbase.py
Note: the pylab.load examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's License. Do not reproduce without permission.