本文整理汇总了Python中matplotlib.mlab.csv2rec函数的典型用法代码示例。如果您正苦于以下问题:Python csv2rec函数的具体用法?Python csv2rec怎么用?Python csv2rec使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了csv2rec函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: makediffs
def makediffs(models = _allmodels, verbose = False, kpp = True):
    """Write per-model difference files comparing KPP reference output to pykpp output.

    For each model, reads the KPP reference ``<model>.dat`` (space-delimited)
    and the pykpp ``<model>.pykpp.dat`` (comma-delimited), then writes
    ``<model>.diff.csv`` (absolute differences) and ``<model>.pct.csv``
    (percent differences) into the model's directory.

    Parameters
    ----------
    models : list
        Model file paths/names; each is reduced to its base name.
    verbose : bool
        NOTE(review): not referenced in this body — presumably kept for
        interface symmetry with sibling functions; confirm before removing.
    kpp : bool
        If True, read the reference .dat from a local KPP run directory;
        otherwise fall back to a reference .dat shipped next to this file
        (only available for models listed in _modelconfigs).
    """
    for model in models:
        # Normalize a path like 'dir/cbm4.kpp' to the bare model name 'cbm4'.
        model = os.path.splitext(os.path.basename(model))[0]
        if kpp:
            kppdat = csv2rec(os.path.join(model, model + '.dat'), delimiter = ' ')
        else:
            if model not in _modelconfigs:
                raise IOError('If KPP is not properly installed, you cannot run tests on mechanisms other than cbm4, saprc99, and small_strato.')
            kppdat = csv2rec(os.path.join(os.path.dirname(__file__), model + '.dat'), delimiter = ' ')
        pykppdat = csv2rec(os.path.join(model, model + '.pykpp.dat'), delimiter = ',')
        # Start from copies of the pykpp table so the output keeps its layout.
        diff = pykppdat.copy()
        pct = pykppdat.copy()
        # Columns present in both outputs vs. columns only pykpp produced.
        keys = set(kppdat.dtype.names).intersection(pykppdat.dtype.names)
        notkeys = set(pykppdat.dtype.names).difference(kppdat.dtype.names)
        notkeys.remove('t')
        for k in notkeys:
            # No reference value available for these columns: mark as NaN.
            diff[k] = np.nan
            pct[k] = np.nan
        for k in keys:
            diff[k] = pykppdat[k] - kppdat[k][:]
            pct[k] = diff[k] / kppdat[k][:] * 100
        # Time axes differ: KPP reports 'time' in hours, pykpp 't' in seconds
        # offset by its own start time t[0].
        diff['t'] = pykppdat['t'] - (kppdat['time'] * 3600. + pykppdat['t'][0])
        pct['t'] = diff['t'] / (kppdat['time'] * 3600. + pykppdat['t'][0]) * 100
        rec2csv(diff, os.path.join(model, model + '.diff.csv'), delimiter = ',')
        rec2csv(pct, os.path.join(model, model + '.pct.csv'), delimiter = ',')
开发者ID:barronh,项目名称:pykpp,代码行数:27,代码来源:test.py
示例2: extract_lai_fpar
def extract_lai_fpar(above_par_dat, below_par_dat):
    """Combine above- and below-canopy PAR records into one record array.

    For each below-canopy observation, the above-canopy PAR is taken as the
    mean of the three above-canopy samples nearest in time, and fAPAR is
    computed as (above - below) / above, clamped to 0 when the below-canopy
    reading exceeds the above-canopy reading.

    Parameters
    ----------
    above_par_dat : str
        Path to a CSV of above-canopy PAR with 'date' and 'par' columns.
    below_par_dat : str
        Path to a CSV of below-canopy PAR with 'plot', 'date', 'par' columns.

    Returns
    -------
    numpy.recarray
        Fields: date, plot, lat, lon, above_par, below_par, fapar.

    Notes
    -----
    Also reads 'lonlat_threet.csv' from the current working directory for
    the plot coordinates.
    """
    above_par_ra = mlab.csv2rec(above_par_dat)
    below_par_ra = mlab.csv2rec(below_par_dat)
    points_ra = mlab.csv2rec('lonlat_threet.csv')
    plot = below_par_ra['plot']
    date = below_par_ra['date']
    below_par = below_par_ra['par']
    # Coordinates are repeated twice -- presumably two observation rounds per
    # plot; TODO confirm against the below-canopy file layout.
    lats = np.array(points_ra['latitude'].tolist()*2)
    lons = np.array(points_ra['longitude'].tolist()*2)
    above_par = []
    fapar = []
    # Idiom fix: unpack enumerate instead of indexing the (i, value) tuple,
    # and avoid naming a local `time` (shadows the stdlib module name).
    for obs_idx, obs_date in enumerate(date):
        par_idx = find_nearest_idx(above_par_ra['date'], obs_date)
        # Smooth the above-canopy PAR over the nearest three samples.
        above_par.append(np.mean((above_par_ra['par'][par_idx-1],
                                  above_par_ra['par'][par_idx],
                                  above_par_ra['par'][par_idx+1])))
        if above_par_ra['par'][par_idx] < below_par[obs_idx]:
            # More light below than above: clamp fAPAR at zero.
            fapar.append(0)
        else:
            fapar.append((above_par_ra['par'][par_idx] - below_par[obs_idx]) /
                         above_par_ra['par'][par_idx])
    above_par = np.array(above_par)
    fapar = np.array(fapar)
    newra = np.column_stack((date, plot, lats, lons, above_par, below_par, fapar))
    new_ra = np.core.records.fromarrays(newra.transpose(),
                                        dtype=[('date', 'object'),
                                               ('plot', 'i'), ('lat', 'f'),
                                               ('lon', 'f'), ('above_par', 'f'),
                                               ('below_par', 'f'), ('fapar', 'f')])
    return new_ra
开发者ID:Ewan82,项目名称:ah_data,代码行数:29,代码来源:lai_cept.py
示例3: _make
def _make(self, output_file, basin_poly, ba_csv, fa_ncons_csv, area_csv, arid_thresh=0.03, use_thresh=0.012, **kwargs):
    """Compute the per-basin WRI indicator and join it onto a copy of the basin polygons.

    Loads baseline availability (BA), non-consumptive use and basin area
    tables, computes WRI = ncons / mean(BA), scores and categorizes it via
    self.score / self.categorize, then writes the joined feature class.

    NOTE(review): column meanings ('ncons', 'f_area', BA layout) are taken
    from the field names used below — confirm against the CSV producers.
    """
    print "loading data"
    # ba: col 0 = basin id, cols 1.. = per-period availability values.
    ba = np.genfromtxt(ba_csv,np.double,skip_header=1,delimiter=',')
    area_arr = mlab.csv2rec(area_csv)
    nc_arr = mlab.csv2rec(fa_ncons_csv)
    ids = ba[:,0]
    mean_ba = np.mean(ba[:,1:],1)
    # Re-order the csv columns to match the basin-id order of `ids`.
    ncons = gen_merge.arrange_vector_by_ids(nc_arr["ncons"],nc_arr["basinid"],ids).astype(np.double)
    area = gen_merge.arrange_vector_by_ids(area_arr["f_area"],area_arr["basinid"],ids).astype(np.double)
    wri = ncons/mean_ba
    # Basins with both negligible use and negligible availability (per unit
    # area) are treated as a special "arid and low use" case.
    miscmask = (ncons/area<use_thresh)*(mean_ba/area<arid_thresh)
    wri_s = self.score(wri)
    wri_s[miscmask] = MINSCORE
    wri_cat = self.categorize(wri_s, miscmask)
    joinarray = np.rec.fromarrays((ba[:,0],mean_ba,ncons,wri,wri_s,wri_cat),names=(BASIN_ID_FIELD,"BA","FA_NCONS",self.plot_field_name,"%s_s" % self.plot_field_name,"%s_cat" % self.plot_field_name))
    print "joining data"
    # Copy the input polygons, then extend the copy with the computed fields.
    ap.CopyFeatures_management(basin_poly,output_file)
    ap.da.ExtendTable(output_file,BASIN_ID_FIELD,joinarray,BASIN_ID_FIELD)
开发者ID:fgassert,项目名称:aqueduct_atlas,代码行数:25,代码来源:gen_WRI.py
示例4: test_sanity
def test_sanity():
    from nipy.modalities.fmri.fmristat.tests import FIACdesigns
    # NOTE(review): the string below is not a real docstring (it follows the
    # import statement), so it is a no-op expression; left in place.
    """
    Single subject fitting of FIAC model
    """
    # Based on file
    # subj3_evt_fonc1.txt
    # subj3_bloc_fonc3.txt
    # Nose-style generator test: yields one assertion per design column for
    # an event run and a block run of subject 3.
    for subj, run, dtype in [(3, 1, "event"), (3, 3, "block")]:
        nvol = 191
        TR = 2.5
        Tstart = 1.25
        # Acquisition times of the volumes, as a 't' record array.
        volume_times = np.arange(nvol) * TR + Tstart
        volume_times_rec = formula.make_recarray(volume_times, "t")
        path_dict = {"subj": subj, "run": run}
        # Decide block vs event design by which initial_*.csv exists on disk.
        if exists(pjoin(DATADIR, "fiac_%(subj)02d", "block", "initial_%(run)02d.csv") % path_dict):
            path_dict["design"] = "block"
        else:
            path_dict["design"] = "event"
        experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % path_dict)
        initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % path_dict)
        # Build the combined design matrix: experimental conditions plus the
        # modelled-out "initial" confound trials.
        X_exper, cons_exper = design.event_design(experiment, volume_times_rec, hrfs=delay.spectral)
        X_initial, _ = design.event_design(initial, volume_times_rec, hrfs=[hrf.glover])
        X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}))
        # Reference design matrix from fmristat, stored as text.
        Xf = np.loadtxt(StringIO(FIACdesigns.designs[dtype]))
        # Every column of our design must correlate > 0.999 with some
        # column of the reference design.
        for i in range(X.shape[1]):
            yield nitest.assert_true, (matchcol(X[:, i], Xf.T)[1] > 0.999)
开发者ID:yarikoptic,项目名称:NiPy-OLD,代码行数:35,代码来源:fiac_util.py
示例5: test_transform_data
def test_transform_data():
    """
    Testing the transformation of the data from raw data to functions
    used for fitting a function.
    """
    # Reading the same file as a rec array or as a path must yield the
    # same transformed arrays.
    from matplotlib import mlab
    ortho_path = op.join(data_path, 'ortho.csv')
    ortho = mlab.csv2rec(ortho_path)
    para = mlab.csv2rec(op.join(data_path, 'para.csv'))
    x1, y1, n1 = sb.transform_data(ortho)
    x2, y2, n2 = sb.transform_data(ortho_path)
    npt.assert_equal(x1, x2)
    npt.assert_equal(y1, y2)
    # A hand-built dataset with a known expected answer: three contrast
    # levels, two trials each.
    raw = np.array([[0.1, 2], [0.1, 1], [0.2, 2], [0.2, 2], [0.3, 1],
                    [0.3, 1]])
    my_data = pd.DataFrame(raw, columns=['contrast1', 'answer'])
    my_x, my_y, my_n = sb.transform_data(my_data)
    npt.assert_equal(my_x, np.array([0.1, 0.2, 0.3]))
    npt.assert_equal(my_y, np.array([0.5, 0, 1.0]))
    npt.assert_equal(my_n, np.array([2, 2, 2]))
开发者ID:amsjavan,项目名称:nazarkav,代码行数:25,代码来源:test_nazarkav.py
示例6: get_experiment_initial
def get_experiment_initial(path_dict):
    """Get the record arrays for the experimental/initial designs.

    Parameters
    ----------
    path_dict : dict
        containing keys 'rootdir', 'run', 'subj'

    Returns
    -------
    experiment, initial : Two record arrays.

    Raises
    ------
    IOError
        If the experiment design CSV for this run does not exist.
    """
    # The following lines read in the .csv files
    # and return recarrays, with fields
    # experiment: ['time', 'sentence', 'speaker']
    # initial: ['time', 'initial']
    rootdir = path_dict['rootdir']
    if not exists(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict):
        # BUG FIX: the original message interpolated %(subj)d for the run
        # number, reporting the wrong run in the error text.
        e = "can't find design for subject=%(subj)d,run=%(run)d" % path_dict
        raise IOError(e)
    experiment = csv2rec(pjoin(rootdir, "experiment_%(run)02d.csv") % path_dict)
    initial = csv2rec(pjoin(rootdir, "initial_%(run)02d.csv") % path_dict)
    return experiment, initial
开发者ID:GaelVaroquaux,项目名称:nipy,代码行数:27,代码来源:fiac_util.py
示例7: test_fractionate
def test_fractionate(self):
    """fractionate should rescale grid coordinates of each archival dataset."""
    archives = [csv2rec('arch1.csv'), csv2rec('arch2.csv')]
    fr = fractionate(archives, (10, 10), (5, 5), ['row', 'column'])
    # Spot-check rescaled coordinates in both datasets.
    self.assertTrue(fr[0]['row'][3] == 5)
    self.assertTrue(fr[1]['column'][2] == 0)
开发者ID:ethanwhite,项目名称:macroeco,代码行数:7,代码来源:test_form_func.py
示例8: _make
def _make(self, output_file, basin_poly, ba_csv, withdrawal_csv, consumption_csv, area_csv, arid_thresh=0.03, use_thresh=0.012, **kwargs):
    """Compute the per-basin BWS (baseline water stress) indicator and join it onto a copy of the basin polygons.

    Loads baseline availability (BA), total withdrawal, total consumption
    and basin area tables, computes BWS = withdrawal / mean(BA), scores and
    categorizes it via self.score / self.categorize, then writes the joined
    feature class.

    NOTE(review): column meanings ('ut', 'ct', 'f_area') are taken from the
    field names used below — confirm against the CSV producers.
    """
    print "loading data"
    # ba: col 0 = basin id, cols 1.. = per-period availability values.
    ba = np.genfromtxt(ba_csv,np.double,skip_header=1,delimiter=',')
    area_arr = mlab.csv2rec(area_csv)
    ut_arr = mlab.csv2rec(withdrawal_csv)
    ct_arr = mlab.csv2rec(consumption_csv)
    ids = ba[:,0]
    mean_ba = np.mean(ba[:,1:],1)
    # Re-order the csv columns to match the basin-id order of `ids`.
    ut = gen_merge.arrange_vector_by_ids(ut_arr["ut"],ut_arr["basinid"],ids).astype(np.double)
    uc = gen_merge.arrange_vector_by_ids(ct_arr["ct"],ct_arr["basinid"],ids).astype(np.double)
    area = gen_merge.arrange_vector_by_ids(area_arr["f_area"],area_arr["basinid"],ids).astype(np.double)
    bws = ut/mean_ba
    # Basins with both negligible use and negligible availability (per unit
    # area) are treated as a special "arid and low use" case and pinned to
    # the maximum score below.
    miscmask = (ut/area<use_thresh)*(mean_ba/area<arid_thresh)
    #miscmask2 = (ut/area[:,1]<use_thresh)*(mean_ba/area[:,1]<arid_thresh)*(bws<.8)
    bws_s = self.score(bws)
    bws_s[miscmask] = MAXSCORE
    bws_cat = self.categorize(bws_s, miscmask)
    joinarray = np.rec.fromarrays((ba[:,0],mean_ba,ut,uc,bws,bws_s,bws_cat,area),names=(BASIN_ID_FIELD,"BA","WITHDRAWAL","CONSUMPTION",self.plot_field_name,"%s_s" % self.plot_field_name,"%s_cat" % self.plot_field_name,"AREAM3"))
    print "joining data"
    # Copy the input polygons, then extend the copy with the computed fields.
    ap.CopyFeatures_management(basin_poly,output_file)
    ap.da.ExtendTable(output_file,BASIN_ID_FIELD,joinarray,BASIN_ID_FIELD)
开发者ID:fgassert,项目名称:aqueduct_atlas,代码行数:26,代码来源:gen_BWS.py
示例9: test_merge_formatted
def test_merge_formatted(self):
    """merge_formatted should pool the two archival datasets into one."""
    archives = [csv2rec('arch1.csv'), csv2rec('arch2.csv')]
    merged = merge_formatted(archives)
    # Column sums across the pooled data are fixed by the fixtures.
    self.assertTrue(sum(merged['rew']) == 2)
    self.assertTrue(sum(merged['column']) == 12)
开发者ID:ethanwhite,项目名称:macroeco,代码行数:7,代码来源:test_form_func.py
示例10: rewrite_spec
def rewrite_spec(subj, run, root = "/home/jtaylo/FIAC-HBM2009"):
    """
    Take a FIAC specification file and get two specifications
    (experiment, begin).
    This creates two new .csv files, one for the experimental
    conditions, the other for the "initial" confounding trials that
    are to be modelled out.
    For the block design, the "initial" trials are the first
    trials of each block. For the event designs, the
    "initial" trials are made up of just the first trial.
    """
    # Detect the design type by which spec file exists for this subject/run.
    if exists(pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_evt_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run}):
        designtype = 'evt'
    else:
        designtype = 'bloc'
    # Fix the format of the specification so it is
    # more in the form of a 2-way ANOVA
    # (condition codes 1-4 become sentence x speaker factor pairs).
    eventdict = {1:'SSt_SSp', 2:'SSt_DSp', 3:'DSt_SSp', 4:'DSt_DSp'}
    s = StringIO()
    w = csv.writer(s)
    w.writerow(['time', 'sentence', 'speaker'])
    specfile = pjoin("%(root)s", "fiac%(subj)d", "subj%(subj)d_%(design)s_fonc%(run)d.txt") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    d = np.loadtxt(specfile)
    for row in d:
        # row[0] is the event time; row[1] is the condition code
        # (a float from loadtxt, which still hashes to the int keys above).
        w.writerow([row[0]] + eventdict[row[1]].split('_'))
    s.seek(0)
    # Re-read the rewritten spec from the in-memory CSV as a recarray.
    d = csv2rec(s)
    # Now, take care of the 'begin' event
    # This is due to the FIAC design
    if designtype == 'evt':
        # Event design: only the very first trial is an "initial" trial.
        b = np.array([(d[0]['time'], 1)], np.dtype([('time', np.float),
                                                    ('initial', np.int)]))
        d = d[1:]
    else:
        # Block design: every 6th trial starts a block -> "initial" trials.
        k = np.equal(np.arange(d.shape[0]) % 6, 0)
        b = np.array([(tt, 1) for tt in d[k]['time']], np.dtype([('time', np.float),
                                                                 ('initial', np.int)]))
        d = d[~k]
    designtype = {'bloc':'block', 'evt':'event'}[designtype]
    # NOTE(review): output paths are rooted at DATADIR, not `root`; the
    # 'root' key in the dict is unused by these templates.
    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    rec2csv(d, fname)
    experiment = csv2rec(fname)
    fname = pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv") % {'root':root, 'subj':subj, 'run':run, 'design':designtype}
    rec2csv(b, fname)
    initial = csv2rec(fname)
    return d, b
开发者ID:GaelVaroquaux,项目名称:nipy,代码行数:59,代码来源:fiac_util.py
示例11: append_rec
def append_rec(recs):
    """Merge several CSV files (one per rec spec) into a single record
    array, joining on the shared 'sys_tick' column with 0 for missing."""
    base = mlab.csv2rec(recs[0]["file"])
    for spec in recs[1:]:
        addition = mlab.csv2rec(spec["file"])
        # Join the accumulated result with the new file once per field.
        for field in addition.dtype.fields.keys():
            base = mlab.recs_join("sys_tick", field, [base, addition], missing=0)
    return base
开发者ID:archaelus,项目名称:emetric,代码行数:8,代码来源:merge.py
示例12: test_format_dense
def test_format_dense(self):
    """format_dense should expand dense grids into per-cell count records."""
    archives = [csv2rec('arch1.csv'), csv2rec('arch2.csv')]
    form = format_dense(archives, 3, (4, 4))
    # First dataset: leading counts; second dataset: full count column.
    expected_head = np.array([1, 1, 3, 3])
    expected_full = np.array([1, 1, 3, 3, 1, 1, 5, 1])
    self.assertTrue(np.all(form[0]['count'][:4] == expected_head))
    self.assertTrue(np.all(form[1]['count'] == expected_full))
开发者ID:ethanwhite,项目名称:macroeco,代码行数:8,代码来源:test_form_func.py
示例13: test_add_data_fields
def test_add_data_fields(self):
    """add_data_fields should attach one constant-valued column per dataset."""
    archives = [csv2rec('arch1.csv'), csv2rec('arch2.csv')]
    # Single added field: each dataset gets its own year value.
    alt_data = add_data_fields(archives, {'year': (1998, 2002)})
    self.assertTrue(np.all(alt_data[0]['year'] == '1998'))
    self.assertTrue(np.all(alt_data[1]['year'] == '2002'))
    # Multiple added fields at once.
    alt_data = add_data_fields(archives,
                               {'year': (1998, 2002), 'why': ('h', 'a')})
    self.assertTrue(np.all(alt_data[0]['why'] == 'h'))
开发者ID:ethanwhite,项目名称:macroeco,代码行数:10,代码来源:test_form_func.py
示例14: plotGraphs
def plotGraphs():
    """Render today's temperature and pump-activity CSV logs to a PNG.

    Reads ./data/<gDateStr>_temperatures.csv (and, when present, the
    _pumpON.csv / _pumpOFF.csv event files), draws them on one Agg figure,
    writes the PNG under /var/www/Prometheus/data/ and refreshes the
    'current_temperatures.png' symlink.
    """
    global gDateStr, gTimeStr
    print "Plotting..."
    print "temperatures"
    filename = "./data/" + gDateStr + "_temperatures.csv";
    r = mlab.csv2rec(filename, delimiter=',')
    fig = Figure(figsize=(6,6))
    canvas = FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax.set_title('Temperatures '+gDateStr,fontsize=14)
    ax.set_xlabel('Time',fontsize=6)
    ax.set_ylabel('Temperature (C)',fontsize=6)
    ax.grid(True,linestyle='-',color='0.75')
    # run two sanitize passes over the data
    # (columns 1 and 2 are the two temperature series; column 0 is time)
    r[r.dtype.names[1]] = arrayops.sanitize( r[r.dtype.names[1]] )
    r[r.dtype.names[2]] = arrayops.sanitize( r[r.dtype.names[2]] )
    # Generate the plot.
    ax.plot(r[r.dtype.names[0]],r[r.dtype.names[1]],color='tomato');
    ax.plot(r[r.dtype.names[0]],r[r.dtype.names[2]],color='green');
    # plot pump on times (file may not exist if the pump never switched on)
    print "pump on"
    filename = "./data/" + gDateStr + "_pumpON.csv";
    if os.path.exists(filename):
        r = mlab.csv2rec(filename, delimiter=',')
        ax.scatter(r[r.dtype.names[0]],r[r.dtype.names[1]],color='orange');
    # plot pump off times
    print "pump off"
    filename = "./data/" + gDateStr + "_pumpOFF.csv";
    if os.path.exists(filename):
        r = mlab.csv2rec(filename, delimiter=',')
        ax.scatter(r[r.dtype.names[0]],r[r.dtype.names[1]],color='blue');
    # Shrink the tick label fonts to match the small axis labels.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(6)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(6)
    # Fixed temperature range in degrees C.
    ax.set_ylim(-5, 35)
    # Save the generated Plot to a PNG file.
    filename = "/var/www/Prometheus/data/"+gDateStr+"_temperatures.png"
    canvas.print_figure(filename,dpi=100)
    os.system('ln -sf '+filename+' /var/www/Prometheus/data/current_temperatures.png')
开发者ID:opiesche,项目名称:Prometheus,代码行数:55,代码来源:prometheus_controller.py
示例15: main
def main():
inputlist = ["bin/global_BWS_20121015.csv","bin/global_WRI_20121015.csv"]
lhs = mlab.csv2rec("bin/global_GU_20121015.csv")
rhslist = []
for x in inputlist:
rhslist.append(mlab.csv2rec(x))
rhslist[0]["basinid"] = rhslist[0]["basinid"].astype(np.long)
keys = ("basinid","countryid","id")
lhs = join_recs_on_keys(lhs,rhslist,keys)
mlab.rec2csv(lhs,"bin/test.csv")
print "complete"
开发者ID:fgassert,项目名称:aqueduct_atlas,代码行数:12,代码来源:gen_merge.py
示例16: test_event_design
def test_event_design():
    """Build FIAC block and event designs from the alternate descriptions,
    dropping the 'initial' trials, and check event_design runs on both."""
    block = csv2rec(StringIO(altdescr["block"]))
    event = csv2rec(StringIO(altdescr["event"]))
    # Volume acquisition times: 191 volumes, TR 2.5s, first at 1.25s.
    t = np.arange(191) * 2.5 + 1.25
    # Keep masks: drop every 6th trial (block starts) / the first trial.
    bkeep = np.not_equal((np.arange(block.time.shape[0])) % 6, 0)
    ekeep = np.greater(np.arange(event.time.shape[0]), 0)
    # Even though there is a FIAC block experiment
    # the design is represented as an event design
    # with the same event repeated several times in a row...
    Xblock, cblock = design.event_design(block[bkeep], t, hrfs=delay.spectral)
    Xevent, cevent = design.event_design(event[ekeep], t, hrfs=delay.spectral)
开发者ID:fperez,项目名称:nipy,代码行数:14,代码来源:test_FIAC.py
示例17: test_sanity
def test_sanity():
    from nipy.modalities.fmri import design, hrf
    import nipy.modalities.fmri.fmristat.hrf as fshrf
    from nipy.modalities.fmri.fmristat.tests import FIACdesigns
    from nipy.modalities.fmri.fmristat.tests.test_FIAC import matchcol
    from nipy.algorithms.statistics import formula
    from nose.tools import assert_true
    # NOTE(review): the string below is not a real docstring (it follows the
    # imports), so it is a no-op expression; left in place.
    """
    Single subject fitting of FIAC model
    """
    # Based on file
    # subj3_evt_fonc1.txt
    # subj3_bloc_fonc3.txt
    # Check an event run and a block run of subject 3 against the stored
    # fmristat reference designs.
    for subj, run, design_type in [(3, 1, 'event'), (3, 3, 'block')]:
        nvol = 191
        TR = 2.5
        Tstart = 1.25
        # Acquisition times of the volumes, as a 't' record array.
        volume_times = np.arange(nvol)*TR + Tstart
        volume_times_rec = formula.make_recarray(volume_times, 't')
        path_dict = {'subj':subj, 'run':run}
        # Decide block vs event design by which initial_*.csv exists on disk.
        if exists(pjoin(DATADIR, "fiac_%(subj)02d",
                        "block", "initial_%(run)02d.csv") % path_dict):
            path_dict['design'] = 'block'
        else:
            path_dict['design'] = 'event'
        experiment = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "experiment_%(run)02d.csv")
                             % path_dict)
        initial = csv2rec(pjoin(DATADIR, "fiac_%(subj)02d", "%(design)s", "initial_%(run)02d.csv")
                          % path_dict)
        # Combined design: experimental conditions plus the modelled-out
        # "initial" confound trials.
        X_exper, cons_exper = design.event_design(experiment,
                                                  volume_times_rec,
                                                  hrfs=fshrf.spectral)
        X_initial, _ = design.event_design(initial,
                                           volume_times_rec,
                                           hrfs=[hrf.glover])
        X, cons = design.stack_designs((X_exper, cons_exper), (X_initial, {}))
        # Get original fmristat design
        Xf = FIACdesigns.fmristat[design_type]
        # Check our new design can be closely matched to the original
        for i in range(X.shape[1]):
            # Columns can be very well correlated negatively or positively
            assert_true(abs(matchcol(X[:,i], Xf)[1]) > 0.999)
开发者ID:GaelVaroquaux,项目名称:nipy,代码行数:50,代码来源:fiac_util.py
示例18: replace_vals
def replace_vals(filename, replace, delim=','):
    '''
    Replace the missing values in filename with a specified value.

    Parameters
    ----------
    filename : string
        Will be read into a rec array
    replace : tuple
        First object is the value to treat as missing and second object is
        what to replace it with
    delim : string
        File delimiter (default ',')

    Returns
    -------
    : recarray
        The file's data with all detected missing entries replaced.
    '''
    data = csv2rec(filename, delimiter=delim, missing=replace[0])
    for nm in data.dtype.names:
        try:
            # Missing float
            isNaN = np.isnan(data[nm])
        except TypeError:
            # BUG FIX: was a bare `except:`. np.isnan raises TypeError on
            # non-numeric columns; catch only that instead of swallowing
            # every possible error.
            isNaN = np.zeros(len(data[nm]), dtype=bool)
        isBlank = np.array([it == '' for it in data[nm]])
        isMinusOne = (data[nm] == -1)  # Missing int
        # Missing other (use identity, not equality, for None checks)
        isNone = np.array([it is None for it in data[nm]])
        ind = np.bitwise_or(isNaN, isBlank)
        ind = np.bitwise_or(ind, isMinusOne)
        ind = np.bitwise_or(ind, isNone)
        data[nm][ind] = replace[1]
    return data
开发者ID:gavinsimpson,项目名称:macroeco,代码行数:31,代码来源:format_data.py
示例19: shift
def shift(self):
    """
    Print the expected gain/loss of each party.

    Tallies, per state, the party of the current poll leader vs. the party
    of the incumbent, then prints nnew - nincum per party in the order of
    self.partyarr.
    """
    nincum = np.zeros(3) #dem,gop,ind
    nnew = np.zeros(3) #dem,gop,ind
    rec = mlab.csv2rec('senate_polls.csv')
    states = np.unique(rec.state)
    for state in states:
        data = self.polldat(state)
        # First poll row, columns 1-3 -- presumably the three parties'
        # support numbers in partyarr order; TODO confirm against polldat.
        data = data[0,1:4]
        candidates = self.candidates(state)
        # candidates[3] holds the incumbent's party label.
        iincum = (np.where(self.partyarr == candidates[3]))[0][0]
        # Index of the party currently leading the poll.
        ileader = np.argmax(data)
        nnew[ileader] = nnew[ileader] +1
        nincum[iincum] = nincum[iincum] + 1
    # Positive entries: seats gained; negative: seats lost.
    shift = nnew - nincum
    print 'Expected Shift in Senate Party Balance'
    print self.partyarr
    print shift
开发者ID:eptune,项目名称:Homework,代码行数:26,代码来源:poll.py
示例20: open_dense_data
def open_dense_data(filenames, direct, delim=','):
    '''
    Open a list of dense data files and return them as rec arrays.

    Parameters
    ----------
    filenames : list
        A list of filenames
    direct : string
        The directory within data/archival/ where the files are.
        Example 'ANBO_2010' or 'LBRI'
    delim : string
        The default file delimiter is ','

    Returns
    -------
    : list
        A list of rec arrays
    '''
    assert direct.find('/') == -1, "%s should not contain a '/'" % (direct)
    # Resolve the archival directory relative to two levels above the cwd.
    filedir = jp(pd(pd(gcwd())), 'archival', direct)
    # One rec array per input file.
    return [plt.csv2rec(jp(filedir, fname), delimiter=delim)
            for fname in filenames]
开发者ID:ethanwhite,项目名称:macroeco,代码行数:31,代码来源:form_func.py
注:本文中的matplotlib.mlab.csv2rec函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论