This article collects typical usage examples of the pyhdf.SD.SD class in Python. If you are wondering what the SD class is for, how to use it, or what SD code looks like in practice, the curated class examples below may help.
Below, 20 code examples of the SD class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
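Before the examples, here is a minimal sketch of the typical SD workflow: open an HDF4 file, list its science datasets, read one into a NumPy array, then release the handles. The file name 'example.hdf' and the dataset name 'some_dataset' are placeholders, not values taken from the examples below.

import numpy as np
from pyhdf.SD import SD, SDC

hdf = SD('example.hdf', SDC.READ)      # open an HDF4 file read-only (placeholder path)
print(hdf.datasets())                  # dict: dataset name -> (dim names, shape, type, index)

sds = hdf.select('some_dataset')       # select a science dataset by name (placeholder name)
data = sds[:].astype(np.float64)       # read the whole dataset as a NumPy array
attrs = sds.attributes(full=1)         # attributes such as scale_factor or _FillValue, if present

sds.endaccess()                        # release the dataset
hdf.end()                              # close the file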
Example 1: get_variable_names
def get_variable_names(self, filenames, data_type=None):
    try:
        from pyhdf.SD import SD
        from pyhdf.HDF import HDF
    except ImportError:
        raise ImportError("HDF support was not installed, please reinstall with pyhdf to read HDF files.")

    valid_variables = set([])
    for filename in filenames:
        # Do VD variables
        datafile = HDF(filename)
        vdata = datafile.vstart()
        variables = vdata.vdatainfo()
        # Assumes that latitude shape == longitude shape (it should):
        # dim_length = [var[3] for var in variables if var[0] == 'Latitude'][0]
        for var in variables:
            # if var[3] == dim_length:
            valid_variables.add(var[0])

        # Do SD variables:
        sd = SD(filename)
        datasets = sd.datasets()
        # if 'Height' in datasets:
        #     valid_shape = datasets['Height'][1]
        for var in datasets:
            # if datasets[var][1] == valid_shape:
            valid_variables.add(var)

    return valid_variables
Author: duncanwp | Project: cis_plugins | Lines: 29 | Source: cloudsat_modis.py
Example 2: read_rrc
def read_rrc(inpath):
    '''Read rrc data (m*n) from an hdf file.

    Bands 1-5 and 13-16 for MODIS Rrc;
    Rrc_1238, Rrc_443-862, ozone, senz, solz for VIIRS rrc.
    '''
    hdf = SD(inpath, SDC.READ)
    # dts = sorted(hdf.datasets().keys())
    modis_key = ['CorrRefl_01', 'CorrRefl_02', 'CorrRefl_03', 'CorrRefl_04', 'CorrRefl_05',
                 'CorrRefl_13', 'CorrRefl_14', 'CorrRefl_15', 'CorrRefl_16']
    viirs_key = ['Rrc_443', 'Rrc_486', 'Rrc_551', 'Rrc_671', 'Rrc_745', 'Rrc_862', 'Rrc_1238']
    mission = os.path.basename(inpath)[0]
    if mission == 'A' or mission == 'T':
        keys = modis_key
    elif mission == 'V':
        keys = viirs_key
    else:
        keys = hdf.datasets().keys()
    for i, dt in enumerate(keys):
        print(i, dt)
        band = hdf.select(dt)[:, :]
        if i == 0:
            limit = (band.shape[0], band.shape[1], len(keys))
            rrc = np.zeros(limit, dtype=np.float64)
            rrc[:, :, i] = band
        else:
            rrc[:, :, i] = band
    hdf.end()
    print(rrc.shape)
    return rrc
Author: zgcao | Project: learningpy | Lines: 27 | Source: disp_rrc_aqua_viirs.py
Example 3: run
def run(FILE_NAME):

    DATAFIELD_NAME = 'dHat'
    if USE_NETCDF4:
        from netCDF4 import Dataset
        nc = Dataset(FILE_NAME)
        var = nc.variables[DATAFIELD_NAME]

        # This datafield has scale factor and add offset attributes, but no
        # fill value.  We'll turn off automatic scaling and do it ourselves.
        var.set_auto_maskandscale(False)
        data = nc.variables[DATAFIELD_NAME][:].astype(np.float64)

        # Retrieve scale/offset attributes.
        scale_factor = var.scale_factor
        add_offset = var.add_offset

        # Retrieve the geolocation data.
        latitude = nc.variables['geolocation'][:, :, 0]
        longitude = nc.variables['geolocation'][:, :, 1]
    else:
        from pyhdf.SD import SD, SDC
        hdf = SD(FILE_NAME, SDC.READ)
        ds = hdf.select(DATAFIELD_NAME)
        data = ds[:, :].astype(np.double)

        # Handle scale/offset attributes.
        attrs = ds.attributes(full=1)
        sfa = attrs["scale_factor"]
        scale_factor = sfa[0]
        aoa = attrs["add_offset"]
        add_offset = aoa[0]

        # Retrieve the geolocation data.
        geo = hdf.select('geolocation')
        latitude = geo[:, :, 0]
        longitude = geo[:, :, 1]

    data = data / scale_factor + add_offset

    # Draw an equidistant cylindrical projection using the high resolution
    # coastline database.
    m = Basemap(projection='cyl', resolution='h',
                llcrnrlat=30, urcrnrlat=36,
                llcrnrlon=121, urcrnrlon=133)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(30, 37), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(121, 133, 2), labels=[0, 0, 0, 1])
    m.pcolormesh(longitude, latitude, data, latlon=True)
    cb = m.colorbar()
    cb.set_label('Unit:mm')

    basename = os.path.basename(FILE_NAME)
    plt.title('{0}\n{1}'.format(basename, DATAFIELD_NAME))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.png".format(basename)
    fig.savefig(pngfile)
Author: hdfeos | Project: zoo_python | Lines: 60 | Source: TRMM_2B31_CSI_dHat_zoom.py
Example 4: load
def load(self, fldname, **kwargs):
    """Load Cali Current fields for a given day"""
    self._timeparams(**kwargs)

    if fldname == 'chl':
        filename = "/C%04i%03i_chl_mapped.hdf" % (self.yr, self.yd)
        # ncfieldname = 'chl_%04i_%03i' % (yr, yd)
        def scale(PV): return 10**(PV * 0.015 - 2)
    elif fldname == 'sst':
        filename = "/M%04i%03i_sst_mapped.hdf" % (self.yr, self.yd)
        # ncfieldname = 'sst_%04i_%03i' % (yr, yd)
        def scale(PV): return PV * 0.15000001 - 3

    if not os.path.isfile(self.datadir + filename):
        print("Downloading " + filename)
        self.download(fldname, self.jd)

    h = SD(self.datadir + filename, SDC.READ)
    ncfieldname = list(h.datasets().keys())[0]
    fld = h.select(ncfieldname)
    attr = fld.attributes()
    PV = fld[:].astype(np.float64)
    PV[PV < 0] = PV[PV < 0] + 256
    PV[PV == 0] = np.nan
    PV[PV == 255] = np.nan
    setattr(self, fldname, scale(PV)[self.j1:self.j2, self.i1:self.i2])
Author: brorfred | Project: njord | Lines: 25 | Source: mati.py
Example 5: main
def main():
    varname_to_rpn_name = {
        "precipitation": "PR",
        "relativeError": "RERR"
    }
    varnames = list(varname_to_rpn_name.keys())

    target_dir = "/skynet3_rech1/huziy/from_hdf4"
    source_dir = "/st1_fs2/winger/Validation/TRMM/HDF_format"

    for f_name in os.listdir(source_dir):
        if not f_name.endswith("HDF"):
            continue

        path = os.path.join(source_dir, f_name)
        ds = SD(path)
        print(ds.datasets())

        target_path = os.path.join(target_dir, f_name + ".rpn")
        r_obj = RPN(target_path, mode="w")
        for varname in varnames:
            var_data = ds.select(varname)[0, :, :]
            r_obj.write_2D_field(
                name=varname_to_rpn_name[varname],
                data=var_data, label=varname, grid_type="L",
                ig=[25, 25, 4013, 18012])
        r_obj.close()
Author: guziy | Project: RPN | Lines: 28 | Source: hdf_test.py
Example 6: main
def main(cal_file, with_cp):

    from pyhdf.SD import SD

    if with_cp:
        cmd = 'cp %s /home/noel/scratch/' % (cal_file)
        print("running " + cmd)
        os.system(cmd)
        filename = os.path.basename(cal_file)
        cal_file = '/home/noel/scratch/' + filename

    print('Reading ' + cal_file)

    vars = ['Latitude', 'Longitude',
            'Total_Attenuated_Backscatter_532', 'Attenuated_Backscatter_1064',
            'Perpendicular_Attenuated_Backscatter_532',
            'Pressure', 'Temperature', 'Molecular_Number_Density',
            'Tropopause_Height', 'Surface_Elevation']

    hdf = SD(cal_file)
    for var in vars:
        print('Reading ' + var)
        hdf_var = hdf.select(var)
        data = hdf_var.get()
        hdf_var.endaccess()
    hdf.end()
    print('ok.')

    if with_cp:
        print('Removing ' + filename)
        cmd = 'rm -f /home/noel/scratch/' + filename
        os.system(cmd)
Author: vnoel | Project: CEL2 | Lines: 30 | Source: test_read_speed.py
Example 7: rainfall_anunal_car
def rainfall_anunal_car(year):
    file = glob.glob('/Users/yuewang/Documents/DATA/atl/ATL_3B42V7_rain_accum.' + str(year) + '*')
    rainfall_0 = []
    for i in file:
        atl = SD(i, SDC.READ)
        rainfall = atl.select('RAIN_TOTAL')
        rainfall_value = rainfall.get()
        rainfall_0.append(rainfall_value)
    rainfall_single = np.array(rainfall_0)
    rainfall_anunal = sum(rainfall_single)
    rainfall_anunal_car = rainfall_anunal[238:286, 372:476]

    # Compute the mean of the non-zero values.
    ind = np.where(rainfall_anunal_car != 0)
    rf_annual = []
    for i, j in zip(*ind):
        mm = rainfall_anunal_car[i, j]
        rf_annual.append(mm)
    rf_annual = np.array(rf_annual)
    d = np.mean(rf_annual)
    return d
Author: yueewang | Project: research- | Lines: 26 | Source: mean_cal.py
Example 8: rainfall_anunal_GMX
def rainfall_anunal_GMX(year):
    file = glob.glob('/Users/yuewang/Documents/DATA/atl/ATL_3B42V7_rain_accum.' + str(year) + '*')
    rainfall_0 = []
    for i in file:
        atl = SD(i, SDC.READ)
        rainfall = atl.select('RAIN_TOTAL')
        rainfall_value = rainfall.get()
        rainfall_0.append(rainfall_value)
    rainfall_single = np.array(rainfall_0)
    rainfall_anunal = sum(rainfall_single)
    rainfall_anunal_GMX = rainfall_anunal[280:320, 340:400]

    ind = np.where(rainfall_anunal_GMX != 0)
    rf_annual = []
    for i, j in zip(*ind):
        mm = rainfall_anunal_GMX[i, j]
        rf_annual.append(mm)
    rf_annual = np.array(rf_annual)
    c = np.mean(rf_annual)
    return c
Author: yueewang | Project: research- | Lines: 25 | Source: mean_cal.py
Example 9: test_1000m_to_250m
def test_1000m_to_250m(self):
    """Test the 1 km to 250 meter interpolation facility."""
    # gfilename = \
    #     "/san1/test/data/modis/MOD03_A12278_113638_2012278145123.hdf"
    gfilename = "/local_disk/src/python-geotiepoints/tests/MOD03_A12278_113638_2012278145123.hdf"
    # result_filename = \
    #     "/san1/test/data/modis/250m_lonlat_results.npz"
    result_filename = "/local_disk/src/python-geotiepoints/tests/250m_lonlat_results.npz"

    from pyhdf.SD import SD
    from pyhdf.error import HDF4Error

    try:
        gdata = SD(gfilename)
    except HDF4Error:
        print("Failed reading eos-hdf file %s" % gfilename)
        return

    lats = gdata.select("Latitude")[0:50, :]
    lons = gdata.select("Longitude")[0:50, :]

    verif = np.load(result_filename)
    vlons = verif['lons']
    vlats = verif['lats']
    tlons, tlats = modis1kmto250m(lons, lats)

    self.assert_(np.allclose(tlons, vlons, atol=0.05))
    self.assert_(np.allclose(tlats, vlats, atol=0.05))
Author: adybbroe | Project: pygac | Lines: 28 | Source: geotiepoints.py
Example 10: test_1000m_to_250m
def test_1000m_to_250m(self):
    """Test the 1 km to 250 meter interpolation facility."""
    gfilename_hdf = "testdata/MOD03_A12278_113638_2012278145123.hdf"
    gfilename = "testdata/250m_lonlat_section_input.npz"
    result_filename = "testdata/250m_lonlat_section_result.npz"

    from pyhdf.SD import SD
    from pyhdf.error import HDF4Error

    gdata = None
    try:
        gdata = SD(gfilename_hdf)
    except HDF4Error:
        print("Failed reading eos-hdf file %s" % gfilename_hdf)
        try:
            indata = np.load(gfilename)
        except IOError:
            return

    if gdata:
        lats = gdata.select("Latitude")[20:50, :]
        lons = gdata.select("Longitude")[20:50, :]
    else:
        lats = indata['lat'] / 1000.
        lons = indata['lon'] / 1000.

    verif = np.load(result_filename)
    vlons = verif['lon'] / 1000.
    vlats = verif['lat'] / 1000.
    tlons, tlats = modis1kmto250m(lons, lats)

    self.assert_(np.allclose(tlons, vlons, atol=0.05))
    self.assert_(np.allclose(tlats, vlats, atol=0.05))
Author: bomakoto | Project: python-geotiepoints | Lines: 34 | Source: test_modis.py
Example 11: export_multi_fluid_LFM
def export_multi_fluid_LFM(argv):
    if len(argv) >= 2:
        input_filename = argv[0]
        output_filename = argv[1]
        print(input_filename)
        sd = SD(input_filename, SDC.READ)
        grid = get_corners(sd)
        timesteps = 0
        # step = 1640000
        for key in sd.datasets().keys():
            shift = key.find('time_step')
            if shift == 0:
                if len(argv) == 3:
                    step = argv[2]
                    if key == 'time_step_' + str(step):
                        export_timestep(sd, output_filename, key, grid)
                else:
                    export_timestep(sd, output_filename, key, grid)
                timesteps += 1
        print('timesteps found in file:', timesteps)
    else:
        print('usage: python lfm_split.py input_multi_timestep_hdf output_filename_prefix step(optional)')
Author: NeelSavani | Project: ccmc-software | Lines: 29 | Source: lfm_split.py
Example 12: load_standard_lfm_hdf
def load_standard_lfm_hdf(filename):
    """Load the standard formatted hdf which we want to emulate."""
    f = SD(filename, SDC.READ)
    X_grid = f.select('X_grid')
    Y_grid = f.select('Y_grid')
    Z_grid = f.select('Z_grid')

    # X_grid has size (nkp1, njp1, nip1)
    (nkp1, njp1, nip1) = X_grid[:].shape

    # The LFM reader expects i to vary fastest, then j, then k.
    # However, the LFM pre-converted files store positions with k varying fastest (column-major).
    # Recommend saving in column-major format. If it fails, we can always switch.
    # i = 0; j = 0; k = 0
    # print 'printing standard first row'
    # for i in range(nip1):
    #     print X_grid[k, j, i] / R_e
    # print 'printing j sweep'
    # i = 0; j = 0; k = 0
    # for j in range(njp1):
    #     print X_grid[k, j, i] / R_e
    # print 'printing k sweep'
    # i = 0; j = 0; k = 0
    # for k in range(nkp1):
    #     print X_grid[k, j, i] / R_e

    print('standard nip1,njp1,nkp1 =', nip1, njp1, nkp1)
    ni = nip1 - 1
    nj = njp1 - 1
    nk = nkp1 - 1
    print('standard ni,nj,nk =', ni, nj, nk)
Author: NeelSavani | Project: ccmc-software | Lines: 35 | Source: lfm_split.py
Example 13: write_interpolated
def write_interpolated(filename, f0, f1, fact, datasets):
    '''
    Interpolate two hdf files f0 and f1 using factor fact, and
    write the result to filename.
    '''
    hdf = SD(filename, SDC.WRITE | SDC.CREATE)
    for dataset in datasets:

        try:
            info = SD(f0).select(dataset).info()
        except:
            print('Error loading %s in %s' % (dataset, f0), file=stderr)
            raise

        typ = info[3]
        shp = info[2]
        met0 = SD(f0).select(dataset).get()
        met1 = SD(f1).select(dataset).get()

        interp = (1 - fact) * met0 + fact * met1
        interp = interp.astype({
            SDC.INT16: 'int16',
            SDC.FLOAT32: 'float32',
            SDC.FLOAT64: 'float64',
        }[typ])

        # write the interpolated dataset
        sds = hdf.create(dataset, typ, shp)
        sds[:] = interp[:]
        sds.endaccess()

    hdf.end()
Author: bcdev | Project: oc-cci | Lines: 34 | Source: common-get_meteo_calvalus.py
Example 14: run
def run(FILE_NAME):

    # Identify the data field.
    DATAFIELD_NAME = 'Longwave Flux (2.5R)'

    if USE_NETCDF4:
        from netCDF4 import Dataset
        nc = Dataset(FILE_NAME)
        data = nc.variables[DATAFIELD_NAME][:].astype(np.float64)
    else:
        from pyhdf.SD import SD, SDC
        hdf = SD(FILE_NAME, SDC.READ)

        # Read dataset.
        data2D = hdf.select(DATAFIELD_NAME)
        data = data2D[:, :]

    # Set the fill value and units.
    # See "CERES Data Management System ES-4 Collection Guide" [1] and a sample
    # image by NASA [2] for details.  The fill value is 3.4028235E38; here we
    # just use the max of the data.
    fillvalue = np.max(data)
    data[data == fillvalue] = np.nan
    datam = np.ma.masked_array(data, mask=np.isnan(data))

    units = 'Watts/Meter^2'

    ysize, xsize = data.shape
    xinc = 360.0 / xsize
    yinc = 180.0 / ysize
    x0, x1 = (-180, 180)
    y0, y1 = (-90, 90)
    longitude = np.linspace(x0 + xinc/2, x1 - xinc/2, xsize)
    latitude = np.linspace(y0 + yinc/2, y1 - yinc/2, ysize)

    # Flip the latitude to run from 90 to -90.
    latitude = latitude[::-1]

    # The data is global, so render in a global projection.
    m = Basemap(projection='cyl', resolution='l',
                llcrnrlat=-90, urcrnrlat=90,
                llcrnrlon=-180, urcrnrlon=180)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90., 90, 45))
    m.drawmeridians(np.arange(-180., 180, 45), labels=[True, False, False, True])
    m.pcolormesh(longitude, latitude, datam, latlon=True)
    cb = m.colorbar()
    cb.set_label(units)

    basename = os.path.basename(FILE_NAME)
    plt.title('{0}\n{1}'.format(basename, DATAFIELD_NAME))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.png".format(basename)
    fig.savefig(pngfile)
Author: hdfeos | Project: zoo_python | Lines: 59 | Source: CER_ES4_TRMM_Longwave_Flux_2_5_R.py
Example 15: __init__
def __init__(self, filename, filename_info, filetype_info):
    super(HDF4FileHandler, self).__init__(filename, filename_info, filetype_info)
    self.file_content = {}
    file_handle = SD(self.filename, SDC.READ)
    self._collect_attrs('', file_handle.attributes())
    for k, v in file_handle.datasets().items():
        self.collect_metadata(k, file_handle.select(k))
    del file_handle
Author: davidh-ssec | Project: satpy | Lines: 8 | Source: hdf4_utils.py
Example 16: print_dataset_2A12
def print_dataset_2A12(*arg):
    FILE_NAME = arg[0] + '1B01.' + arg[1]
    hdf = SD(FILE_NAME, SDC.READ)
    # List available SDS datasets.
    for ds in hdf.datasets():
        print(ds)
Author: rvalenzuelar | Project: trmm_vis | Lines: 8 | Source: read_trmm.py
Example 17: readhdf
def readhdf(filename, fieldname, ignoreE=True):
    try:
        hdf = SD(filename, SDC.READ)
        data = hdf.select(fieldname)[:].copy()
        hdf.end()
    except Exception as e:
        if not ignoreE:
            print(e)
        data = Dataset(filename)[fieldname][:].copy()
Author: wgx998877 | Project: pyglass | Lines: 8 | Source: util.py
Example 18: setup_grid
def setup_grid(self):
    """Setup necessary variables for grid"""
    if not os.path.isfile(self.datadir + self.gridfile):
        urllib.urlretrieve(self.dataurl + self.gridfile,
                           self.datadir + self.gridfile)
    g = SD(self.datadir + self.gridfile, SDC.READ)
    self.llat = g.select('Latitude')[:]
    self.llon = g.select('Longitude')[:]
Author: brorfred | Project: njord | Lines: 9 | Source: mati.py
Example 19: print_dataset_1C21
def print_dataset_1C21(*arg):
    FILE_NAME = arg[0] + '1C21.' + arg[1]
    print(FILE_NAME)
    hdf = SD(FILE_NAME, SDC.READ)
    # List available SDS datasets.
    for ds in hdf.datasets():
        print(ds)
Author: rvalenzuelar | Project: trmm_vis | Lines: 9 | Source: read_trmm.py
Example 20: run
def run(FILE_NAME):

    DATAFIELD_NAME = 'Temperature_MW_A'
    if USE_NETCDF4:
        from netCDF4 import Dataset
        nc = Dataset(FILE_NAME)

        # The variable has a fill value,
        # so netCDF4 converts it to a float64 masked array for us.
        data = nc.variables[DATAFIELD_NAME][11, :, :]
        latitude = nc.variables['Latitude'][:]
        longitude = nc.variables['Longitude'][:]
    else:
        from pyhdf.SD import SD, SDC
        hdf = SD(FILE_NAME, SDC.READ)

        # List available SDS datasets.
        # print hdf.datasets()

        # Read dataset.
        data3D = hdf.select(DATAFIELD_NAME)
        data = data3D[11, :, :]

        # Read geolocation dataset.
        lat = hdf.select('Latitude')
        latitude = lat[:, :]
        lon = hdf.select('Longitude')
        longitude = lon[:, :]

        # Handle fill value.
        attrs = data3D.attributes(full=1)
        fillvalue = attrs["_FillValue"]

        # fillvalue[0] is the attribute value.
        fv = fillvalue[0]
        data[data == fv] = np.nan
        data = np.ma.masked_array(data, np.isnan(data))

    # Draw an equidistant cylindrical projection using the low resolution
    # coastline database.
    m = Basemap(projection='cyl', resolution='l',
                llcrnrlat=-90, urcrnrlat=90,
                llcrnrlon=-180, urcrnrlon=180)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90., 120., 30.), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(-180., 181., 45.), labels=[0, 0, 0, 1])
    m.pcolormesh(longitude, latitude, data, latlon=True, alpha=0.90)
    cb = m.colorbar()
    cb.set_label('Unit:K')

    basename = os.path.basename(FILE_NAME)
    plt.title('{0}\n {1} at TempPrsLvls=11'.format(basename, DATAFIELD_NAME))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.{1}.py.png".format(basename, DATAFIELD_NAME)
    fig.savefig(pngfile)
Author: hdfeos | Project: zoo_python | Lines: 57 | Source: AIRS_L3_Temperature_MW_A_Lvls11.py
Note: The pyhdf.SD.SD class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets are selected from open-source projects contributed by their developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.