This article collects typical usage examples of the Python function util.geo.lat_lon.lon_lat_to_cartesian. If you have been wondering what lon_lat_to_cartesian does and how to use it, the curated code examples below should help.
The 20 lon_lat_to_cartesian code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
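All of these examples follow the same recipe: convert the source-grid and target-grid longitudes/latitudes to 3D Cartesian points, build a KD-tree over the source points, and query it with the target points to obtain nearest-neighbour indices. For orientation, here is a minimal sketch of what a helper like lon_lat_to_cartesian typically computes, together with the query pattern used throughout the examples below; the actual util.geo.lat_lon implementation may differ in details such as the Earth radius it assumes.

import numpy as np
from scipy.spatial import cKDTree

EARTH_RADIUS_M = 6371000.0  # assumed mean Earth radius; the real helper may use a different value


def lon_lat_to_cartesian(lon, lat, r=EARTH_RADIUS_M):
    """Convert longitude/latitude in degrees to x, y, z on a sphere of radius r (sketch, not the original code)."""
    lon_r = np.radians(lon)
    lat_r = np.radians(lat)
    x = r * np.cos(lat_r) * np.cos(lon_r)
    y = r * np.cos(lat_r) * np.sin(lon_r)
    z = r * np.sin(lat_r)
    return x, y, z


# Nearest-neighbour lookup pattern used in the examples below (toy grids for illustration):
xs, ys, zs = lon_lat_to_cartesian(np.array([10.0, 11.0]), np.array([50.0, 51.0]))  # source points
xt, yt, zt = lon_lat_to_cartesian(np.array([10.5]), np.array([50.5]))              # target points
tree = cKDTree(list(zip(xs, ys, zs)))
dists, inds = tree.query(list(zip(xt, yt, zt)))  # inds indexes the flattened source grid

Working with chord distances in 3D avoids the pitfalls of treating longitude/latitude degrees as planar coordinates (wrap-around at the date line, shrinking longitude spacing near the poles).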
Example 1: plot_difference
def plot_difference(basemap,
                    lons1, lats1, data1, label1,
                    lons2, lats2, data2, label2,
                    base_folder="/skynet3_rech1/huziy/veg_fractions/"):
    xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons2.flatten(), lats2.flatten())
    xt, yt, zt = lat_lon.lon_lat_to_cartesian(lons1.flatten(), lats1.flatten())

    ktree = cKDTree(list(zip(xs, ys, zs)))
    dists, inds = ktree.query(list(zip(xt, yt, zt)))

    # Calculate differences
    diff_dict = {}
    for key, the_field in data2.items():
        diff_dict[key] = the_field.flatten()[inds].reshape(data1[key].shape) - data1[key]

    x, y = basemap(lons1, lats1)

    imname = "sand_clay_diff_{0}-{1}.jpeg".format(label2, label1)
    impath = os.path.join(base_folder, imname)
    plot_sand_and_clay_diff(x, y, basemap, diff_dict["SAND"], diff_dict["CLAY"],
                            out_image=impath)

    del diff_dict["SAND"], diff_dict["CLAY"]

    imname = "veg_fract_diff_{0}-{1}.jpeg".format(label2, label1)
    impath = os.path.join(base_folder, imname)
    plot_veg_fractions_diff(x, y, basemap, diff_dict,
                            out_image=impath)
Author: guziy, Project: RPN, Lines of code: 27, Source file: plot_veg_fractions.py
Example 2: __init__
def __init__(self, file_path="", var_name="",
             bathymetry_path="/skynet3_rech1/huziy/NEMO_OFFICIAL/dev_v3_4_STABLE_2012/NEMOGCM/CONFIG/GLK_LIM3_Michigan/EXP00/bathy_meter.nc"):
    """
    :param file_path:
    :param var_name:
    :param bathymetry_path: used to mask land points
    """
    self.current_time_frame = -1
    self.var_name = var_name
    self.cube = iris.load_cube(file_path, constraint=iris.Constraint(cube_func=lambda c: c.var_name == var_name))
    self.lons, self.lats = cartography.get_xy_grids(self.cube)

    lons2d_gl, lats2d_gl = nemo_commons.get_2d_lons_lats_from_nemo(path=bathymetry_path)
    mask_gl = nemo_commons.get_mask(path=bathymetry_path)

    xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons2d_gl.flatten(), lats2d_gl.flatten())
    xt, yt, zt = lat_lon.lon_lat_to_cartesian(self.lons.flatten(), self.lats.flatten())

    tree = cKDTree(list(zip(xs, ys, zs)))
    dists, indices = tree.query(list(zip(xt, yt, zt)))

    self.mask = mask_gl.flatten()[indices].reshape(self.lons.shape)

    self.nt = self.cube.shape[0]
    assert isinstance(self.cube, Cube)
    print(self.nt)
Author: guziy, Project: RPN, Lines of code: 28, Source file: nemo_output_manager.py
Example 3: read_and_interpolate_homa_data
def read_and_interpolate_homa_data(self, path="", start_year=None, end_year=None, season_to_months=None):
    """
    :param path:
    :param target_cube:
    """
    import pandas as pd

    ds = Dataset(path)
    sst = ds.variables["sst"][:]

    # read longitudes and latitudes from a file
    lons_source = ds.variables["lon"][:]
    lats_source = ds.variables["lat"][:]

    month_to_season = defaultdict(lambda: "no-season")
    for seas, mths in season_to_months.items():
        for m in mths:
            month_to_season[m] = seas

    # time variable
    time_var = ds.variables["time"]
    dates = num2date(time_var[:], time_var.units)

    if hasattr(sst, "mask"):
        sst[sst.mask] = np.nan

    panel = pd.Panel(data=sst, items=dates, major_axis=range(sst.shape[1]), minor_axis=range(sst.shape[2]))
    seasonal_sst = panel.groupby(
        lambda d: (d.year, month_to_season[d.month]), axis="items").mean()

    # source grid
    xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons_source.flatten(), lats_source.flatten())
    kdtree = cKDTree(data=list(zip(xs, ys, zs)))

    # target grid
    xt, yt, zt = lat_lon.lon_lat_to_cartesian(self.lons.flatten(), self.lats.flatten())
    dists, inds = kdtree.query(list(zip(xt, yt, zt)))

    assert isinstance(seasonal_sst, pd.Panel)

    result = {}
    for the_year in range(start_year, end_year + 1):
        result[the_year] = {}
        for the_season in list(season_to_months.keys()):
            the_mean = seasonal_sst.select(lambda item: item == (the_year, the_season), axis="items")
            result[the_year][the_season] = the_mean.values.flatten()[inds].reshape(self.lons.shape)

    return result
Author: guziy, Project: RPN, Lines of code: 59, Source file: nemo_yearly_files_manager.py
Example 4: main
def main(dfs_var_name="t2", cru_var_name="tmp",
         dfs_folder="/home/huziy/skynet3_rech1/NEMO_OFFICIAL/DFS5.2_interpolated",
         cru_file="data/cru_data/CRUTS3.1/cru_ts_3_10.1901.2009.tmp.dat.nc"):
    if not os.path.isdir(NEMO_IMAGES_DIR):
        os.mkdir(NEMO_IMAGES_DIR)

    # year range is inclusive [start_year, end_year]
    start_year = 1981
    end_year = 2009

    season_name_to_months = OrderedDict([
        ("Winter", (1, 2, 12)),
        ("Spring", list(range(3, 6))),
        ("Summer", list(range(6, 9))),
        ("Fall", list(range(9, 12)))])

    cru_t_manager = CRUDataManager(var_name=cru_var_name, path=cru_file)
    cru_lons, cru_lats = cru_t_manager.lons2d, cru_t_manager.lats2d

    # get seasonal means (CRU)
    season_to_mean_cru = cru_t_manager.get_seasonal_means(season_name_to_months=season_name_to_months,
                                                          start_year=start_year,
                                                          end_year=end_year)

    # get seasonal means (Drakkar)
    dfs_manager = DFSDataManager(folder_path=dfs_folder, var_name=dfs_var_name)
    season_to_mean_dfs = dfs_manager.get_seasonal_means(season_name_to_months=season_name_to_months,
                                                        start_year=start_year,
                                                        end_year=end_year)

    dfs_lons, dfs_lats = dfs_manager.get_lons_and_lats_2d()

    xt, yt, zt = lat_lon.lon_lat_to_cartesian(dfs_lons.flatten(), dfs_lats.flatten())
    xs, ys, zs = lat_lon.lon_lat_to_cartesian(cru_lons.flatten(), cru_lats.flatten())

    ktree = cKDTree(data=list(zip(xs, ys, zs)))
    dists, inds = ktree.query(list(zip(xt, yt, zt)))

    season_to_err = OrderedDict()
    for k in season_to_mean_dfs:
        interpolated_cru = season_to_mean_cru[k].flatten()[inds].reshape(dfs_lons.shape)
        if dfs_var_name.lower() == "t2":
            # interpolated_cru += 273.15
            season_to_mean_dfs[k] -= 273.15
        elif dfs_var_name.lower() == "precip":  # precipitation in mm/day
            season_to_mean_dfs[k] *= 24 * 60 * 60

        season_to_err[k] = season_to_mean_dfs[k]  # - interpolated_cru

    season_indicator = "-".join(sorted(season_to_err.keys()))
    fig_path = os.path.join(NEMO_IMAGES_DIR, "{3}_errors_{0}-{1}_{2}_dfs.jpeg".format(start_year,
                                                                                      end_year,
                                                                                      season_indicator,
                                                                                      dfs_var_name))

    basemap = nemo_commons.get_default_basemap_for_glk(dfs_lons, dfs_lats, resolution="l")
    x, y = basemap(dfs_lons, dfs_lats)
    coords_and_basemap = {
        "basemap": basemap, "x": x, "y": y
    }

    plot_errors_in_one_figure(season_to_err, fig_path=fig_path, **coords_and_basemap)
Author: guziy, Project: RPN, Lines of code: 59, Source file: compare_forcing_clim_with_CRU.py
Example 5: main
def main(in_file: Path, target_grid_file: Path, out_dir: Path=None):
    if out_dir is not None:
        out_dir.mkdir(exist_ok=True)
        out_file = out_dir / (in_file.name + "_interpolated")
    else:
        out_file = in_file.parent / (in_file.name + "_interpolated")

    if out_file.exists():
        print(f"Skipping {in_file}, output already exists ({out_file})")
        return

    with xarray.open_dataset(target_grid_file) as ds_grid:
        lons, lats = ds_grid["lon"][:].values, ds_grid["lat"][:].values
        xt, yt, zt = lat_lon.lon_lat_to_cartesian(lons.flatten(), lats.flatten())

        with xarray.open_dataset(in_file) as ds_in:
            lons_s, lats_s = ds_in["lon"][:].values, ds_in["lat"][:].values
            xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons_s.flatten(), lats_s.flatten())

            ktree = KDTree(list(zip(xs, ys, zs)))
            dists, inds = ktree.query(list(zip(xt, yt, zt)), k=1)

            # resample to daily
            ds_in_r = ds_in.resample(t="1D", keep_attrs=True).mean()

            ds_out = xarray.Dataset()
            for vname, var in ds_grid.variables.items():
                ds_out[vname] = var[:]

            ds_out["t"] = ds_in_r["t"][:]

            for vname, var in ds_in_r.variables.items():
                assert isinstance(var, xarray.Variable)
                var = var.squeeze()

                # only interested in (t, x, y) fields
                if var.ndim != 3:
                    print(f"skipping {vname}")
                    continue

                if vname.lower() not in ["t", "time", "lon", "lat"]:
                    print(f"Processing {vname}")
                    var_interpolated = [var[ti].values.flatten()[inds].reshape(lons.shape) for ti in range(var.shape[0])]
                    ds_out[vname] = xarray.DataArray(
                        var_interpolated, dims=("t", "x", "y"),
                        attrs=var.attrs,
                    )

            ds_out.to_netcdf(out_file)
Author: guziy, Project: RPN, Lines of code: 59, Source file: interpolate_fields_to_the_hles_focus_domain.py
Example 6: interpolate_data_to_model_grid
def interpolate_data_to_model_grid(self, model_lons_2d, model_lats_2d, data_obs):
    x0, y0, z0 = lat_lon.lon_lat_to_cartesian(model_lons_2d.flatten(), model_lats_2d.flatten())
    x, y, z = lat_lon.lon_lat_to_cartesian(self.lons2d.flatten(), self.lats2d.flatten())

    if self.ktree is None:
        self.ktree = KDTree(list(zip(x, y, z)))

    d, i = self.ktree.query(list(zip(x0, y0, z0)))
    return data_obs.flatten()[i].reshape(model_lons_2d.shape)
Author: guziy, Project: RPN, Lines of code: 10, Source file: grdc.py
Example 7: get_dataless_model_points_for_stations
def get_dataless_model_points_for_stations(station_list, accumulation_area_km2_2d,
                                           model_lons2d, model_lats2d,
                                           i_array, j_array):
    """
    returns a map {station => model point} for comparing modelled streamflows with observations
    this uses exactly the same method for searching model points as the one in diagnose_point (nc-version)
    """
    lons = model_lons2d[i_array, j_array]
    lats = model_lats2d[i_array, j_array]
    model_acc_area_1d = accumulation_area_km2_2d[i_array, j_array]
    npoints = 1

    result = {}

    x0, y0, z0 = lat_lon.lon_lat_to_cartesian(lons, lats)
    kdtree = cKDTree(list(zip(x0, y0, z0)))

    for s in station_list:
        # list of model points which could represent the station
        assert isinstance(s, Station)

        x, y, z = lat_lon.lon_lat_to_cartesian(s.longitude, s.latitude)
        dists, inds = kdtree.query((x, y, z), k=5)

        if npoints == 1:
            deltaDaMin = np.min(np.abs(model_acc_area_1d[inds] - s.drainage_km2))

            # this returns a list of numpy arrays
            imin = np.where(np.abs(model_acc_area_1d[inds] - s.drainage_km2) == deltaDaMin)[0][0]
            selected_cell_index = inds[imin]

            # check that the difference in drainage areas is not too big (less than 10 %)
            print(s.river_name, deltaDaMin / s.drainage_km2)
            # if deltaDaMin / s.drainage_km2 > 0.2:
            #     continue

            mp = ModelPoint()
            mp.accumulation_area = model_acc_area_1d[selected_cell_index]
            mp.longitude = lons[selected_cell_index]
            mp.latitude = lats[selected_cell_index]
            mp.cell_index = selected_cell_index
            mp.distance_to_station = dists[imin]

            print("Distance to station: ", dists[imin])
            print("Model accumulation area: ", mp.accumulation_area)
            print("Obs accumulation area: ", s.drainage_km2)

            result[s] = mp
        else:
            raise Exception("npoints = {0}, is not yet implemented ...".format(npoints))

    return result
Author: guziy, Project: RPN, Lines of code: 53, Source file: validate_streamflow_with_obs.py
Example 8: get_timeseries_for_points
def get_timeseries_for_points(lons, lats, data_path="", varname="LD"):
    """
    return the list of timeseries for the points with the given coordinates,
    using the nearest neighbor interpolation
    :param varname: variable name
    :param data_path: path to the hdf5 file
    :param lons:
    :param lats:
    :return: pd.DataFrame with axes (time, point_index)
    """
    assert len(lons) == len(lats)

    df = None
    indices = None
    lk_fraction = None

    with tb.open_file(data_path) as h:
        var_table = h.get_node("/{}".format(varname))

        for i, row in enumerate(var_table):
            if df is None:
                df = pd.DataFrame(index=range(len(var_table)), columns=["date", ] + list(range(len(lons))))

                # calculate indices of the grid corresponding to the points
                bmp_info = get_basemap_info_from_hdf(file_path=data_path)
                """
                :type bmp_info: BasemapInfo
                """

                grid_lons, grid_lats = bmp_info.lons, bmp_info.lats
                x, y, z = lat_lon.lon_lat_to_cartesian(grid_lons.flatten(), grid_lats.flatten())
                ktree = KDTree(list(zip(x, y, z)))

                x1, y1, z1 = lat_lon.lon_lat_to_cartesian(lons, lats)
                dists, indices = ktree.query(list(zip(x1, y1, z1)))

                lk_fraction = get_array_from_file(path=data_path, var_name="lake_fraction")

            df.loc[i, :] = [datetime(row["year"], row["month"], row["day"], row["hour"]), ] + list(row["field"].flatten()[indices])

        # print lake fractions
        print(lk_fraction.flatten()[indices])
        print(sum(lk_fraction.flatten()[indices] > 0.05))

    df.set_index("date", inplace=True)
    df.sort_index(inplace=True)
    return df
Author: guziy, Project: RPN, Lines of code: 48, Source file: do_analysis_using_pytables.py
Example 9: get_daily_timeseries_using_mask
def get_daily_timeseries_using_mask(self, mask, lons2d_target, lats2d_target, multipliers_2d, start_date=None,
                                    end_date=None):
    """
    multipliers_2d used to multiply the values when aggregating into a single timeseries
    sum(mi * vi) - in space
    """
    bool_vect = np.array([start_date <= t <= end_date for t in self.times])
    new_times = list(filter(lambda t: start_date <= t <= end_date, self.times))
    new_vals = self.var_data[bool_vect, :, :]

    x_out, y_out, z_out = lat_lon.lon_lat_to_cartesian(lons2d_target.flatten(), lats2d_target.flatten())

    print(len(new_times))

    flat_mask = mask.flatten()
    x_out = x_out[flat_mask == 1]
    y_out = y_out[flat_mask == 1]
    z_out = z_out[flat_mask == 1]
    mults = multipliers_2d.flatten()[flat_mask == 1]

    data_interp = [self._interp_and_sum(new_vals[t, :, :].flatten(), flat_mask, x_out, y_out, z_out) for t in
                   range(len(new_times))]

    print("Interpolated all")
    return TimeSeries(time=new_times, data=data_interp).get_ts_of_daily_means()
Author: guziy, Project: RPN, Lines of code: 25, Source file: temperature.py
Example 10: _get_spatial_integrals_over_points_in_time
def _get_spatial_integrals_over_points_in_time(self, lons2d_target, lats2d_target, mask,
                                               areas2d, start_date=None, end_date=None, var_name=""):
    """
    i) Interpolate to the grid (lons2d_target, lats2d_target)
    ii) Apply the mask to the interpolated fields and sum with coefficients from areas2d
    Note: the interpolation is done using the nearest neighbor approach
    returns a timeseries of {t -> sum(Ai[mask]*xi[mask])(t)}
    """
    # interpolation
    x1, y1, z1 = lat_lon.lon_lat_to_cartesian(lons2d_target.flatten(), lats2d_target.flatten())
    dists, indices = self.kdtree.query(list(zip(x1, y1, z1)))

    mask1d = mask.flatten().astype(int)
    areas1d = areas2d.flatten()

    result = {}
    for the_date in list(self.date_to_path.keys()):
        if start_date is not None:
            if start_date > the_date:
                continue

        if end_date is not None:
            if end_date < the_date:
                continue

        data = self.get_field_for_date(the_date, var_name=var_name)
        result[the_date] = np.sum(data.flatten()[indices][mask1d == 1] * areas1d[mask1d == 1])

    times = list(sorted(result.keys()))
    values = [result[x] for x in times]
    print("nvals, min, max", len(values), min(values), max(values))
    return TimeSeries(time=times, data=values)
Author: guziy, Project: RPN, Lines of code: 35, Source file: gldas_manager.py
Example 11: get_zone_around_lakes_mask
def get_zone_around_lakes_mask(lons, lats, lake_mask, ktree=None, dist_km=100):
    """
    Returns the mask of a zone around lakes (excluding lakes) of a given width
    :type ktree: cKDTree
    :param ktree:
    :param lons:
    :param lats:
    :param lake_mask:
    :param dist_km:
    """
    x, y, z = lat_lon.lon_lat_to_cartesian(lons[lake_mask], lats[lake_mask])

    near_lake_zone = np.zeros_like(lons, dtype=bool)

    nlons = lons.shape[0] * lons.shape[1]
    near_lake_zone.shape = (nlons,)

    for xi, yi, zi in zip(x, y, z):
        dists, inds = ktree.query(np.array([[xi, yi, zi], ]), k=nlons, distance_upper_bound=dist_km * 1000)
        near_lake_zone[inds[inds < nlons]] = True

    near_lake_zone.shape = (lons.shape[0], lons.shape[1])

    # Remove lake points from the mask
    near_lake_zone &= ~lake_mask

    return near_lake_zone
Author: guziy, Project: RPN, Lines of code: 28, Source file: lake_effect_snowfall_entry.py
Example 12: get_kdtree
def get_kdtree(self, lons=None, lats=None, cache=True):
    """
    :param lons:
    :param lats:
    :param cache: if True then reuse the kdtree
    :return:
    """
    if lons is None:
        lons = self.lons
        lats = self.lats

    if lons is None:
        raise Exception(
            "The coordinates (lons and lats) are not yet set for the manager, please read some data first")

    if cache and self.kdtree is not None:
        return self.kdtree

    xs, ys, zs = lat_lon.lon_lat_to_cartesian(lons.flatten(), lats.flatten())
    kdtree = KDTree(list(zip(xs, ys, zs)))

    if cache:
        self.kdtree = kdtree

    return kdtree
Author: guziy, Project: RPN, Lines of code: 26, Source file: data_manager.py
Example 13: get_daily_clim_fields_aggregated_and_interpolated_to
def get_daily_clim_fields_aggregated_and_interpolated_to(self, start_year=None, end_year=None,
                                                         lons_target=None, lats_target=None, n_agg_x=1, n_agg_y=1):
    """
    Aggregate fields to the desired resolution prior to interpolation
    :param start_year:
    :param end_year:
    :param lons_target:
    :param lats_target:
    """
    # Return 365 fields
    df = self.get_daily_climatology_fields(start_year=start_year, end_year=end_year)

    assert isinstance(df, pd.Panel)

    lons1d, lats1d = lons_target.flatten(), lats_target.flatten()
    xt, yt, zt = lat_lon.lon_lat_to_cartesian(lons1d, lats1d)
    dists, indices = self.kdtree.query(list(zip(xt, yt, zt)), k=n_agg_x * n_agg_y)

    clim_fields = [
        df.loc[:, day, :].values.flatten()[indices].mean(axis=1).reshape(lons_target.shape) for day in df.major_axis
    ]

    clim_fields = np.asarray(clim_fields)
    clim_fields = np.ma.masked_where(np.isnan(clim_fields), clim_fields)

    return df.major_axis, clim_fields
Author: guziy, Project: RPN, Lines of code: 25, Source file: base_data_manager.py
Example 14: interpolate_data_to
def interpolate_data_to(self, data_in, lons2d, lats2d, nneighbours=4):
    """
    Interpolates data_in to the grid defined by (lons2d, lats2d)
    assuming that the data_in field is on the initial CRU grid
    interpolate using 4 nearest neighbors and inverse of squared distance
    """
    x_out, y_out, z_out = lat_lon.lon_lat_to_cartesian(lons2d.flatten(), lats2d.flatten())
    dst, ind = self.kdtree.query(list(zip(x_out, y_out, z_out)), k=nneighbours)

    data_in_flat = data_in.flatten()

    inverse_square = 1.0 / dst ** 2
    if len(dst.shape) > 1:
        norm = np.sum(inverse_square, axis=1)
        norm = np.array([norm] * dst.shape[1]).transpose()
        coefs = inverse_square / norm

        data_out_flat = np.sum(coefs * data_in_flat[ind], axis=1)
    elif len(dst.shape) == 1:
        data_out_flat = data_in_flat[ind]
    else:
        raise Exception("Could not find neighbor points")

    return np.reshape(data_out_flat, lons2d.shape)
Author: guziy, Project: RPN, Lines of code: 25, Source file: temperature.py
Example 15: _init_fields
def _init_fields(self, nc_dataset):
    nc_vars = nc_dataset.variables
    lons = nc_vars["lon"][:]
    lats = nc_vars["lat"][:]

    if lons.ndim == 1:
        lats2d, lons2d = np.meshgrid(lats, lons)
    elif lons.ndim == 2:
        lats2d, lons2d = lats, lons
    else:
        raise NotImplementedError("Cannot handle {}-dimensional coordinates".format(lons.ndim))

    self.lons2d, self.lats2d = lons2d, lats2d

    self.times_var = nc_vars["time"]
    self.times_num = nc_vars["time"][:]

    if hasattr(self.times_var, "calendar"):
        self.times = num2date(self.times_num, self.times_var.units, self.times_var.calendar)
    else:
        self.times = num2date(self.times_num, self.times_var.units)

    if not self.lazy:
        self.var_data = nc_vars[self.var_name][:]
        if nc_vars[self.var_name].shape[1:] != self.lons2d.shape:
            print("nc_vars[self.var_name].shape = {}".format(nc_vars[self.var_name].shape))
            self.var_data = np.transpose(self.var_data, axes=[0, 2, 1])

    x_in, y_in, z_in = lat_lon.lon_lat_to_cartesian(self.lons2d.flatten(), self.lats2d.flatten())
    self.kdtree = cKDTree(list(zip(x_in, y_in, z_in)))
Author: guziy, Project: RPN, Lines of code: 35, Source file: temperature.py
Example 16: get_flat_index
def get_flat_index(lon, lat, the_kd_tree):
    """
    :type the_kd_tree: KDTree
    """
    x0, y0, z0 = lat_lon.lon_lat_to_cartesian(lon, lat)
    d, i = the_kd_tree.query((x0, y0, z0))
    return i
Author: guziy, Project: RPN, Lines of code: 7, Source file: plot_soiltemp_cross.py
Example 17: __init__
def __init__(self, name_to_date_to_field, basemap, lons2d, lats2d,
             ax=None, cell_area=None, cell_manager=None, data_manager=None):
    self.gwdi_mean_field = None
    self.traf_mean_field = None
    self.tdra_mean_field = None
    self.upin_mean_field = None

    self.basemap = basemap
    self.date_to_stfl_field = name_to_date_to_field["STFL"]
    self.date_to_traf_field = name_to_date_to_field["TRAF"]
    self.date_to_tdra_field = name_to_date_to_field["TDRA"]
    self.date_to_pr_field = name_to_date_to_field["PR"]
    self.date_to_swe_field = name_to_date_to_field["I5"]
    self.date_to_swst_field = name_to_date_to_field["SWST"]
    # self.date_to_imav_field = name_to_date_to_field["IMAV"]

    self.acc_area_km2 = name_to_date_to_field["FACC"]

    # :type : CellManager
    self.cell_manager = cell_manager
    assert isinstance(self.cell_manager, CellManager)

    self.cell_area = cell_area

    x, y, z = lat_lon.lon_lat_to_cartesian(lons2d.flatten(), lats2d.flatten())
    self.kdtree = KDTree(list(zip(x, y, z)))

    ax.figure.canvas.mpl_connect("button_press_event", self)
    self.ax = ax
    self.lons2d = lons2d
    self.lats2d = lats2d
    self.data_manager = data_manager
    assert isinstance(self.data_manager, Crcm5ModelDataManager)

    self.x_pr, self.y_pr = basemap(lons2d, lats2d)
    self.lons_flat = lons2d.flatten()
    self.lats_flat = lats2d.flatten()

    self.dates_sorted = list(sorted(list(name_to_date_to_field.items())[0][1].keys()))

    self.counter = 0

    self.date_to_swsr_field = name_to_date_to_field["SWSR"]
    self.date_to_swsl_field = name_to_date_to_field["SWSL"]
    # self.date_to_gwdi_field = name_to_date_to_field["GWDI"]
    self.date_to_upin_field = name_to_date_to_field["UPIN"]

    # static fields
    self.slope = name_to_date_to_field["SLOP"]
    self.channel_length = name_to_date_to_field["LENG"]
    self.lake_outlet = name_to_date_to_field["LKOU"]

    self.coef_bf = -np.ones(self.slope.shape)
    good_points = self.slope >= 0
    self.coef_bf[good_points] = (self.slope[good_points]) ** 0.5 / (
        (self.channel_length[good_points]) ** (4.0 / 3.0) * data_manager.manning_bf[good_points])
Author: guziy, Project: RPN, Lines of code: 59, Source file: timeseries_plotter.py
Example 18: get_lake_model_points_for_stations
def get_lake_model_points_for_stations(self, station_list, lake_fraction=None,
                                       nneighbours=8):
    """
    For lake levels the search algorithm is a bit different, since accumulation area is not a very sensible
    parameter to compare
    :return {station: list of corresponding model points}
    :param station_list:
    :param lake_fraction:
    :param drainaige_area_reldiff_limit:
    :param nneighbours:
    :return: :raise Exception:
    """
    station_to_model_point_list = {}
    nx, ny = self.lons2d.shape
    i1d, j1d = list(range(nx)), list(range(ny))
    j2d, i2d = np.meshgrid(j1d, i1d)
    i_flat, j_flat = i2d.flatten(), j2d.flatten()

    for s in station_list:
        mp_list = []

        assert isinstance(s, Station)
        x, y, z = lat_lon.lon_lat_to_cartesian(s.longitude, s.latitude)
        dists, inds = self.kdtree.query((x, y, z), k=nneighbours)

        if nneighbours == 1:
            dists = [dists]
            inds = [inds]

        for d, i in zip(dists, inds):
            ix = i_flat[i]
            jy = j_flat[i]

            mp = ModelPoint(ix=ix, jy=jy)
            mp.longitude = self.lons2d[ix, jy]
            mp.latitude = self.lats2d[ix, jy]
            mp.distance_to_station = d

            if lake_fraction is not None:
                if lake_fraction[ix, jy] <= 0.001:  # skip the model point if there are almost no lakes inside
                    continue

                mp.lake_fraction = lake_fraction[ix, jy]

            mp_list.append(mp)

        if lake_fraction is not None:
            lf = 0.0
            for mp in mp_list:
                lf += mp.lake_fraction

            if lf <= 0.001:
                continue

        station_to_model_point_list[s] = mp_list
        print("Found model point for the station {0}".format(s))

    return station_to_model_point_list
Author: guziy, Project: RPN, Lines of code: 58, Source file: cell_manager.py
Example 19: __init__
def __init__(self, flow_dirs, nx=None, ny=None,
             lons2d=None,
             lats2d=None,
             accumulation_area_km2=None):
    self.cells = []
    self.lons2d = lons2d
    self.lats2d = lats2d
    self.flow_directions = flow_dirs
    self.accumulation_area_km2 = accumulation_area_km2

    # calculate characteristic distance
    if not any([None is arr for arr in [self.lats2d, self.lons2d]]):
        v1 = lat_lon.lon_lat_to_cartesian(self.lons2d[0, 0], self.lats2d[0, 0])
        v2 = lat_lon.lon_lat_to_cartesian(self.lons2d[1, 1], self.lats2d[1, 1])
        dv = np.array(v2) - np.array(v1)
        self.characteristic_distance = np.sqrt(np.dot(dv, dv))

        x, y, z = lat_lon.lon_lat_to_cartesian(self.lons2d.flatten(), self.lats2d.flatten())
        self.kdtree = cKDTree(list(zip(x, y, z)))

    if None not in [nx, ny]:
        self.nx = nx
        self.ny = ny
    else:
        nx, ny = flow_dirs.shape
        self.nx, self.ny = flow_dirs.shape

    for i in range(nx):
        self.cells.append(list([Cell(i=i, j=j, flow_dir_value=flow_dirs[i, j]) for j in range(ny)]))

    self._without_next_mask = np.zeros((nx, ny), dtype=np.int)
    self._wo_next_wo_prev_mask = np.zeros((nx, ny), dtype=np.int)  # mask of the potential outlets

    for i in range(nx):
        if i % 100 == 0:
            print("Created {}/{}".format(i, nx))

        for j in range(ny):
            i_next, j_next = direction_and_value.to_indices(i, j, flow_dirs[i][j])

            next_cell = None
            if 0 <= i_next < nx:
                if 0 <= j_next < ny:
                    next_cell = self.cells[i_next][j_next]

            self._without_next_mask[i, j] = int(next_cell is None)
            self.cells[i][j].set_next(next_cell)
Author: guziy, Project: RPN, Lines of code: 45, Source file: cell_manager.py
Example 20: get_kd_tree
def get_kd_tree(self):
    """
    :rtype : KDTree
    for interpolation purposes
    """
    if self._kdtree is None:
        x, y, z = lat_lon.lon_lat_to_cartesian(self.lons.flatten(), self.lats.flatten())
        # wrap zip in list() so the call also works under Python 3 (zip returns an iterator there)
        self._kdtree = KDTree(list(zip(x, y, z)))

    return self._kdtree
Author: guziy, Project: PlotWatrouteData, Lines of code: 9, Source file: map_parameters.py
Note: the util.geo.lat_lon.lon_lat_to_cartesian examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, who retain the copyright of the source code; please consult each project's license before redistributing or reusing the code, and do not republish without permission.