This article collects typical usage examples of the Python function mdtraj.iterload. If you are wondering what iterload does, how to call it, or what working examples look like, the curated code samples below may help.
Twenty code examples of the iterload function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
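Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: iterate over a trajectory in fixed-size chunks so the whole file never has to sit in memory at once. The file names "traj.xtc" and "top.pdb" are placeholders, not files from any of the projects below.

import mdtraj as md
import numpy as np

rg_per_frame = []
# Each chunk is an ordinary mdtraj.Trajectory, so any per-frame analysis
# (here the radius of gyration) can run chunk by chunk and the results
# can be concatenated afterwards.
for chunk in md.iterload("traj.xtc", top="top.pdb", chunk=1000, stride=1):
    rg_per_frame.append(md.compute_rg(chunk))
rg_per_frame = np.concatenate(rg_per_frame)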
Example 1: iterload
def iterload(self, i, chunk):
    if self.verbose:
        print('[MDTraj dataset] iterloading %s' % self.filename(i))

    if self._topology is None:
        return md.iterload(
            self.filename(i), chunk=chunk, stride=self.stride,
            atom_indices=self.atom_indices)
    else:
        return md.iterload(
            self.filename(i), chunk=chunk, stride=self.stride,
            atom_indices=self.atom_indices, top=self._topology)
Developer ID: Eigenstate, Project: msmbuilder, Lines: 12, Source file: dataset.py
Example 2: bead_tetrahedrality
def bead_tetrahedrality(fn_traj, fn_top, fn_save, ibead, len_chunk=100, select_A='name O', select_B='name O'):
    Qs = np.array([])
    t0 = time.time()
    print('Processing bead %d...' % ibead)
    print('')
    top = md.load(fn_top).topology
    trj = md.iterload(fn_traj, top=top, chunk=len_chunk)
    # Prepare index pairs
    idx_A = top.select(select_A)
    idx_B = top.select(select_B)
    n_A = len(idx_A)
    n_B = len(idx_B)
    pairs = []
    for iB in idx_B:
        for iA in idx_A:
            pairs.append((iB, iA))
    pairs = np.array(pairs, dtype=int)
    i_frame = 0
    for chunk in trj:
        neighbors = extract_neighbors(chunk, pairs, 4, n_A, n_B)
        for i in range(len_chunk):
            # Iteration over chunk is necessary because neighbors
            # are not fixed over the trajectory.
            Qs = np.append(Qs, extract_Q(chunk[i], neighbors[i], idx_A))
        i_frame += len_chunk
    np.savetxt(fn_save, Qs)
    t1 = time.time()
    print('Processing bead %d took %.2f minutes.' % (ibead, (t1 - t0) / 60.0))
    print('')
Developer ID: JNapoli, Project: scripts, Lines: 33, Source file: tetra_indexed.py
Example 3: test_iterload_skip
def test_iterload_skip():
    files = [
        "frame0.nc",
        "frame0.h5",
        "frame0.xtc",
        "frame0.trr",
        "frame0.dcd",
        "frame0.binpos",
        "frame0.xyz",
        "frame0.lammpstrj",
    ]
    if not (on_win and on_py3):
        files.append("legacy_msmbuilder_trj0.lh5")
    err_msg = "failed for file %s with chunksize %i and skip %i"
    for file in files:
        for cs in [0, 1, 11, 100]:
            for skip in [0, 1, 20, 101]:
                print("testing file %s with skip=%i" % (file, skip))
                t_ref = md.load(get_fn(file), top=get_fn("native.pdb"))
                t = functools.reduce(
                    lambda a, b: a.join(b),
                    md.iterload(get_fn(file), skip=skip, top=get_fn("native.pdb"), chunk=cs),
                )
                eq(t_ref.xyz[skip:], t.xyz, err_msg=err_msg % (file, cs, skip))
                eq(t_ref.time[skip:], t.time, err_msg=err_msg % (file, cs, skip))
                eq(t_ref.topology, t.topology, err_msg=err_msg % (file, cs, skip))
Developer ID: rafwiewiora, Project: mdtraj, Lines: 27, Source file: test_trajectory.py
Example 4: bin_covariance_multiple_coordinates_for_traj
def bin_covariance_multiple_coordinates_for_traj(trajfile, covar_by_bin, count_by_bin,
                                                 observable1, observable2, obs1_bin_avg, obs2_bin_avg,
                                                 binning_coord, bin_edges, topology, chunksize):
    """Loop over chunks of a trajectory to bin a set of observables along a 1D coordinate"""
    ## TODO test cases:
    # - Two vector-valued observables
    # - One single-valued observable and one vector-valued observable.
    # - Two single-valued observables

    # In order to save memory we loop over trajectories in chunks.
    start_idx = 0
    for trajchunk in md.iterload(trajfile, top=topology, chunk=chunksize):
        # Calculate observable for trajectory chunk
        obs1_temp = observable1(trajchunk)
        obs2_temp = observable2(trajchunk)
        chunk_size = trajchunk.n_frames
        coord = binning_coord[start_idx:start_idx + chunk_size]
        # Sort frames into bins along binning coordinate.
        for n in range(bin_edges.shape[0]):
            frames_in_this_bin = (coord >= bin_edges[n][0]) & (coord < bin_edges[n][1])
            if frames_in_this_bin.any():
                # Compute the covariance
                delta_obs1 = obs1_temp[frames_in_this_bin] - obs1_bin_avg[n]
                delta_obs2 = obs2_temp[frames_in_this_bin] - obs2_bin_avg[n]
                # How should result be collected depending on the number of return values?
                # Accumulate so contributions from different chunks hitting the same bin add up.
                covar_by_bin[n, :, :] += np.dot(delta_obs1.T, delta_obs2)
                count_by_bin[n] += float(sum(frames_in_this_bin))
        start_idx += chunk_size
    return covar_by_bin, count_by_bin
Developer ID: ajkluber, Project: simulation, Lines: 30, Source file: util.py
Example 5: run
def run(project, atom_indices=None, traj_fn='all'):
    n_atoms = project.load_conf().n_atoms

    if traj_fn.lower() == 'all':
        SASA = np.ones((project.n_trajs, np.max(project.traj_lengths), n_atoms)) * -1
        for traj_ind in xrange(project.n_trajs):
            traj_asa = []
            logger.info("Working on Trajectory %d", traj_ind)
            traj_fn = project.traj_filename(traj_ind)
            chunk_ind = 0
            for traj_chunk in md.iterload(traj_fn, atom_indices=atom_indices, chunk=1000):
                traj_asa.extend(md.shrake_rupley(traj_chunk))
                chunk_ind += 1
            SASA[traj_ind, 0:project.traj_lengths[traj_ind]] = traj_asa
    else:
        traj_asa = []
        for traj_chunk in Trajectory.enum_chunks_from_lhdf(traj_fn, AtomIndices=atom_indices):
            traj_asa.extend(asa.calculate_asa(traj_chunk))
        SASA = np.array(traj_asa)

    return SASA
Developer ID: lilipeng, Project: msmbuilder, Lines: 26, Source file: CalculateProjectSASA.py
Example 6: load_Trajs
def load_Trajs(trajfiles_list, prmtop_file, stride, chunk):
    """
    Iteratively loads a list of NetCDF files and returns them
    as a list of mdtraj.Trajectory objects

    Parameters
    ----------
    trajfiles_list: list of str
        List with the names of trajectory files
    prmtop_file: str
        Name of the prmtop file
    stride: int
        Frames to be used when loading the trajectories
    chunk: int
        Number of frames to load at once from disk per iteration.
        If 0, load all.

    Returns
    -------
    list_chunks: list
        List of mdtraj.Trajectory objects, each of 'chunk' length
    """
    list_chunks = []
    for traj in trajfiles_list:
        for frag in md.iterload(traj, chunk=chunk, top=prmtop_file,
                                stride=stride):
            list_chunks.append(frag)
    return list_chunks
Developer ID: jeiros, Project: Scripts, Lines: 28, Source file: traj_loading.py
Example 7: compute_rmsd
def compute_rmsd(fname, topname, sel="name CA", step=1):
    rmsd = []
    atom_indices = md.load(topname).topology.select(sel)
    top = md.load(topname)
    for chunk in md.iterload(fname, top=top, stride=step):
        rmsd.append(md.rmsd(chunk, top, 0, atom_indices=atom_indices))
    rmsd = np.concatenate(rmsd)
    return rmsd
Developer ID: s-gordon, Project: MD-TAT, Lines: 8, Source file: rmsd.py
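A hypothetical call to the function above might look like the sketch below; the file names are placeholders, and it computes the Cα RMSD to the first frame of the topology structure while reading every tenth frame.

# Placeholder files; any trajectory/topology pair readable by MDTraj works.
rmsd = compute_rmsd("production.dcd", "system.pdb", sel="name CA", step=10)
print(rmsd.shape, rmsd.mean())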
Example 8: test_md_join
def test_md_join():
    t_ref = md.load(get_fn('frame0.h5'))[:20]
    loaded = md.load(fn, top=t_ref, stride=2)
    iterloaded = md.join(md.iterload(fn, top=t_ref, stride=2, chunk=6))
    eq(loaded.xyz, iterloaded.xyz)
    eq(loaded.time, iterloaded.time)
    eq(loaded.unitcell_angles, iterloaded.unitcell_angles)
    eq(loaded.unitcell_lengths, iterloaded.unitcell_lengths)
Developer ID: msultan, Project: mdtraj, Lines: 8, Source file: test_trajectory.py
Example 9: calc_coordinate_for_traj
def calc_coordinate_for_traj(trajfile, observable_fun, topology, chunksize):
    """Loop over chunks of a trajectory to calculate 1D observable"""
    # In order to save memory we loop over trajectories in chunks.
    obs_traj = []
    for trajchunk in md.iterload(trajfile, top=topology, chunk=chunksize):
        # Calculate observable for trajectory chunk
        obs_traj.extend(observable_fun(trajchunk))
    return np.array(obs_traj)
Developer ID: ajkluber, Project: simulation, Lines: 8, Source file: util.py
Example 10: test
def test():
    for stride in [1, 2, 3]:
        loaded = md.load(fn, top=t_ref, stride=stride)
        iterloaded = functools.reduce(lambda a, b: a.join(b), md.iterload(fn, top=t_ref, stride=stride, chunk=6))
        eq(loaded.xyz, iterloaded.xyz)
        eq(loaded.time, iterloaded.time)
        eq(loaded.unitcell_angles, iterloaded.unitcell_angles)
        eq(loaded.unitcell_lengths, iterloaded.unitcell_lengths)
Developer ID: huynhqu1, Project: mdtraj, Lines: 8, Source file: test_trajectory.py
Example 11: test_chunk0_iterload
def test_chunk0_iterload():
    filename = 'frame0.h5'
    trj0 = md.load(get_fn(filename))

    for trj in md.iterload(get_fn(filename), chunk=0):
        pass

    eq(trj0.n_frames, trj.n_frames)
Developer ID: huynhqu1, Project: mdtraj, Lines: 9, Source file: test_trajectory.py
Example 12: load_data
def load_data(self):
    load_time_start = time.time()
    data = []
    for tfn in self.filenames:
        kwargs = {} if tfn.endswith('h5') else {'top': self.top}
        for t in md.iterload(tfn, chunk=self.args.split, **kwargs):
            item = np.asarray(md.compute_dihedrals(t, self.indices), np.double)
            data.append(item)
    return data
Developer ID: gkiss, Project: mixtape, Lines: 9, Source file: fitvmhmm.py
Example 13: bin_observable
def bin_observable(trajfiles, observable, binning_coord, bin_edges, chunksize=10000):
    """Bin observable over trajectories

    Parameters
    ----------
    trajfiles : list
        List of trajectory file names to process. Can be full path to file.
    observable : object
        A function that takes in an MDTraj trajectory object and returns a
        number.
    binning_coord : list
        List of multiple timeseries; each timeseries is used as a reaction
        coordinate to histogram the frames of the corresponding trajectory.
    bin_edges : np.ndarray (n_bins, 2)
        Edges of the bins used to histogram trajectory frames according
        to values of binning_coord.
    chunksize : int, opt.
        Trajectories are processed in chunks. chunksize sets the number of
        frames in a chunk. Default: 10000

    Returns
    -------
    obs_bin_avg : np.ndarray (n_bins, observable.dimension)
        Average of observable in each bin along binning reaction coordinate.
    """
    assert len(binning_coord[0].shape) == 1
    assert bin_edges.shape[1] == 2

    obs_by_bin = np.zeros((bin_edges.shape[0], observable.dimension), float)
    count_by_bin = np.zeros(bin_edges.shape[0], float)
    for i in range(len(trajfiles)):
        start_idx = 0
        for trajchunk in mdtraj.iterload(trajfiles[i], top=observable.top, chunk=chunksize):
            obs_temp = observable.map(trajchunk)
            chunk_size = trajchunk.n_frames
            coord = binning_coord[i][start_idx:start_idx + chunk_size]
            # Assign frames in trajectory chunk to histogram bins.
            for n in range(bin_edges.shape[0]):
                frames_in_this_bin = (coord >= bin_edges[n][0]) & (coord < bin_edges[n][1])
                if np.any(frames_in_this_bin):
                    obs_by_bin[n, :] += np.sum(obs_temp[frames_in_this_bin], axis=0)
                    count_by_bin[n] += float(sum(frames_in_this_bin))
            # TODO: Break out of loop when all frames have been assigned.
            # Count n_frames_assigned. Break when n_frames_assigned == chunk_size
            start_idx += chunk_size

    obs_bin_avg = np.zeros((bin_edges.shape[0], observable.dimension), float)
    for n in range(bin_edges.shape[0]):
        if count_by_bin[n] > 0:
            obs_bin_avg[n, :] = obs_by_bin[n, :] / count_by_bin[n]
    return obs_bin_avg
Developer ID: ajkluber, Project: simulation, Lines: 56, Source file: observables.py
Example 14: test_iterload
def test_iterload():
    files = ['frame0.nc', 'frame0.h5', 'frame0.xtc', 'frame0.trr',
             'frame0.dcd', 'frame0.binpos', 'legacy_msmbuilder_trj0.lh5']
    chunk = 100
    for stride in [1, 2, 5, 10]:
        for file in files:
            t_ref = md.load(get_fn(file), stride=stride, top=get_fn('native.pdb'))
            t = functools.reduce(lambda a, b: a.join(b), md.iterload(get_fn(file), stride=stride, top=get_fn('native.pdb'), chunk=100))
            eq(t_ref.xyz, t.xyz)
            eq(t_ref.time, t.time)
            eq(t_ref.topology, t.topology)
Developer ID: gabrielelanaro, Project: mdtraj, Lines: 11, Source file: test_trajectory.py
Example 15: _fluctuation_matrix
def _fluctuation_matrix(reference_frame, trajectories_path, atom_subset, topology, chunk, first_frame):
    """
    This function computes the residual sum of squares of
    the reference frame and all the corresponding atoms
    in the provided frames

    Input:

    reference_frame:
        numpy.array
        array with the coordinates of reference frame /
        average conformation / native conformation
    trajectories_path:
        str
        path of trajectories file of interest
    atom_subset:
        numpy.array
        array with all the atom numbers corresponding to selection
    topology:
        mdtraj.core.topology.Topology
    chunk:
        int
        number of frames to be loaded at a time.
        Note that this value can be defined in the main
        function.
    number_frames:
        int
        total number of frames of trajectories
    first_frame:
        mdtraj.core.trajectory.Trajectory
        trajectory of first frame
    """
    residual_sum_squares = np.zeros((len(atom_subset)))

    ## now we can compute the difference between the trajectory and its reference,
    ## ri(t) - riref, using the mdtraj trajectory attribute xyz to extract
    ## the cartesian coordinates of trajectory and reference in a numpy array
    ## chunk.xyz.shape = (frames, atom, coordinate dimensions)
    number_of_frames = 0
    trajectory_time = []
    for chunk_i in md.iterload(trajectories_path, chunk=chunk, top=topology, atom_indices=atom_subset):
        trajectory_time.append(chunk_i.time)
        for atom in range(len(atom_subset)):
            diff = np.subtract(chunk_i.xyz[:, atom, :] * 10, reference_frame[atom])
            residual_sum_squares[atom] = residual_sum_squares[atom] + ((diff ** 2).sum(axis=1).sum(axis=0))
        number_of_frames += chunk_i.xyz.shape[0]

    ## the result is a matrix with all fluctuations squared
    ## shape(number of frames * atom numbers, 3)
    ## from 0 to number of frames we have information of the first atom,
    ## then from number of frames to number of frames * 2 the second atom,
    ## and so forth
    return residual_sum_squares, number_of_frames, trajectory_time
Developer ID: gf712, Project: MDanalysis, Lines: 54, Source file: RMSF.py
Example 16: test_hashing
def test_hashing():
    frames = [frame for frame in md.iterload(get_fn("frame0.xtc"), chunk=1, top=get_fn("native.pdb"))]
    hashes = [hash(frame) for frame in frames]
    # check all frames have a unique hash value
    assert len(hashes) == len(set(hashes))

    # change topology and ensure hash changes too
    top = frames[0].topology
    top.add_bond(top.atom(0), top.atom(1))
    last_frame_hash = hash(frames[0])
    assert last_frame_hash != hashes[-1]
Developer ID: rafwiewiora, Project: mdtraj, Lines: 12, Source file: test_trajectory.py
Example 17: regroup_DISK
def regroup_DISK(trajs, topology_file, disctrajs, path, stride=1):
    """Regroups MD trajectories into clusters according to discretised trajectories.

    Parameters
    ----------
    trajs : list of strings
        xtc/dcd/... trajectory file names
    topology_file : string
        name of topology file that matches `trajs`
    disctrajs : list of array-likes
        discretized trajectories
    path : string
        file system path to directory where cluster trajectories are written
    stride : int
        stride of disctrajs with respect to the (original) trajs

    Returns
    -------
    cluster : list of file names or `None`, len(cluster)=np.max(trajs)+1
        each element cluster[i] is either `None` if i wasn't found in disctrajs or
        is the file name of a new trajectory that holds all frames that were
        assigned to cluster i.
    """
    # handle single element invocation
    if not isinstance(trajs, list):
        trajs = [trajs]
    if not isinstance(disctrajs, list):
        disctrajs = [disctrajs]

    states = np.unique(np.hstack(([np.unique(disctraj) for disctraj in disctrajs])))
    states = np.setdiff1d(states, [-1])  # exclude invalid states
    writer = [None] * (max(states) + 1)
    cluster = [None] * (max(states) + 1)

    for i in states:
        cluster[i] = path + os.sep + ('%d.xtc' % i)
        writer[i] = XTCTrajectoryFile(cluster[i], 'w', force_overwrite=True)

    for disctraj, traj in zip(disctrajs, trajs):
        reader = md.iterload(traj, top=topology_file, stride=stride)
        start = 0
        for chunk in reader:
            chunk_length = chunk.xyz.shape[0]
            for i in xrange(chunk_length):
                cl = disctraj[i + start]
                if cl != -1:
                    writer[cl].write(chunk.xyz[i, :, :])  # np.newaxis?
            start += chunk_length
        # TODO: check that whole disctrajs was used

    for i in states:
        writer[i].close()
    return cluster
Developer ID: ismaelresp, Project: PyEMMA, Lines: 53, Source file: mapping.py
Example 18: read_and_featurize
def read_and_featurize(traj_file, features_dir=None, condition=None, dihedral_types=["phi", "psi", "chi1", "chi2"], dihedral_residues=None, resSeq_pairs=None, iterative=True):
    a = time.time()

    dihedral_indices = []
    residue_order = []
    if len(dihedral_residues) > 0:
        for dihedral_type in dihedral_types:
            if dihedral_type == "phi": dihedral_indices.append(phi_indices(fix_topology(top), dihedral_residues))
            if dihedral_type == "psi": dihedral_indices.append(psi_indices(fix_topology(top), dihedral_residues))
            if dihedral_type == "chi1": dihedral_indices.append(chi1_indices(fix_topology(top), dihedral_residues))
            if dihedral_type == "chi2": dihedral_indices.append(chi2_indices(fix_topology(top), dihedral_residues))

        #print("new features has dim %d" %(2*len(phi_tuples) + 2*len(psi_tuples) + 2*len(chi2_tuples)))
        #print("featurizing manually:")
        dihedral_angles = []
        for dihedral_type in dihedral_indices:
            angles = np.transpose(ManualDihedral.compute_dihedrals(traj=traj, indices=dihedral_type))
            dihedral_angles.append(np.sin(angles))
            dihedral_angles.append(np.cos(angles))
        manual_features = np.transpose(np.concatenate(dihedral_angles))

    if len(resSeq_pairs) > 0:
        top = md.load_frame(traj_file, index=0).topology
        resIndex_pairs = convert_resSeq_to_resIndex(top, resSeq_pairs)
        contact_features = []
        if iterative:
            try:
                for chunk in md.iterload(traj_file, chunk=1000):
                    # chunk = fix_traj(chunk)
                    # chunk = md.load(traj_file, stride=1000)
                    # print(resIndex_pairs[0:10])
                    chunk_features = md.compute_contacts(chunk, contacts=resIndex_pairs, scheme='closest-heavy', ignore_nonprotein=False)[0]
                    print(np.shape(chunk_features))
                    contact_features.append(chunk_features)
                contact_features = np.concatenate(contact_features)
            except Exception, e:
                print str(e)
                print("Failed")
                return
            # traj = md.load(traj_file)
            # contact_features = md.compute_contacts(chunk, contacts=contact_residue_pairs, scheme='closest-heavy', ignore_nonprotein=False)[0]
        else:
            try:
                traj = md.load(traj_file)
                contact_features = md.compute_contacts(traj, contacts=resIndex_pairs, scheme='closest-heavy', ignore_nonprotein=False)[0]
            except Exception, e:
                print str(e)
                print("Failed for traj")
                return
Developer ID: msultan, Project: conformation, Lines: 52, Source file: custom_featurizer.py
Example 19: _load_traj_xyz
def _load_traj_xyz(md_topology, trajectory, atom_subset, verbose, chunk, stride):
    """
    Returns xyz coordinates of all requested trajectories
    """
    # first create a list with all the paths that are needed
    try:
        trajectory_path = os.listdir(trajectory)
    except:
        sys.exit('Make sure you have provided a string for a valid path to a trajectory file!')
    else:
        if verbose > 0:
            print 'Loading trajectories from the following files: '
            for trajectory_i in trajectory_path:
                print trajectory_i

        # get first frame for superpositioning
        first_frame = md.load(trajectory + trajectory_path[0], frame=0, top=md_topology, atom_indices=atom_subset)

        # initiate some variables
        all_coordinates = []
        number_of_frames = 0
        sim_time = []

        # now we need to load each trajectory file as a chunk
        try:
            for file_i in trajectory_path:
                for chunk_i in md.iterload(trajectory + file_i, chunk, top=md_topology, atom_indices=atom_subset, stride=stride):
                    sim_time.append(chunk_i.time)
                    # superpose each chunk to first frame
                    chunk_i.superpose(first_frame, 0)
                    if verbose > 1:
                        print 'Successfully loaded trajectory: \n %s' % (chunk_i)
                    all_coordinates.append(chunk_i.xyz.reshape(chunk_i.n_frames, chunk_i.n_atoms * 3))
            all_coordinates_np = np.concatenate(all_coordinates)
        except:
            sys.exit('Make sure you provided a valid path to a folder with trajectory files!')
        else:
            print '\nSuccessfully loaded coordinates for %s atoms from %s out of %s frames!' % (all_coordinates_np.shape[1] / 3, all_coordinates_np.shape[0], all_coordinates_np.shape[0] * stride)
            sim_time = np.concatenate(sim_time)
            return all_coordinates_np, sim_time
Developer ID: jeiros, Project: MDanalysis, Lines: 51, Source file: Divergence_PCA.py
Example 20: itertrajs
def itertrajs(meta, stride=1):
    """Load one mdtraj trajectory at a time and yield it.

    MDTraj does striding badly. It reads in the whole trajectory and
    then performs a stride. We join(iterload) to conserve memory.
    """
    tops = preload_tops(meta)
    for i, row in meta.iterrows():
        yield i, md.join(md.iterload(row['traj_fn'],
                                     top=tops[row['top_fn']],
                                     stride=stride),
                         discard_overlapping_frames=False,
                         check_topology=False)
Developer ID: dr-nate, Project: msmbuilder, Lines: 14, Source file: io.py
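The generator above relies on two project conventions that are not shown in the excerpt: `meta` is a pandas DataFrame with 'traj_fn' and 'top_fn' columns, and preload_tops returns a dict mapping each topology file name to a loaded topology. Below is a hedged usage sketch with placeholder file names and a minimal stand-in for preload_tops (its real behaviour is only inferred from how itertrajs indexes the dict).

import mdtraj as md
import pandas as pd

def preload_tops(meta):
    # Minimal stand-in: load each distinct topology file once, keyed by file name.
    return {fn: md.load_topology(fn) for fn in meta['top_fn'].unique()}

# Hypothetical metadata table; in practice it would come from the project's own bookkeeping.
meta = pd.DataFrame({'traj_fn': ['run0.xtc'], 'top_fn': ['top.pdb']},
                    index=pd.Index([0], name='traj_key'))

for key, traj in itertrajs(meta, stride=10):
    print(key, traj.n_frames)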
Note: The mdtraj.iterload examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or reuse should follow each project's license. Do not reproduce without permission.