This article collects and summarizes typical usage examples of the mne.SourceEstimate class in Python. If you are unsure what SourceEstimate does, or how to use it, the curated class examples below may help.
A total of 19 code examples of the SourceEstimate class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
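Before the collected examples, here is a minimal, self-contained sketch of the basic SourceEstimate workflow: construct an estimate from an (n_vertices, n_times) data array plus per-hemisphere vertex numbers, inspect it, and save it to disk. The vertex counts, time parameters, and the subject name 'sample' are illustrative assumptions rather than values taken from the examples below.

import numpy as np
from mne import SourceEstimate

# Per-hemisphere vertex numbers (left, right); sizes are illustrative
vertices = [np.arange(10), np.arange(10)]
# Data must be shaped (n_vertices, n_times): here 20 vertices x 5 samples
data = np.random.RandomState(0).randn(20, 5)
# tmin is the time of the first sample (s), tstep the sampling period (s)
stc = SourceEstimate(data, vertices=vertices, tmin=0.0, tstep=0.001,
                     subject='sample')
print(stc.data.shape)  # (20, 5)
print(stc.times)       # [0.    0.001 0.002 0.003 0.004]
stc.save('example')    # writes example-lh.stc and example-rh.stc

The 19 examples that follow exercise this same class for I/O, arithmetic, time-domain transforms, morphing between subjects, and plotting.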
Example 1: test_volume_stc
def test_volume_stc():
    """Test reading and writing volume STCs."""
    N = 100
    data = np.arange(N)[:, np.newaxis]
    datas = [data, data, np.arange(2)[:, np.newaxis]]
    vertno = np.arange(N)
    vertnos = [vertno, vertno[:, np.newaxis], np.arange(2)[:, np.newaxis]]
    vertno_reads = [vertno, vertno, np.arange(2)]
    for data, vertno, vertno_read in zip(datas, vertnos, vertno_reads):
        stc = SourceEstimate(data, vertno, 0, 1)
        assert_true(stc.is_surface() is False)
        fname_temp = op.join(tempdir, 'temp-vl.stc')
        stc_new = stc
        for _ in xrange(2):
            stc_new.save(fname_temp)
            stc_new = read_source_estimate(fname_temp)
            assert_true(stc_new.is_surface() is False)
            assert_array_equal(vertno_read, stc_new.vertno)
            assert_array_almost_equal(stc.data, stc_new.data)

    # now let's actually read a MNE-C processed file
    stc = read_source_estimate(fname_vol, 'sample')
    assert_true('sample' in repr(stc))
    stc_new = stc
    assert_raises(ValueError, stc.save, fname_vol, ftype='whatever')
    for _ in xrange(2):
        fname_temp = op.join(tempdir, 'temp-vol.w')
        stc_new.save(fname_temp, ftype='w')
        stc_new = read_source_estimate(fname_temp)
        assert_true(stc_new.is_surface() is False)
        assert_array_equal(stc.vertno, stc_new.vertno)
        assert_array_almost_equal(stc.data, stc_new.data)
Author: ashwinashok9111993, Project: mne-python, Lines: 32, Source file: test_source_estimate.py
Example 2: test_stc_mpl
def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    sample_src = read_source_spaces(src_fname)
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.ones((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')
    with pytest.warns(RuntimeWarning, match='not included'):
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
        fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                       hemi='lh', smoothing_steps=2, subject='sample',
                       backend='matplotlib', spacing='ico2', time_viewer=True,
                       colormap='mne')
        time_viewer = fig.time_viewer
        _fake_click(time_viewer, time_viewer.axes[0], (0.5, 0.5))  # change t
        time_viewer.canvas.key_press_event('ctrl+right')
        time_viewer.canvas.key_press_event('left')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
Author: kambysese, Project: mne-python, Lines: 28, Source file: test_3d.py
Example 3: test_stc_arithmetic
def test_stc_arithmetic():
    """Test arithmetic for STC files."""
    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc = SourceEstimate(fname)
    data = stc.data.copy()

    out = list()
    for a in [data, stc]:
        a = a + a * 3 + 3 * a - a ** 2 / 2

        a += a
        a -= a
        a /= 2 * a
        a *= -a

        a += 2
        a -= 1
        a *= -1
        a /= 2
        a **= 3
        out.append(a)

    assert_array_equal(out[0], out[1].data)
    assert_array_equal(stc.sqrt().data, np.sqrt(stc.data))
Author: sudo-nim, Project: mne-python, Lines: 25, Source file: test_source_estimate.py
Example 4: test_transform_data
def test_transform_data():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_sensors, n_vertices, n_times = 10, 20, 4
    kernel = np.random.randn(n_vertices, n_sensors)
    sens_data = np.random.randn(n_sensors, n_times)

    vertices = np.arange(n_vertices)
    data = np.dot(kernel, sens_data)

    for idx, tmin_idx, tmax_idx in\
            zip([None, np.arange(n_vertices / 2, n_vertices)],
                [None, 1], [None, 3]):

        if idx is None:
            idx_use = slice(None, None)
        else:
            idx_use = idx

        data_f, _ = _my_trans(data[idx_use, tmin_idx:tmax_idx])

        for stc_data in (data, (kernel, sens_data)):
            stc = SourceEstimate(stc_data, vertices=vertices,
                                 tmin=0., tstep=1.)
            stc_data_t = stc.transform_data(_my_trans, idx=idx,
                                            tmin_idx=tmin_idx,
                                            tmax_idx=tmax_idx)
            assert_allclose(data_f, stc_data_t)
Author: ashwinashok9111993, Project: mne-python, Lines: 28, Source file: test_source_estimate.py
Example 5: test_transform
def test_transform():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = rng.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert (isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertices, stcs_t[0].vertices)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    pytest.raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert (isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    tmin_idx = np.searchsorted(times, 0)
    tmax_idx = np.searchsorted(times, 501)  # Include 500ms in the range
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=tmin_idx,
                                tmax_idx=tmax_idx)
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert (isinstance(stc, SourceEstimate))
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.5)
    assert_equal(len(stc.vertices[0]), 0)
    assert_equal(stc.vertices[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    tmin_idx, tmax_idx = np.searchsorted(times, 0), np.searchsorted(times, 250)
    data_t = stc.transform_data(np.abs, tmin_idx=tmin_idx, tmax_idx=tmax_idx)
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_equal(stc.tmin, 0.)
    assert_equal(stc.times[-1], 0.2)
    assert_array_equal(stc.data, data_t)
Author: teonbrooks, Project: mne-python, Lines: 52, Source file: test_source_estimate.py
Example 6: test_transform
def test_transform():
    """Test applying linear (time) transform to data."""
    # make up some data
    n_verts_lh, n_verts_rh, n_times = 10, 10, 10
    vertices = [np.arange(n_verts_lh), n_verts_lh + np.arange(n_verts_rh)]
    data = np.random.randn(n_verts_lh + n_verts_rh, n_times)
    stc = SourceEstimate(data, vertices=vertices, tmin=-0.1, tstep=0.1)

    # data_t.ndim > 2 & copy is True
    stcs_t = stc.transform(_my_trans, copy=True)
    assert_true(isinstance(stcs_t, list))
    assert_array_equal(stc.times, stcs_t[0].times)
    assert_equal(stc.vertno, stcs_t[0].vertno)

    data = np.concatenate((stcs_t[0].data[:, :, None],
                           stcs_t[1].data[:, :, None]), axis=2)
    data_t = stc.transform_data(_my_trans)
    assert_array_equal(data, data_t)  # check against stc.transform_data()

    # data_t.ndim > 2 & copy is False
    assert_raises(ValueError, stc.transform, _my_trans, copy=False)

    # data_t.ndim = 2 & copy is True
    tmp = deepcopy(stc)
    stc_t = stc.transform(np.abs, copy=True)
    assert_true(isinstance(stc_t, SourceEstimate))
    assert_array_equal(stc.data, tmp.data)  # xfrm doesn't modify original?

    # data_t.ndim = 2 & copy is False
    times = np.round(1000 * stc.times)
    verts = np.arange(len(stc.lh_vertno),
                      len(stc.lh_vertno) + len(stc.rh_vertno), 1)
    verts_rh = stc.rh_vertno
    t_idx = [np.where(times >= -50)[0][0], np.where(times <= 500)[0][-1]]
    data_t = stc.transform_data(np.abs, idx=verts, tmin_idx=t_idx[0],
                                tmax_idx=t_idx[-1])
    stc.transform(np.abs, idx=verts, tmin=-50, tmax=500, copy=False)
    assert_true(isinstance(stc, SourceEstimate))
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.5))
    assert_true(len(stc.vertno[0]) == 0)
    assert_equal(stc.vertno[1], verts_rh)
    assert_array_equal(stc.data, data_t)

    times = np.round(1000 * stc.times)
    t_idx = [np.where(times >= 0)[0][0], np.where(times <= 250)[0][-1]]
    data_t = stc.transform_data(np.abs, tmin_idx=t_idx[0], tmax_idx=t_idx[-1])
    stc.transform(np.abs, tmin=0, tmax=250, copy=False)
    assert_true((stc.tmin == 0.) & (stc.times[-1] == 0.2))
    assert_array_equal(stc.data, data_t)
Author: lengyelgabor, Project: mne-python, Lines: 49, Source file: test_source_estimate.py
Example 7: test_io_w
def test_io_w():
    """Test IO for w files."""
    w_fname = op.join(data_path, 'MEG', 'sample',
                      'sample_audvis-meg-oct-6-fwd-sensmap')

    src = SourceEstimate(w_fname)

    src.save('tmp', ftype='w')

    src2 = SourceEstimate('tmp-lh.w')

    assert_array_almost_equal(src.data, src2.data)
    assert_array_almost_equal(src.lh_vertno, src2.lh_vertno)
    assert_array_almost_equal(src.rh_vertno, src2.rh_vertno)
Author: sudo-nim, Project: mne-python, Lines: 15, Source file: test_source_estimate.py
Example 8: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)))
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)))

    # Test for correct clim values
    colormap = 'mne'
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15, 20)))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'))
    assert_raises(ValueError, stc.plot, colormap=colormap, clim='foo')
    assert_raises(ValueError, stc.plot, colormap=colormap, clim=(5, 10, 15))
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto')
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto')

    # Test that stc.data contains enough unique values to use percentages
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc,
                  colormap=colormap, clim=clim)
    mlab.close()
Author: davidmeunier79, Project: mne-python, Lines: 46, Source file: test_3d.py
Example 9: test_morphed_source_space_return
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)

    # Morph the data over using standard methods
    stc_morph = stc_fs.morph('sample', [s['vertno'] for s in src_morph],
                             smooth=1, subjects_dir=subjects_dir)

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # Compare to the original data
    stc_morph_morph = stc_morph.morph('fsaverage', stc_morph_return.vertices,
                                      smooth=1,
                                      subjects_dir=subjects_dir)
    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert_true(corr > 0.99, corr)

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    assert_raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    assert_raises(RuntimeError, stc_morph.to_original_src,
                  src, subjects_dir=subjects_dir)
Author: MartinBaBer, Project: mne-python, Lines: 59, Source file: test_source_space.py
Example 10: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)), **kwargs)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), **kwargs)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Test handling of degenerate data
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert any('All data were zero' in str(ww.message) for ww in w)
    mlab.close(all=True)
Author: claire-braboszcz, Project: mne-python, Lines: 49, Source file: test_3d.py
Example 11: test_morph_data
def test_morph_data():
    """Test morphing of data."""
    subject_from = 'sample'
    subject_to = 'fsaverage'

    fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg')
    stc_from = SourceEstimate(fname)
    stc_from.crop(0.09, 0.1)  # for faster computation
    stc_to = morph_data(subject_from, subject_to, stc_from,
                        grade=3, smooth=12, buffer_size=1000)
    stc_to.save('%s_audvis-meg' % subject_to)

    stc_to2 = morph_data(subject_from, subject_to, stc_from,
                         grade=3, smooth=12, buffer_size=3)
    assert_array_almost_equal(stc_to.data, stc_to2.data)

    mean_from = stc_from.data.mean(axis=0)
    mean_to = stc_to.data.mean(axis=0)
    assert_true(np.corrcoef(mean_to, mean_from).min() > 0.999)
Author: sudo-nim, Project: mne-python, Lines: 19, Source file: test_source_estimate.py
Example 12: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)

    # Test both types of incorrect limits key (lims/pos_lims)
    clim = dict(kind='value', lims=(5, 10, 15))
    colormap = 'mne_analyze'
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
    clim = dict(kind='value', pos_lims=(5, 10, 15))
    colormap = 'hot'
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

    # Test for correct clim values
    clim['pos_lims'] = (5, 10, 15, 20)
    colormap = 'mne_analyze'
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
    clim = 'foo'
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
    clim = (5, 10, 15)
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)

    # Test that stc.data contains enough unique values to use percentages
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap=colormap, clim=clim)
Author: ImmanuelSamuel, Project: mne-python, Lines: 41, Source file: test_3d.py
Example 13: test_morphed_source_space_return
def test_morphed_source_space_return():
    """Test returning a morphed source space to the original subject."""
    # let's create some random data on fsaverage
    data = rng.randn(20484, 1)
    tmin, tstep = 0, 1.
    src_fs = read_source_spaces(fname_fs)
    stc_fs = SourceEstimate(data, [s['vertno'] for s in src_fs],
                            tmin, tstep, 'fsaverage')
    n_verts_fs = sum(len(s['vertno']) for s in src_fs)

    # Create our morph source space
    src_morph = morph_source_spaces(src_fs, 'sample',
                                    subjects_dir=subjects_dir)
    n_verts_sample = sum(len(s['vertno']) for s in src_morph)
    assert n_verts_fs == n_verts_sample

    # Morph the data over using standard methods
    stc_morph = compute_source_morph(
        src_fs, 'fsaverage', 'sample',
        spacing=[s['vertno'] for s in src_morph], smooth=1,
        subjects_dir=subjects_dir, warn=False).apply(stc_fs)
    assert stc_morph.data.shape[0] == n_verts_sample

    # We can now pretend like this was real data we got e.g. from an inverse.
    # To be complete, let's remove some vertices
    keeps = [np.sort(rng.permutation(np.arange(len(v)))[:len(v) - 10])
             for v in stc_morph.vertices]
    stc_morph = SourceEstimate(
        np.concatenate([stc_morph.lh_data[keeps[0]],
                        stc_morph.rh_data[keeps[1]]]),
        [v[k] for v, k in zip(stc_morph.vertices, keeps)], tmin, tstep,
        'sample')

    # Return it to the original subject
    stc_morph_return = stc_morph.to_original_src(
        src_fs, subjects_dir=subjects_dir)

    # This should fail (has too many verts in SourceMorph)
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        morph = compute_source_morph(
            src_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match='vertices do not match'):
        morph.apply(stc_morph)

    # Compare to the original data
    with pytest.warns(RuntimeWarning, match='vertices not included'):
        stc_morph_morph = compute_source_morph(
            src=stc_morph, subject_from='sample',
            spacing=stc_morph_return.vertices, smooth=1,
            subjects_dir=subjects_dir).apply(stc_morph)

    assert_equal(stc_morph_return.subject, stc_morph_morph.subject)
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Explicitly test having two vertices map to the same target vertex. We
    # simulate this by having two vertices be at the same position.
    src_fs2 = src_fs.copy()
    vert1, vert2 = src_fs2[0]['vertno'][:2]
    src_fs2[0]['rr'][vert1] = src_fs2[0]['rr'][vert2]
    stc_morph_return = stc_morph.to_original_src(
        src_fs2, subjects_dir=subjects_dir)

    # test to_original_src method result equality
    for ii in range(2):
        assert_array_equal(stc_morph_return.vertices[ii],
                           stc_morph_morph.vertices[ii])
    # These will not match perfectly because morphing pushes data around
    corr = np.corrcoef(stc_morph_return.data[:, 0],
                       stc_morph_morph.data[:, 0])[0, 1]
    assert corr > 0.99, corr

    # Degenerate cases
    stc_morph.subject = None  # no .subject provided
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='fsaverage', subjects_dir=subjects_dir)
    stc_morph.subject = 'sample'
    del src_fs[0]['subject_his_id']  # no name in src_fsaverage
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'fsaverage'  # name mismatch
    pytest.raises(ValueError, stc_morph.to_original_src,
                  src_fs, subject_orig='foo', subjects_dir=subjects_dir)
    src_fs[0]['subject_his_id'] = 'sample'
    src = read_source_spaces(fname)  # wrong source space
    pytest.raises(RuntimeError, stc_morph.to_original_src,
                  src, subjects_dir=subjects_dir)
Author: palday, Project: mne-python, Lines: 95, Source file: test_source_space.py
Example 14: len
data_summary = np.zeros((n_vertices_fsave, len(good_cluster_inds) + 1))
tstep = stc.tstep
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

# Make the first "time point" a sum across all clusters for easy
# visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3, subject='fsaverage')

# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brains = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'both',
                                  subjects_dir=subjects_dir,
                                  time_label='Duration significant (ms)',
                                  fmin=0, fmid=25, fmax=50)
for idx, brain in enumerate(brains):
    brain.set_data_time_index(0)
    brain.scale_data_colormap(fmin=0, fmid=25, fmax=50, transparent=True)
    brain.show_view('lateral')
    brain.save_image('clusters-%s.png' % ('lh' if idx == 0 else 'rh'))
Author: OliverWS, Project: mne-python, Lines: 31, Source file: plot_cluster_stats_spatio_temporal_2samp.py
Example 15: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Test for correct clim values
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match='must be "value" or "percent"'):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Test handling of degenerate data: thresholded maps
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
    mlab.close(all=True)
Author: kambysese, Project: mne-python, Lines: 44, Source file: test_3d.py
Example 16: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    sample_src = read_source_spaces(src_fname)

    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.random.RandomState(0).rand((n_verts * n_time))
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1, 'sample')

    # Test for simple use cases
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)

    # Test both types of incorrect limits key (lims/pos_lims)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)

    # Test for correct clim values
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)

    # Test handling of degenerate data
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # thresholded maps
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 1)
    mlab.close()
Author: The3DWizard, Project: mne-python, Lines: 67, Source file: test_3d.py
Example 17: read_source_spaces
fname_raw = op.join(data_dir, 'raw_fif', '%s_funloc_raw.fif' % subj)
fname_erm = op.join(data_dir, 'raw_fif', '%s_erm_raw.fif' % subj)
trans = op.join(data_dir, 'trans', '%s-trans.fif' % subj)
bem = op.join(bem_dir, '%s-5120-5120-5120-bem-sol.fif' % subject)
src = read_source_spaces(op.join(bem_dir, '%s-oct-6-src.fif' % subject))
sfreq = read_info(fname_raw, verbose=False)['sfreq']

# ############################################################################
# construct appropriate brain activity

print('Constructing original (simulated) sources')
tmin, tmax = -0.2, 0.8
vertices = [s['vertno'] for s in src]
n_vertices = sum(s['nuse'] for s in src)
data = np.ones((n_vertices, int((tmax - tmin) * sfreq)))
stc = SourceEstimate(data, vertices, -0.2, 1. / sfreq, subject)

# limit activation to a square pulse in time at two vertices in space
labels = [read_labels_from_annot(subject, 'aparc.a2009s', hemi,
                                 regexp='G_temp_sup-G_T_transv')[0]
          for hi, hemi in enumerate(('lh', 'rh'))]
stc = stc.in_label(labels[0] + labels[1])
stc.data.fill(0)
stc.data[:, np.where(np.logical_and(stc.times >= pulse_tmin,
                                    stc.times <= pulse_tmax))[0]] = 10e-9

# ############################################################################
# Simulate data

# Simulate data with movement
with warnings.catch_warnings(record=True):
Author: staulu, Project: mnefun, Lines: 31, Source file: plot_simulation.py
Example 18: plot_visualize_mft_sources
def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,
                               subject, subjects_dir):
    '''
    Plot the MFT sources at time point of peak.
    '''
    print "##### Attempting to plot:"
    # cf. decoding/plot_decoding_spatio_temporal_source.py
    vertices = [s['vertno'] for s in fwdmag['src']]
    if len(vertices) == 1:
        vertices = [fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],
                    fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]]

    stc_feat = SourceEstimate(stcdata, vertices=vertices,
                              tmin=-0.2, tstep=tstep, subject=subject)
    for hemi in ['lh', 'rh']:
        brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,
                              transparent=True, clim='auto')
        brain.show_view('lateral')
        # use peak getter to move visualization to the time point of the peak
        tmin = 0.095
        tmax = 0.10
        print "Restricting peak search to [%fs, %fs]" % (tmin, tmax)
        if hemi == 'both':
            vertno_max, time_idx = stc_feat.get_peak(hemi='rh', time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        else:
            vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,
                                                     tmin=tmin, tmax=tmax)
        if hemi == 'lh':
            comax = fwdmag['src'][0]['rr'][vertno_max]
            print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][0]['rr'][vertno_max] = " %\
                (hemi, vertno_max, time_idx), comax
        elif len(fwdmag['src']) > 1:
            comax = fwdmag['src'][1]['rr'][vertno_max]
            print "hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][1]['rr'][vertno_max] = " %\
                (hemi, vertno_max, time_idx), comax

        print "hemi=%s: setting time_idx=%d" % (hemi, time_idx)
        brain.set_data_time_index(time_idx)
        # draw marker at maximum peaking vertex
        brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',
                       scale_factor=0.6)
        offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])
        if hemi == 'lh':
            ifoci = [np.nonzero([stcdata[0:offsets[1], time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][0]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][0]['rr'][vfoci]
            print "Coords of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['lh'].coords[vfoci] : "
            print brain.geo['lh'].coords[vfoci]
        elif len(fwdmag['src']) > 1:
            ifoci = [np.nonzero([stcdata[offsets[1]:, time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]
            vfoci = fwdmag['src'][1]['vertno'][ifoci[0][0]]
            cfoci = fwdmag['src'][1]['rr'][vfoci]
            print "Coords of %d sel. vfoci: " % cfoci.shape[0]
            print cfoci
            print "vfoci: "
            print vfoci
            print "brain.geo['rh'].coords[vfoci] : "
            print brain.geo['rh'].coords[vfoci]

        mrfoci = np.zeros(cfoci.shape)
        invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])
        mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)
        print "mrfoci: "
        print mrfoci

        # Just some blops:
        bloblist = np.zeros((300, 3))
        for i in xrange(100):
            bloblist[i, 0] = float(i)
            bloblist[i + 100, 1] = float(i)
            bloblist[i + 200, 2] = float(i)
        mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)

        brain.save_image('testfig_map_%s.png' % hemi)
        brain.close()
Author: d-van-de-velden, Project: jumeg, Lines: 79, Source file: jumeg_mft_plot.py
Example 19: len
# cluster becomes a "time point" in the SourceEstimate
data = np.zeros((n_vertices_fsave, n_times))
data_summary = np.zeros((n_vertices_fsave, len(good_cluster_inds) + 1))
for ii, cluster_ind in enumerate(good_cluster_inds):
    data.fill(0)
    v_inds = clusters[cluster_ind][1]
    t_inds = clusters[cluster_ind][0]
    data[v_inds, t_inds] = T_obs[t_inds, v_inds]
    # Store a nice visualization of the cluster by summing across time (in ms)
    data = np.sign(data) * np.logical_not(data == 0) * tstep
    data_summary[:, ii + 1] = 1e3 * np.sum(data, axis=1)

# Make the first "time point" a sum across all clusters for easy
# visualization
data_summary[:, 0] = np.sum(data_summary, axis=1)

stc_all_cluster_vis = SourceEstimate(data_summary, fsave_vertices, tmin=0,
                                     tstep=1e-3)

# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
colormap = mne_analyze_colormap(limits=[0, 10, 50])
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot('fsaverage', 'inflated', 'rh', colormap,
                                 subjects_dir=subjects_dir,
                                 time_label='Duration significant (ms)')
brain.set_data_time_index(0)
# The colormap requires brain data to be scaled -fmax -> fmax
brain.scale_data_colormap(fmin=-50, fmid=0, fmax=50, transparent=False)
brain.show_view('lateral')
brain.save_image('clusters.png')
Author: mshamalainen, Project: mne-python, Lines: 31, Source file: plot_cluster_stats_spatio_temporal.py