本文整理汇总了Python中mne.set_log_level函数的典型用法代码示例。如果您正苦于以下问题:Python set_log_level函数的具体用法?Python set_log_level怎么用?Python set_log_level使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了set_log_level函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: interpolate_bads
def interpolate_bads(inst, picks, dots=None, reset_bads=True, mode='accurate'):
    """Interpolate the bad MEG and EEG channels of ``inst`` in place.

    EEG and MEG picks are interpolated separately: EEG through mne's
    ``_interpolate_bads_eeg``, MEG through the fast local variant.
    ``inst`` is modified in place and also returned for convenience.
    """
    import mne

    # Silence MNE entirely while interpolating: cobyla otherwise emits
    # printf noise.  XXX CRITICAL for now, unless a better solution emerges.
    old_level = mne.set_log_level('CRITICAL', return_old_level=True)

    eeg_channels = set(pick_types(inst.info, meg=False, eeg=True, exclude=[]))
    eeg_targets = [idx for idx in picks if idx in eeg_channels]
    if eeg_targets:
        _interpolate_bads_eeg(inst, picks=eeg_targets)

    meg_channels = set(pick_types(inst.info, meg=True, eeg=False, exclude=[]))
    meg_targets = [idx for idx in picks if idx in meg_channels]
    if meg_targets:
        _interpolate_bads_meg_fast(inst, picks=meg_targets,
                                   dots=dots, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    # restore whatever log level the caller had configured
    mne.set_log_level(old_level)
    return inst
开发者ID:autoreject,项目名称:autoreject,代码行数:25,代码来源:utils.py
示例2: write_mnefiff
def write_mnefiff(data, filename):
    """Export data to MNE using FIFF format.

    Parameters
    ----------
    data : instance of ChanTime
        data with only one trial
    filename : path to file
        file to export to (use a '.fif' extension; the data are written
        with mne's RawArray.save, which produces FIFF, not '.mat')

    Notes
    -----
    It cannot store data larger than 2 GB.
    The data is assumed to have only EEG electrodes.
    It overwrites a file if it exists.
    """
    from mne import create_info, set_log_level
    from mne.io import RawArray
    # NOTE(review): WARNING is not defined in this view — presumably
    # logging.WARNING imported at module level; confirm
    set_log_level(WARNING)
    TRIAL = 0  # only the first (and, per the contract above, only) trial
    info = create_info(list(data.axis['chan'][TRIAL]), data.s_freq, ['eeg', ] *
                       data.number_of('chan')[TRIAL])
    UNITS = 1e-6  # mne wants data in uV
    fiff = RawArray(data.data[0] * UNITS, info)
    # attach channel locations when the dataset carries them
    if data.attr['chan']:
        fiff.set_channel_positions(data.attr['chan'].return_xyz(),
                                   data.attr['chan'].return_label())
    fiff.save(filename, overwrite=True)
开发者ID:gpiantoni,项目名称:phypno,代码行数:33,代码来源:mnefiff.py
示例3: run
def run():
    """Run the KIT-to-FIFF conversion command.

    Parses command-line options describing a KIT recording (data, marker,
    digitizer and headshape files plus trigger settings), converts it with
    ``read_raw_kit`` and saves the result to a fiff file.  When no
    ``--input`` file is given, the graphical ``mne.gui.kit2fiff`` tool is
    launched instead.  Always terminates the process via ``sys.exit(0)``.
    """
    from mne.commands.utils import get_optparser
    parser = get_optparser(__file__)
    parser.add_option('--input', dest='input_fname',
                      help='Input data file name', metavar='filename')
    parser.add_option('--mrk', dest='mrk_fname',
                      help='MEG Marker file name', metavar='filename')
    parser.add_option('--elp', dest='elp_fname',
                      help='Headshape points file name', metavar='filename')
    parser.add_option('--hsp', dest='hsp_fname',
                      help='Headshape file name', metavar='filename')
    parser.add_option('--stim', dest='stim',
                      help='Colon Separated Stimulus Trigger Channels',
                      metavar='chs')
    parser.add_option('--slope', dest='slope', help='Slope direction',
                      metavar='slope')
    # type='float' so a user-supplied threshold arrives as a number;
    # optparse otherwise passes the raw option string through
    parser.add_option('--stimthresh', dest='stimthresh', default=1,
                      type='float',
                      help='Threshold value for trigger channels',
                      metavar='value')
    parser.add_option('--output', dest='out_fname',
                      help='Name of the resulting fiff file',
                      metavar='filename')
    parser.add_option('--debug', dest='debug', action='store_true',
                      default=False,
                      help='Set logging level for terminal output to debug')
    options, args = parser.parse_args()
    if options.debug:
        mne.set_log_level('debug')
    input_fname = options.input_fname
    if input_fname is None:
        # no input file given: fall back to the interactive GUI
        with ETSContext():
            mne.gui.kit2fiff()
        sys.exit(0)
    hsp_fname = options.hsp_fname
    elp_fname = options.elp_fname
    mrk_fname = options.mrk_fname
    stim = options.stim
    slope = options.slope
    stimthresh = options.stimthresh
    out_fname = options.out_fname
    if isinstance(stim, str):
        # materialize the channel list; map() is a lazy one-shot iterator on
        # Python 3 and would misbehave if consumed more than once downstream
        stim = [int(ch) for ch in stim.split(':')]
    raw = read_raw_kit(input_fname=input_fname, mrk=mrk_fname, elp=elp_fname,
                       hsp=hsp_fname, stim=stim, slope=slope,
                       stimthresh=stimthresh)
    raw.save(out_fname)
    raw.close()
    sys.exit(0)
开发者ID:Eric89GXL,项目名称:mne-python,代码行数:58,代码来源:mne_kit2fiff.py
示例4: test_morphing
def test_morphing():
    """Check Eelbrain's morph_source_space against MNE's precomputed morph.

    Morphs a sample-subject source estimate to fsaverage through both the
    NDVar path and mne.morph_data_precomputed and asserts the results agree.
    """
    mne.set_log_level('warning')  # silence verbose morphing output
    # fsaverage ico-4 source space supplies the morph target vertices
    sss = datasets._mne_source_space('fsaverage', 'ico-4', subjects_dir)
    vertices_to = [sss[0]['vertno'], sss[1]['vertno']]
    # one epoch of the mne sample data, with source estimates attached
    ds = datasets.get_mne_sample(-0.1, 0.1, src='ico', sub='index==0', stc=True)
    stc = ds['stc', 0]
    # reference result: MNE's own morph matrix applied to the same STC
    morph_mat = mne.compute_morph_matrix('sample', 'fsaverage', stc.vertno,
                                         vertices_to, None, subjects_dir)
    ndvar = ds['src']
    morphed_ndvar = morph_source_space(ndvar, 'fsaverage')
    morphed_stc = mne.morph_data_precomputed('sample', 'fsaverage', stc,
                                             vertices_to, morph_mat)
    # first case of the morphed NDVar must match the MNE result exactly
    assert_array_equal(morphed_ndvar.x[0], morphed_stc.data)
    # round-trip the MNE result back into an NDVar and compare wholesale
    morphed_stc_ndvar = load.fiff.stc_ndvar([morphed_stc], 'fsaverage', 'ico-4',
                                            subjects_dir, 'src', parc=None)
    assert_dataobj_equal(morphed_ndvar, morphed_stc_ndvar)
开发者ID:awjamison,项目名称:Eelbrain,代码行数:17,代码来源:test_mne.py
示例5: interpolate_bads
def interpolate_bads(inst, reset_bads=True, mode='accurate'):
    """Interpolate the bad MEG and EEG channels of ``inst``.

    ``inst`` must be preloaded; it is modified in place and returned.
    When ``reset_bads`` is True the bads list is cleared afterwards.
    """
    import mne
    from mne.channels.interpolation import _interpolate_bads_eeg

    # lower log levels let cobyla spam printf output
    mne.set_log_level('WARNING')

    preloaded = getattr(inst, 'preload', None)
    if preloaded is False:
        raise ValueError('Data must be preloaded.')

    _interpolate_bads_eeg(inst)
    _interpolate_bads_meg_fast(inst, mode=mode)

    if reset_bads is True:
        inst.info['bads'] = []

    return inst
开发者ID:dengemann,项目名称:autoreject,代码行数:17,代码来源:utils.py
示例6: create_3Dmatrix
def create_3Dmatrix(epochs_dim, ch_type, input_filename, input_filename_trial, output_filename=None):
    """Epoch a raw MEG recording around stored triggers and pickle the result.

    Parameters
    ----------
    epochs_dim : sequence of two floats
        (tmin, tmax) of each epoch in seconds, relative to trigger onset.
    ch_type : dict
        Channel selection forwarded to pick_types; expects the keys
        'meg', 'eeg', 'stim' and 'exclude'.
    input_filename : str
        Path of the raw fif file.
    input_filename_trial : str
        Path of a pickle holding 'trigger_times', 'trigger_decimal' and
        'coi' (conditions of interest).
    output_filename : str | None
        Destination path; when None the name is derived from
        ``input_filename_trial`` ('..._3D.pickle').

    Returns
    -------
    filename_save : str
        Path of the written pickle with keys 'X' (epochs x channels x
        times), 'y' (binary labels), 'tmin' and 'sfreq'.
    """
    mne.set_log_level('WARNING')
    raw = mne.fiff.Raw(input_filename)
    # pickles must be read/written in *binary* mode ('r'/'w' only worked by
    # accident on POSIX Python 2) and the handles must be closed again
    with open(input_filename_trial, 'rb') as fin:
        datatrial = pickle.load(fin)
    trigger_times = datatrial['trigger_times']
    trigger_decimal = datatrial['trigger_decimal']
    coi = datatrial['coi']
    print("")
    print("Get the indexes of just the MEG channels")
    picks = mne.fiff.pick_types(raw.info, meg=ch_type['meg'], eeg=ch_type['eeg'], stim=ch_type['stim'], exclude=ch_type['exclude'])  # only meg channels
    # event rows: (sample, 0, trigger code), the layout mne.Epochs expects;
    # dtype=int replaces the removed deprecated alias np.int
    events = np.vstack([trigger_times, np.zeros(len(trigger_times), dtype=int), trigger_decimal]).T
    print("Extracting Epochs for each condition for the contrast.")
    baseline = (None, 0)  # means from the first instant to t = 0
    reject = {}
    tmin = epochs_dim[0]
    tmax = epochs_dim[1]
    epochs = mne.Epochs(raw, events, event_id=None, tmin=tmin, tmax=tmax, proj=True, picks=picks, baseline=baseline, preload=False, reject=reject)
    X = epochs.get_data()
    y = epochs.events[:, 2]
    # binary label: 1 for the first condition of interest, 0 otherwise
    label = np.zeros(len(y))
    for i, yi in enumerate(y):
        if np.sum(yi == coi[0]):
            label[i] = 1
    if output_filename is None:
        # splitext only strips the extension; the former split('.')[0]
        # truncated the whole path when a directory name contained a dot
        filename_save = os.path.splitext(input_filename_trial)[0] + '_3D.pickle'
    else:
        filename_save = os.path.abspath(output_filename)
    print("Saving to: %s" % filename_save)
    with open(filename_save, 'wb') as fout:
        pickle.dump({'X': X,
                     'y': label,
                     'tmin': tmin,
                     'sfreq': raw.info['sfreq']
                     }, fout,
                    protocol=pickle.HIGHEST_PROTOCOL)
    return filename_save
开发者ID:baothien,项目名称:tiensy,代码行数:44,代码来源:NILabMNELibrary.py
示例7: _fast_map_meg_channels
def _fast_map_meg_channels(info, pick_from, pick_to,
                           dots=None, mode='fast'):
    """Compute a field mapping from ``pick_from`` to ``pick_to`` MEG channels.

    Parameters
    ----------
    info : mne Info
        Measurement info describing the channels.
    pick_from : list of int
        Indices of the channels the mapping is computed from.
    pick_to : list of int
        Indices of the channels the mapping projects onto.
    dots : tuple (self_dots, cross_dots) | None
        Precomputed dot products; recomputed from ``info`` when None.
    mode : str
        Accuracy mode forwarded to mne's ``_setup_dots``.

    Returns
    -------
    data
        The mapping produced by mne's ``_compute_mapping_matrix``.
    """
    from mne.io.pick import pick_info
    from mne.forward._field_interpolation import _setup_dots
    from mne.forward._field_interpolation import _compute_mapping_matrix
    from mne.forward._make_forward import _create_meg_coils, _read_coil_defs
    from mne.bem import _check_origin
    miss = 1e-4  # Smoothing criterion for MEG
    # XXX: hack to silence _compute_mapping_matrix — remember the configured
    # level, force WARNING, restore before returning
    verbose = mne.get_config('MNE_LOGGING_LEVEL', 'INFO')
    mne.set_log_level('WARNING')
    info_from = pick_info(info, pick_from, copy=True)
    templates = _read_coil_defs()
    coils_from = _create_meg_coils(info_from['chs'], 'normal',
                                   info_from['dev_head_t'], templates)
    # (0, 0, 0.04) m — presumably the conventional default sphere origin in
    # head coordinates; _check_origin validates it against info_from
    my_origin = _check_origin((0., 0., 0.04), info_from)
    int_rad, noise, lut_fun, n_fact = _setup_dots(mode, coils_from, 'meg')
    # This function needs a clean input. It hates the presence of other
    # channels than MEG channels. Make sure all is picked.
    if dots is None:
        # chained assignment: binds ``dots`` AND unpacks the pair.
        # NOTE(review): in this branch the dots are not restricted with
        # _pick_dots — verify whether that asymmetry is intended.
        dots = self_dots, cross_dots = _compute_dots(info, mode=mode)
    else:
        # NOTE(review): this first unpacking is immediately overwritten by
        # _pick_dots below; it looks redundant but is left untouched here.
        self_dots, cross_dots = dots
        self_dots, cross_dots = _pick_dots(dots, pick_from, pick_to)
    ch_names = [c['ch_name'] for c in info_from['chs']]
    fmd = dict(kind='meg', ch_names=ch_names,
               origin=my_origin, noise=noise, self_dots=self_dots,
               surface_dots=cross_dots, int_rad=int_rad, miss=miss)
    fmd['data'] = _compute_mapping_matrix(fmd, info_from)
    # restore the caller's logging level
    mne.set_log_level(verbose)
    return fmd['data']
开发者ID:autoreject,项目名称:autoreject,代码行数:39,代码来源:utils.py
示例8: __init__
def __init__(self, subject, settings=None):
    """Set up the processing pipeline for one subject.

    Parameters
    ----------
    subject : identifier of the subject to process.
    settings : dict | None
        Optional configuration overrides. Recognized keys:
        'debug' (custom logging config; default enables debug),
        'mne_log_level' (default 'INFO'),
        'sfreq' (downsampling target, default 64),
        'layout' (channel layout; default mne biosemi layout),
        'data_root' (default <deepthought.DATA_PATH>/OpenMIIR).
    """
    # avoid the shared mutable-default-argument pitfall: a `settings=dict()`
    # default is evaluated once and the stored dict would be shared by
    # every instance constructed without explicit settings
    if settings is None:
        settings = {}
    self.subject = subject
    self.settings = settings
    # kept as an explicit branch: positional vs. keyword call to
    # configure_custom may differ — preserved exactly
    if 'debug' in settings:
        configure_custom(settings['debug'])
    else:
        configure_custom(debug=True)
    # .get() is behaviorally identical to the former if/else chains
    mne.set_log_level(settings.get('mne_log_level', 'INFO'))
    self.downsample_sfreq = settings.get('sfreq', 64)
    # explicit branch here on purpose: the default involves an mne call
    # that should only run when no layout was supplied
    if 'layout' in settings:
        self.layout = settings['layout']
    else:
        self.layout = mne.channels.read_layout('biosemi.lay')
    self.data_root = settings.get(
        'data_root', os.path.join(deepthought.DATA_PATH, 'OpenMIIR'))
    # load stimuli metadata version
    self.stimuli_version = get_stimuli_version(subject)
    # initial state
    self.raw = None
    self.ica = None
    self.filtered = False
    self.downsampled = False
开发者ID:Qi0116,项目名称:deepthought,代码行数:38,代码来源:pipeline.py
示例9:
# of filenames for various things we'll be using.
import os.path as op
import numpy as np
from scipy.signal import welch, coherence
from mayavi import mlab
from matplotlib import pyplot as plt
import mne
from mne.simulation import simulate_raw
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
# Suppress irrelevant output
mne.set_log_level('ERROR')
# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)  # download=False: do not fetch the dataset here
subjects_dir = op.join(data_path, 'subjects')
mri_path = op.join(subjects_dir, 'sample')
# Filenames for various files we'll be using
meg_path = op.join(data_path, 'MEG', 'sample')
raw_fname = op.join(meg_path, 'sample_audvis_raw.fif')  # raw recording
trans_fname = op.join(meg_path, 'sample_audvis_raw-trans.fif')  # head<->MRI transform
src_fname = op.join(mri_path, 'bem/sample-oct-6-src.fif')  # source space
bem_fname = op.join(mri_path, 'bem/sample-5120-5120-5120-bem-sol.fif')  # BEM solution
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')  # forward solution
cov_fname = op.join(meg_path, 'sample_audvis-cov.fif')  # noise covariance
开发者ID:jdammers,项目名称:mne-python,代码行数:30,代码来源:plot_dics.py
示例10: plot_group_fourierICA
def plot_group_fourierICA(fn_groupICA_obj,
stim_id=1, stim_delay=0,
resp_id=None,
corr_event_picking=None,
global_scaling=True,
subjects_dir=None,
bar_plot=False):
"""
Interface to plot the results from group FourierICA
Parameters
----------
fn_groupICA_obj: filename of the group ICA object
stim_id: Id of the event of interest to be considered in
the stimulus channel. Only of interest if 'stim_name'
is set
default: event_id=1
stim_delay: stimulus delay in milliseconds
default: stim_delay=0
resp_id: Response IDs for correct event estimation. Note:
Must be in the order corresponding to the 'event_id'
default: resp_id=None
corr_event_picking: string
if set should contain the complete python path and
name of the function used to identify only the correct events
default: corr_event_picking=None
subjects_dir: string
If the subjects directory is not confirm with
the system variable 'SUBJECTS_DIR' parameter should be set
default: subjects_dir=None
bar_plot: boolean
If set the results of the time-frequency analysis
are shown as bar plot. This option is recommended
when FourierICA was applied to resting-state data
default: bar_plot=False
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from jumeg.decompose.fourier_ica_plot import plot_results_src_space
from mne import set_log_level
from os.path import exists
from pickle import dump, load
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# read in group FourierICA object
# ------------------------------------------
with open(fn_groupICA_obj, "rb") as filehandler:
groupICA_obj = load(filehandler)
icasso_obj = groupICA_obj['icasso_obj']
win_length_sec = icasso_obj.tmax_win - icasso_obj.tmin_win
temp_profile_names = ["Event-ID %i" % i for i in groupICA_obj['icasso_obj'].event_id]
# ------------------------------------------
# check if time-courses already exist
# ------------------------------------------
fn_temporal_envelope = fn_groupICA_obj[:-4] + '_temporal_envelope.obj'
# generate time courses if they do not exist
if not exists(fn_temporal_envelope):
# import necessary modules
from jumeg.decompose.group_ica import get_group_fourierICA_time_courses
# generate time courses
temporal_envelope, src_loc, vert, sfreq = \
get_group_fourierICA_time_courses(groupICA_obj, event_id=stim_id,
stim_delay=stim_delay, resp_id=resp_id,
corr_event_picking=corr_event_picking,
unfiltered=False, baseline=(None, 0))
# save data
temp_env_obj = {'temporal_envelope': temporal_envelope,
'src_loc': src_loc, 'vert': vert, 'sfreq': sfreq}
with open(fn_temporal_envelope, "wb") as filehandler:
dump(temp_env_obj, filehandler)
# when data are stored read them in
else:
# read data in
with open(fn_temporal_envelope, "rb") as filehandler:
temp_env_obj = load(filehandler)
# get data
temporal_envelope = temp_env_obj['temporal_envelope']
src_loc = temp_env_obj['src_loc']
vert = temp_env_obj['vert']
# ------------------------------------------
# check if classification already exists
# ------------------------------------------
if groupICA_obj.has_key('classification') and\
groupICA_obj.has_key('mni_coords') and\
#.........这里部分代码省略.........
开发者ID:d-van-de-velden,项目名称:jumeg,代码行数:101,代码来源:group_ica.py
示例11:
parser.add_argument('--reject', help=help_reject, action='store_true')
parser.add_argument('--nharm', type=int, default=default_nharm,
choices=[0, 1, 2, 3, 4], help=help_nharm)
parser.add_argument('--epoch_start', type=float, default=None,
help=help_epoch_start)
parser.add_argument('--epoch_end', type=float, default=None,
help=help_epoch_end)
parser.add_argument('--plot_snr', help=help_plot_snr, action='store_true')
parser.add_argument('--stim_channel', help=help_stim_channel, type=str,
default=None)
parser.add_argument('--stim_mask', type=int, default=None,
help=help_sti_mask)
args = parser.parse_args()
mne.set_log_level('ERROR') # reduce mne output
if args.epochs_file:
fnbase = os.path.basename(os.path.splitext(args.epochs_file)[0])
else:
fnbase = os.path.basename(os.path.splitext(args.snr_file)[0])
verbose = False
""" Load cHPI SNR file. It is typically not maxfiltered, so ignore
MaxShield warnings. """
raw_chpi = mne.io.Raw(args.snr_file, allow_maxshield='yes',
verbose=verbose)
picks = mne.pick_types(raw_chpi.info, meg=True)
""" If using a separate file for the actual data epochs, load it. """
if args.epochs_file:
开发者ID:BioMag,项目名称:meg_scripts,代码行数:31,代码来源:chpi_weighted_average.py
示例12: group_fourierICA_src_space
#.........这里部分代码省略.........
is generated automatically.
default: fnout=None
verbose: bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
Return
------
groupICA_obj: dictionary
Group ICA information stored in a dictionary. The dictionary
has following keys:
'fn_list': List of filenames which where used to estimate the
group ICA
'W_orig': estimated de-mixing matrix
'A_orig': estimated mixing matrix
'quality': quality index of the clustering between
components belonging to one cluster
(between 0 and 1; 1 refers to small clusters,
i.e., components in one cluster have a highly similar)
'icasso_obj': ICASSO object. For further information
please have a look into the ICASSO routine
'fourier_ica_obj': FourierICA object. For further information
please have a look into the FourierICA routine
fnout: string
filename where the 'groupICA_obj' is stored
"""
# ------------------------------------------
# import necessary modules
# ------------------------------------------
from jumeg.decompose.icasso import JuMEG_icasso
from mne import set_log_level
import numpy as np
from os.path import dirname, join
from pickle import dump
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# check input parameter
# ------------------------------------------
# filenames
if isinstance(fname_raw, list):
fn_list = fname_raw
else:
fn_list = [fname_raw]
# -------------------------------------------
# set some path parameter
# -------------------------------------------
fn_inv = []
for fn_raw in fn_list:
fn_inv.append(fn_raw[:fn_raw.rfind('-raw.fif')] + inv_pattern)
# ------------------------------------------
# apply FourierICA combined with ICASSO
# ------------------------------------------
icasso_obj = JuMEG_icasso(nrep=nrep, fn_inv=fn_inv,
src_loc_method=src_loc_method,
morph2fsaverage=True,
开发者ID:d-van-de-velden,项目名称:jumeg,代码行数:67,代码来源:group_ica.py
示例13:
==============================
Generate simulated evoked data
==============================
compare the regression results between my method and MNE, using the wrapped version
"""
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la
import mne
from mne.viz import plot_evoked, plot_sparse_source_estimates
from mne.simulation import generate_stc
import matplotlib.pyplot as plt
from mne.inverse_sparse.mxne_optim import _Phi, _PhiT
mne.set_log_level('warning')
import os,sys,inspect
# laptop
#os.chdir('/home/yingyang/Dropbox/MEG_source_loc_proj/stft_tree_group_lasso/')
# desktop
# NOTE(review): machine-specific hard-coded path — breaks on other hosts
os.chdir('/home/ying/Dropbox/MEG_source_loc_proj/STFT_R_git_repo/')
# directory containing this script, resolved at runtime
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
# put the sibling packages on sys.path so they import without installation
sys.path.insert(0,parentdir+"/Simulation")
sys.path.insert(0,parentdir + "/Spline_Regression")
sys.path.insert(0,parentdir + "/MNE_stft")
sys.path.insert(0,currentdir)
from mne.inverse_sparse.mxne_optim import *
from Simulation_real_scale import *
开发者ID:YingYang,项目名称:STFT_R_git_repo,代码行数:31,代码来源:Simulation_real_scale_trial_by_trial_regression_comp.py
示例14: import
from utils import get_data
from config import (
data_path,
pyoutput_path,
subjects,
paths('report'),
contrasts,
open_browser,
chan_types,
)
report, run_id, _, logger = setup_provenance(script=__file__,
results_dir=paths('report'))
mne.set_log_level('INFO')
# force separation of magnetometers and gradiometers
if 'meg' in [i['name'] for i in chan_types]:
chan_types = [dict(name='mag'), dict(name='grad')] + \
[dict(name=i['name']) for i in chan_types
if i['name'] != 'meg']
for subject in subjects:
# Extract events from mat file
meg_fname = op.join(data_path, subject, 'preprocessed', subject + '_preprocessed')
bhv_fname = op.join(data_path, subject, 'behavior', subject + '_fixed.mat')
epochs, events = get_data(meg_fname, bhv_fname)
# Apply each contrast
all_epochs = [[]] * len(contrasts)
开发者ID:SherazKhan,项目名称:Paris_orientation-decoding,代码行数:31,代码来源:run_evoked_contrast.py
示例15: fit
def fit(self, fn_raw, stim_name=None, event_id=1,
tmin_stim=0.0, tmax_stim=1.0, flow=4.0, fhigh=34.0,
pca_dim=0.90, max_iter=10000, conv_eps=1e-16,
verbose=True):
"""
Perform ICASSO estimation. ICASSO is based on running ICA
multiple times with slightly different conditions and
clustering the obtained components. Note, here FourierICA
is applied
Parameters
----------
fn_raw: filename of the input data (expect fif-file).
stim_name: name of the stimulus channel. Note, for
applying FourierCIA data are chopped around stimulus
onset. If not set data are chopped in overlapping
windows
default: stim_names=None
event_id: Id of the event of interest to be considered in
the stimulus channel. Only of interest if 'stim_name'
is set
default: event_id=1
tmin_stim: time of interest prior to stimulus onset.
Important for generating epochs to apply FourierICA
default = 0.0
tmax_stim: time of interest after stimulus onset.
Important for generating epochs to apply FourierICA
default = 1.0
flow: lower frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: flow=4.0
fhigh: upper frequency border for estimating the optimal
de-mixing matrix using FourierICA
default: fhigh=34.0
Note: here default flow and fhigh are choosen to
contain:
- theta (4-7Hz)
- low (7.5-9.5Hz) and high alpha (10-12Hz),
- low (13-23Hz) and high beta (24-34Hz)
pca_dim: The number of PCA components used to apply FourierICA.
If pca_dim > 1 this refers to the exact number of components.
If between 0 and 1 pca_dim refers to the variance which
should be explained by the chosen components
default: pca_dim=0.9
max_iter: maximum number od iterations used in FourierICA
default: max_iter=10000
conv_eps: iteration stops when weight changes are smaller
then this number
default: conv_eps = 1e-16
verbose: bool, str, int, or None
If not None, override default verbose level
(see mne.verbose).
default: verbose=True
Returns
-------
W: estimated optimal de-mixing matrix
A: estimated mixing matrix
Iq: quality index of the clustering between
components belonging to one cluster
(between 0 and 1; 1 refers to small clusters,
i.e., components in one cluster a highly similar)
fourier_ica_obj: FourierICA object. For further information
please have a look into the FourierICA routine
"""
# ------------------------------------------
# import necessary module
# ------------------------------------------
from fourier_ica import JuMEG_fourier_ica
from mne import find_events, pick_types, set_log_level
from mne.io import Raw
# set log level to 'WARNING'
set_log_level('WARNING')
# ------------------------------------------
# prepare data to apply FourierICA
# ------------------------------------------
meg_raw = Raw(fn_raw, preload=True)
meg_channels = pick_types(meg_raw.info, meg=True, eeg=False,
eog=False, stim=False, exclude='bads')
meg_data = meg_raw._data[meg_channels, :]
if stim_name:
events = find_events(meg_raw, stim_channel=stim_name, consecutive=True)
events = events[events[:, 2] == event_id, 0]
else:
events = []
# ------------------------------------------
# generate FourierICA object
# ------------------------------------------
if verbose:
print ">>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<"
print ">>> Performing FourierICA estimation <<<"
#.........这里部分代码省略.........
开发者ID:VolkanChen,项目名称:jumeg,代码行数:101,代码来源:icasso.py
示例16: EpochShim
from __future__ import absolute_import
import mne
from ..datasets import segment as sg
from . import feature_extractor
mne.set_log_level(verbose='WARNING')
from mne.time_frequency.tfr import cwt_morlet
import random
import sys
import numpy as np
from itertools import chain
class EpochShim(object):
    """Duck-typed stand-in for ``mne.Epoch`` built around one of our segments.

    Exposes just enough of the Epoch interface — an ``info`` dict with the
    sampling frequency and iteration over per-window arrays — for the
    band_wavelet_synchrony function to work.
    """

    def __init__(self, segment, window_size):
        self.segment = segment
        self.window_size = window_size
        # The epoch needs a dictionary attribute with the key 'freq'
        self.info = {'sfreq': segment.get_sampling_frequency()}

    def __iter__(self):
        windows = self.segment.get_windowed(self.window_size)
        return (w.transpose() for w in windows)
def epochs_from_segment(segment, window_size=5.0):
"""
Creates an MNE Epochs object from a Segment object
开发者ID:sics-lm,项目名称:kaggle-seizure-prediction,代码行数:30,代码来源:wavelets.py
示例17: range
for case in range(rt.n_cases):
w = rt[case]['Word']
i = word_map[w]
index.append(i)
rt['index'] = Var(index)
rt.sort('index') # align rows of log with rt
log.update(rt, replace=False, info=False) # combine
log = log[log['RealWord'] == 1] # event file currently does not contain nonwords
log.sort('AbsTime') # make sure log file is sorted by time
return log
# don't print a bunch of info
set_log_level('warning')
# create instance upon importing module
if socket.gethostname() == 'silversurfer.linguistics.fas.nyu.edu':
e = SufAmb('/Volumes/BackUp/sufAmb') # data is stored on silversurfer
else:
print "Trying to connect to data drive."
e = SufAmb('/Volumes/BackUp/sufAmb')
# Number of good trials per condition
# total: table.frequencies(Y='accept',ds=e.load_selected_events(reject='keep'))
# by condition: # print table.frequencies(Y='Condition',ds=e.load_selected_events(reject='keep').sub("accept==True"))
#e.set_inv(ori, depth, reg, snr, method, pick_normal)
#e.load_inv(fiff)
开发者ID:awjamison,项目名称:SufAmb,代码行数:30,代码来源:sufamb.py
示例18: test_logging
def test_logging():
    """Test logging (to file).

    Verifies that set_log_file/set_log_level route output correctly: with
    level WARNING nothing is logged unless verbose=True is requested; with
    level INFO logging is on unless suppressed per call; appending to an
    existing log file works (and the default overwrite warns); and
    overwrite=True truncates.  All file handles are now closed (the
    original leaked every handle it opened, including one dead re-open of
    fname_log that was never used).
    """
    def _read_lines(fname):
        # read all lines and make sure the handle is closed again
        with open(fname, 'r') as fid:
            return fid.readlines()

    old_lines = clean_lines(_read_lines(fname_log))
    old_lines_2 = clean_lines(_read_lines(fname_log_2))
    if op.isfile(test_name):
        os.remove(test_name)
    # test it one way (printing default off)
    set_log_file(test_name)
    set_log_level('WARNING')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1)
    assert_true(_read_lines(test_name) == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(_read_lines(test_name) == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(_read_lines(test_name) == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1, verbose=True)
    assert_equal(clean_lines(_read_lines(test_name)), old_lines)
    # now go the other way (printing default on)
    os.remove(test_name)
    set_log_file(test_name)
    set_log_level('INFO')
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose='WARNING')
    assert_true(_read_lines(test_name) == [])
    # should NOT print
    evoked = Evoked(fname_evoked, setno=1, verbose=False)
    assert_true(_read_lines(test_name) == [])
    # SHOULD print
    evoked = Evoked(fname_evoked, setno=1)
    assert_equal(clean_lines(_read_lines(test_name)), old_lines)
    # check to make sure appending works (and as default, raises a warning)
    with warnings.catch_warnings(record=True) as w:
        set_log_file(test_name, overwrite=False)
        assert len(w) == 0
        set_log_file(test_name)
        assert len(w) == 1
    evoked = Evoked(fname_evoked, setno=1)
    assert_equal(clean_lines(_read_lines(test_name)), old_lines_2)
    # make sure overwriting works
    set_log_file(test_name, overwrite=True)
    # this line needs to be called to actually do some logging
    evoked = Evoked(fname_evoked, setno=1)
    assert_equal(clean_lines(_read_lines(test_name)), old_lines)
示例19: OptionParser
import cProfile
from optparse import OptionParser
import pstats
import mne
from eelbrain import *
import eelbrain
mne.set_log_level("warning")
eelbrain._stats.testnd.multiprocessing = False
# option parser
parser = OptionParser()
parser.add_option("-m", "--make", dest="make", metavar="KIND", help="Make a new profile of kind mne or uts")
parser.add_option("-f", "--file", dest="file_ext", metavar="NAME", help="Use a different file for this profile")
parser.add_option(
"-s", "--sort", dest="sort", metavar="CRITERION", help="Sort the profile entries according to CRITERION"
)
parser.add_option("-n", dest="number", metavar="NUMBER", help="Display NUMBER entries from the profile.")
(options, args) = parser.parse_args()
# process options
if options.file_ext is None:
fname = "profile_of_anova.profile"
else:
fname = "profile_of_anova_%s.profile" % options.file_ext
sort = options.sort
if options.number is None:
开发者ID:YoheiOseki,项目名称:Eelbrain,代码行数:31,代码来源:profile_anova.py
示例20: test_source_estimate
def test_source_estimate():
    """Test the SourceSpace dimension.

    Exercises NDVar auto-conversion, source-space clustering, parc-based
    connectivity, threshold-based and threshold-free cluster tests with a
    parcellation, and label-based indexing of the source space.
    """
    mne.set_log_level('warning')  # silence verbose mne output
    ds = datasets.get_mne_sample(src='ico')
    dsa = ds.aggregate('side')
    # test auto-conversion
    asndvar('epochs', ds=ds)
    asndvar('epochs', ds=dsa)
    asndvar(dsa['epochs'][0])
    # source space clustering
    res = testnd.ttest_ind('src', 'side', ds=ds, samples=0, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10)
    # 52 is the expected cluster count for this fixed sample dataset
    assert_equal(res.clusters.n_cases, 52)
    # test disconnecting parc
    src = ds['src']
    source = src.source
    parc = source.parc
    # connectivity() vs connectivity(True): the disconnected variant must
    # only keep edges within a parcellation label
    orig_conn = set(map(tuple, source.connectivity()))
    disc_conn = set(map(tuple, source.connectivity(True)))
    assert_true(len(disc_conn) < len(orig_conn))
    for pair in orig_conn:
        s, d = pair
        if pair in disc_conn:
            # edge survived: endpoints must share a label
            assert_equal(parc[s], parc[d])
        else:
            # edge removed: endpoints must lie in different labels
            assert_not_equal(parc[s], parc[d])
    # threshold-based test with parc
    srcl = src.sub(source='lh')
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, pmin=0.05,
                           tstart=0.05, mintime=0.02, minsource=10,
                           parc='source')
    # one distribution column per parcellation cell
    assert_equal(res._cdist.dist.shape[1], len(srcl.source.parc.cells))
    label = 'superiortemporal-lh'
    c_all = res._clusters(maps=True)
    c_label = res._clusters(maps=True, source=label)
    assert_array_equal(c_label['location'], label)
    # every label-restricted cluster must match its unrestricted counterpart
    for case in c_label.itercases():
        id_ = case['id']
        idx = c_all['id'].index(id_)[0]
        assert_equal(case['v'], c_all[idx, 'v'])
        assert_equal(case['tstart'], c_all[idx, 'tstart'])
        assert_equal(case['tstop'], c_all[idx, 'tstop'])
        assert_less_equal(case['p'], c_all[idx, 'p'])
        assert_dataobj_equal(case['cluster'],
                             c_all[idx, 'cluster'].sub(source=label))
    # threshold-free test with parc
    res = testnd.ttest_ind(srcl, 'side', ds=ds, samples=10, tstart=0.05,
                           parc='source')
    cl = res._clusters(0.05)
    assert_equal(cl.eval("p.min()"), res.p.min())
    # the masked map is 0 outside clusters, t inside
    mp = res.masked_parameter_map()
    assert_in(mp.min(), (0, res.t.min()))
    assert_in(mp.max(), (0, res.t.max()))
    # indexing source space
    s_sub = src.sub(source='fusiform-lh')
    idx = source.index_for_label('fusiform-lh')
    s_idx = src[idx]
    assert_dataobj_equal(s_sub, s_idx)
注:本文中的mne.set_log_level函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论