This article collects typical usage examples of the Python function mvpa.base.debug. If you are wondering what the debug function does, how it is called, or what real-world usage looks like, the curated code samples below should help.
Twenty code examples of the debug function are shown below, sorted by popularity by default.
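All of the snippets share two conventions: calls are wrapped in `if __debug__:` so they vanish entirely when Python runs with `-O`, and each message is routed to a debug target id (e.g. 'SA', 'MAP', 'CLF_'). Before reading the examples, here is a minimal sketch of how such targets are activated, assuming a PyMVPA 0.x installation (targets can also be pre-selected through the MVPA_DEBUG environment variable):

from mvpa.base import debug

if __debug__:
    debug.active += ['SA', 'MAP']                         # enable two targets
    debug('SA', "printed: 'SA' is an active target")
    debug('CLF', "suppressed: 'CLF' was never activated")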
Example 1: _call
def _call(self, dataset):
    sensitivities = []
    for ind, analyzer in enumerate(self.__analyzers):
        if __debug__:
            debug("SA", "Computing sensitivity for SA#%d:%s" %
                  (ind, analyzer))
        sensitivity = analyzer(dataset)
        sensitivities.append(sensitivity)

    if __debug__:
        debug("SA",
              "Returning combined using %s sensitivity across %d items" %
              (self.__combiner, len(sensitivities)))

    # TODO Simplify if we go Dataset-only
    if len(sensitivities) == 1:
        sensitivities = np.asanyarray(sensitivities[0])
    else:
        if isinstance(sensitivities[0], AttrDataset):
            smerged = None
            for i, s in enumerate(sensitivities):
                s.sa['splits'] = np.repeat(i, len(s))
                if smerged is None:
                    smerged = s
                else:
                    smerged.append(s)
            sensitivities = smerged
        else:
            sensitivities = \
                Dataset(sensitivities,
                        sa={'splits': np.arange(len(sensitivities))})

    self.ca.sensitivities = sensitivities
    return sensitivities
Author: geeragh | Project: PyMVPA | Lines: 33 | Source: base.py
Example 2: _train
def _train(self, samples):
    """Determine the projection matrix onto the SVD components from
    a 2D samples x feature data matrix.
    """
    X = np.asmatrix(samples)
    X = self._demean_data(X)

    # singular value decomposition
    U, SV, Vh = np.linalg.svd(X, full_matrices=0)

    # store the final matrix with the new basis vectors to project the
    # features onto the SVD components. And store its .H right away to
    # avoid computing it in forward()
    self._proj = Vh.H

    # also store singular values of all components
    self._sv = SV

    if __debug__:
        # SV.nonzero() returns a tuple of index arrays, hence the [0]
        debug("MAP", "SVD was done on %s and obtained %d SVs " %
              (samples, len(SV)) + " (%d non-0, max=%f)" %
              (len(SV.nonzero()[0]), SV[0]))
        # .norm might be somewhat expensive to compute
        if "MAP_" in debug.active:
            debug("MAP_", "Mixing matrix has %s shape and norm=%f" %
                  (self._proj.shape, np.linalg.norm(self._proj)))
Author: geeragh | Project: PyMVPA | Lines: 26 | Source: svd.py
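One detail of this example is worth copying: the extra `"MAP_" in debug.active` test keeps the relatively expensive `np.linalg.norm` call out of the common path, because the message arguments would otherwise be evaluated even when nobody listens to the target. A minimal sketch of the same guard pattern, with `proj` standing in for any large matrix:

import numpy as np
from mvpa.base import debug

proj = np.random.rand(100, 100)        # stand-in for a projection matrix
if __debug__ and "MAP_" in debug.active:
    # the norm is computed only when the 'MAP_' target is enabled
    debug("MAP_", "norm=%f" % np.linalg.norm(proj))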
Example 3: _call
def _call(self, dataset):
    sensitivities = []
    for ind, analyzer in enumerate(self.__analyzers):
        if __debug__:
            debug("SA", "Computing sensitivity for SA#%d:%s" %
                  (ind, analyzer))
        sensitivity = analyzer(dataset)
        sensitivities.append(sensitivity)

    if __debug__:
        debug("SA",
              "Returning %d sensitivities from %s" %
              (len(sensitivities), self.__class__.__name__))

    sa_attr = self._sa_attr
    if isinstance(sensitivities[0], AttrDataset):
        smerged = None
        for i, s in enumerate(sensitivities):
            s.sa[sa_attr] = np.repeat(i, len(s))
            if smerged is None:
                smerged = s
            else:
                smerged.append(s)
        sensitivities = smerged
    else:
        sensitivities = \
            Dataset(sensitivities,
                    sa={sa_attr: np.arange(len(sensitivities))})

    self.ca.sensitivities = sensitivities
    return sensitivities
Author: B-Rich | Project: PyMVPA | Lines: 32 | Source: base.py
Example 4: _prepredict
def _prepredict(self, dataset):
    """Functionality prior to prediction
    """
    if not ('notrain2predict' in self.__tags__):
        # check if classifier was trained if that is needed
        if not self.trained:
            raise ValueError, \
                  "Classifier %s wasn't yet trained, therefore can't " \
                  "predict" % self
        nfeatures = dataset.nfeatures  # data.shape[1]
        # check if number of features is the same as in the data
        # it was trained on
        if nfeatures != self.__trainednfeatures:
            raise ValueError, \
                  "Classifier %s was trained on data with %d features, " % \
                  (self, self.__trainednfeatures) + \
                  "thus can't predict for %d features" % nfeatures

    if self.params.retrainable:
        if not self.__changedData_isset:
            self.__reset_changed_data()
            _changedData = self._changedData
            data = np.asanyarray(dataset.samples)
            _changedData['testdata'] = \
                self.__was_data_changed('testdata', data)
            if __debug__:
                debug('CLF_', "prepredict: Obtained _changedData is %s"
                      % (_changedData))
Author: geeragh | Project: PyMVPA | Lines: 29 | Source: base.py
Example 5: _set
def _set(self, val):
    if __debug__ and __mvpadebug__:
        # Since this is called quite often, don't convert values to
        # strings here; rely on passing them within msgargs
        debug("COL", "Setting %(self)s to %(val)s ",
              msgargs={"self": self, "val": val})
    self._value = val
Author: emanuele | Project: PyMVPA | Lines: 7 | Source: collections.py
Example 6: _SLcholesky_autoreg
def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
    """Simple wrapper around cholesky to incrementally regularize the
    matrix until successful computation.

    For `nsteps` we boost diagonal 10-fold each time from the
    'epsilon' of the respective dtype. If None -- would proceed until
    reaching 1.
    """
    if nsteps is None:
        nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
    result = None
    for step in xrange(nsteps):
        epsilon_value = (10 ** step) * np.finfo(C.dtype).eps
        epsilon = epsilon_value * np.eye(C.shape[0])
        try:
            result = SLcholesky(C + epsilon, lower=True)
        except SLAError, e:
            warning("Cholesky decomposition led to failure: %s. "
                    "As requested, performing auto-regularization but "
                    "for better control you might prefer to regularize "
                    "yourself by providing lm parameter to GPR" % e)
            if step < nsteps - 1:
                if __debug__:
                    debug("GPR", "Failed to obtain cholesky on "
                          "auto-regularization step %d value %g. Got %s."
                          " Boosting lambda more to reg. C."
                          % (step, epsilon_value, e))
                continue
            else:
                raise
        # decomposition succeeded -- no further boosting needed
        return result
Author: B-Rich | Project: PyMVPA | Lines: 30 | Source: gpr.py
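The loop above relies on PyMVPA's SciPy wrappers (`SLcholesky`, `SLAError`, `warning`). The regularization idea itself can be sketched with numpy alone; this is an illustration of the technique, not PyMVPA's API:

import numpy as np

def cholesky_autoreg(C, nsteps=16):
    """Boost the diagonal 10-fold per step until Cholesky succeeds."""
    for step in range(nsteps):
        eps = (10 ** step) * np.finfo(C.dtype).eps
        try:
            return np.linalg.cholesky(C + eps * np.eye(C.shape[0]))
        except np.linalg.LinAlgError:
            continue  # not positive-definite yet -- boost and retry
    raise np.linalg.LinAlgError("still not positive-definite after regularization")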
Example 7: _recon_customobj_customrecon
def _recon_customobj_customrecon(hdf, memo):
    """Reconstruct a custom object from HDF using a custom reconstructor"""
    # we found something that has some special idea about how it wants
    # to be reconstructed
    mod_name = hdf.attrs['module']
    recon_name = hdf.attrs['recon']
    if mod_name == '__builtin__':
        raise NotImplementedError(
            "Built-in reconstructors are not supported (yet). "
            "Got: '%s'" % recon_name)

    if __debug__:
        debug('HDF5', "Load from custom reconstructor '%s.%s' [%s]"
              % (mod_name, recon_name, hdf.name))
    # turn names into definitions
    mod = __import__(mod_name, fromlist=[recon_name])
    recon = mod.__dict__[recon_name]

    if 'rcargs' in hdf:
        recon_args_hdf = hdf['rcargs']
        if __debug__:
            debug('HDF5', "Load reconstructor args in [%s]"
                  % recon_args_hdf.name)
        recon_args = _hdf_tupleitems_to_obj(recon_args_hdf, memo)
    else:
        recon_args = ()

    # reconstruct
    obj = recon(*recon_args)
    # TODO Handle potentially available state settings
    return obj
Author: arokem | Project: PyMVPA | Lines: 31 | Source: hdf5.py
Example 8: __init__
def __init__(self, name=None, enabled=True, doc="State variable"):
    CollectableAttribute.__init__(self, name, doc)
    self._isenabled = enabled
    self._defaultenabled = enabled
    if __debug__:
        debug("STV",
              "Initialized new state variable %s " % name + `self`)
Author: gorlins | Project: PyMVPA | Lines: 7 | Source: attributes.py
Example 9: _set
def _set(self, val, init=False):
    different_value = self._value != val
    isarray = isinstance(different_value, np.ndarray)
    if self._ro and not init:
        raise RuntimeError, \
              "Attempt to set read-only parameter %s to %s" \
              % (self.name, val)
    if (isarray and np.any(different_value)) or \
       ((not isarray) and different_value):
        if __debug__:
            debug("COL",
                  "Parameter: setting %s to %s " % (str(self), val))
        if not isarray:
            if hasattr(self, 'min') and val < self.min:
                raise ValueError, \
                      "Minimal value for parameter %s is %s. Got %s" % \
                      (self.name, self.min, val)
            if hasattr(self, 'max') and val > self.max:
                raise ValueError, \
                      "Maximal value for parameter %s is %s. Got %s" % \
                      (self.name, self.max, val)
            if hasattr(self, 'choices') and (not val in self.choices):
                raise ValueError, \
                      "Valid choices for parameter %s are %s. Got %s" % \
                      (self.name, self.choices, val)
        self._value = val
        # Set 'isset' only if not called from initialization routine
        self._isset = not init
    elif __debug__:
        debug("COL",
              "Parameter: not setting %s since value is the same"
              % (str(self)))
Author: thorstenkranz | Project: PyMVPA | Lines: 32 | Source: param.py
Example 10: __new__
def __new__(cls, *args, **kwargs):
    if len(args) > 0:
        if len(kwargs) > 0:
            raise ValueError, \
                  "Do not mix positional and keyword arguments. " \
                  "Use a single positional argument -- filename, " \
                  "or any number of keyword arguments, without having " \
                  "filename specified"
        if len(args) == 1 and isinstance(args[0], basestring):
            filename = args[0]
            args = args[1:]
            if __debug__:
                debug('IOH', 'Undigging hamster from %s' % filename)
            # compressed or not -- that is the question
            if filename.endswith('.gz'):
                f = gzip.open(filename)
            else:
                f = open(filename)
            result = cPickle.load(f)
            if not isinstance(result, Hamster):
                warning("Loaded other than Hamster class from %s" % filename)
            return result
        else:
            raise ValueError, "Hamster accepts only a single positional " \
                  "argument and it must be a filename. Got %d " \
                  "arguments" % (len(args),)
    else:
        return object.__new__(cls)
Author: B-Rich | Project: PyMVPA | Lines: 28 | Source: hamster.py
Example 11: dump
def dump(self, filename, compresslevel='auto'):
    """Bury the hamster into the file

    Parameters
    ----------
    filename : str
      Name of the target file. When writing to a compressed file the
      filename gets a '.gz' extension if not already specified. This
      is necessary as the constructor uses the extension to decide
      whether it loads from a compressed or uncompressed file.
    compresslevel : 'auto' or int
      Compression level setting passed to gzip. When set to
      'auto', if filename ends with '.gz' `compresslevel` is set
      to 5, 0 otherwise. However, when `compresslevel` is set to
      0 gzip is bypassed completely and everything is written to
      an uncompressed file.
    """
    if compresslevel == 'auto':
        compresslevel = (0, 5)[int(filename.endswith('.gz'))]
    if compresslevel > 0 and not filename.endswith('.gz'):
        filename += '.gz'
    if __debug__:
        debug('IOH', 'Burying hamster into %s' % filename)
    if compresslevel == 0:
        f = open(filename, 'w')
    else:
        f = gzip.open(filename, 'w', compresslevel)
    cPickle.dump(self, f)
    f.close()
Author: B-Rich | Project: PyMVPA | Lines: 29 | Source: hamster.py
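Examples 10 and 11 together form a save/load round trip. A hypothetical usage sketch -- the import path follows the PyMVPA 0.x layout, and `results`/`note` are illustrative attribute names (Hamster stores its constructor keywords as attributes):

from mvpa.misc.io.hamster import Hamster

h = Hamster(results=[1, 2, 3], note="demo run")
h.dump('/tmp/analysis.gz')              # '.gz' extension selects gzip level 5
restored = Hamster('/tmp/analysis.gz')  # __new__ above un-pickles the file
print restored.note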
Example 12: _call
def _call(self, dataset):
    # local bindings
    analyzer = self.__analyzer
    insplit_index = self.__insplit_index

    sensitivities = []
    self.splits = splits = []
    store_splits = self.states.isEnabled("splits")

    for ind, split in enumerate(self.__splitter(dataset)):
        ds = split[insplit_index]
        if __debug__ and "SA" in debug.active:
            debug("SA", "Computing sensitivity for split %d on "
                  "dataset %s using %s" % (ind, ds, analyzer))
        sensitivity = analyzer(ds)
        sensitivities.append(sensitivity)
        if store_splits:
            splits.append(split)

    self.sensitivities = sensitivities
    if __debug__:
        debug("SA",
              "Returning sensitivities combined using %s across %d items "
              "generated by splitter %s" %
              (self.__combiner, len(sensitivities), self.__splitter))

    if self.__combiner is not None:
        sensitivities = self.__combiner(sensitivities)
    else:
        # assure that we have an ndarray on output
        sensitivities = N.asarray(sensitivities)
    return sensitivities
Author: gorlins | Project: PyMVPA | Lines: 31 | Source: base.py
Example 13: __reverseSingleLevel
def __reverseSingleLevel(self, wp):
    # local bindings
    level_paths = self.__level_paths

    # define wavelet packet to use
    WP = pywt.WaveletPacket(
        data=None, wavelet=self._wavelet,
        mode=self._mode, maxlevel=self.__level)

    # prepare storage
    signal_shape = wp.shape[:1] + self.getInSize()
    signal = N.zeros(signal_shape)
    Ntime_points = self._intimepoints
    for indexes in _getIndexes(signal_shape, self._dim):
        if __debug__:
            debug('MAP_', " %s" % (indexes,), lf=False, cr=True)
        for path, level_data in zip(level_paths, wp[indexes]):
            WP[path] = level_data
        signal[indexes] = WP.reconstruct(True)[:Ntime_points]

    return signal
Author: gorlins | Project: PyMVPA | Lines: 25 | Source: wavelet.py
Example 14: train
def train(self, dataset):
    """Train classifier on a dataset

    Shouldn't be overridden in subclasses unless explicitly needed
    to do so
    """
    if dataset.nfeatures == 0 or dataset.nsamples == 0:
        raise DegenerateInputError, \
              "Cannot train classifier on degenerate data %s" % dataset
    if __debug__:
        debug("CLF", "Training classifier %(clf)s on dataset %(dataset)s",
              msgargs={'clf': self, 'dataset': dataset})

    self._pretrain(dataset)

    # remember the time when started training
    t0 = time.time()

    if dataset.nfeatures > 0:
        result = self._train(dataset)
    else:
        warning("Trying to train on dataset with no features present")
        if __debug__:
            debug("CLF",
                  "No features present for training, no actual training "
                  "is called")
        result = None

    self.ca.training_time = time.time() - t0
    self._posttrain(dataset)
    return result
Author: geeragh | Project: PyMVPA | Lines: 32 | Source: base.py
Example 15: __init__
def __init__(self, clf, labels=None, confusion_state="training_stats",
             **kwargs):
    """Initialization.

    Parameters
    ----------
    clf : Classifier
      Either trained or untrained classifier
    confusion_state
      Id of the conditional attribute which stores `ConfusionMatrix`
    labels : list
      if provided, should be a set of labels to add on top of the
      ones present in testdata
    """
    ClassifierError.__init__(self, clf, labels, **kwargs)

    self.__confusion_state = confusion_state
    """What state to extract from"""

    if not clf.ca.has_key(confusion_state):
        raise ValueError, \
              "Conditional attribute %s is not defined for classifier %r" % \
              (confusion_state, clf)
    if not clf.ca.is_enabled(confusion_state):
        if __debug__:
            debug('CERR', "Forcing state %s to be enabled for %r" %
                  (confusion_state, clf))
        clf.ca.enable(confusion_state)
Author: B-Rich | Project: PyMVPA | Lines: 28 | Source: transerror.py
Example 16: __was_data_changed
def __was_data_changed(self, key, entry, update=True):
    """Check if given entry was changed from what is known prior.

    If so -- store only the ones needed for retrainable beastie
    """
    idhash_ = idhash(entry)
    __idhashes = self.__idhashes

    changed = __idhashes[key] != idhash_
    if __debug__ and 'CHECK_RETRAIN' in debug.active:
        __trained = self.__trained
        changed2 = entry != __trained[key]
        if isinstance(changed2, np.ndarray):
            changed2 = changed2.any()
        if changed != changed2 and not changed:
            raise RuntimeError, \
                  'idhash found to be weak for %s. Though hashid %s!=%s %s, ' \
                  'estimates %s!=%s %s' % \
                  (key, idhash_, __idhashes[key], changed,
                   entry, __trained[key], changed2)
        if update:
            __trained[key] = entry

    if __debug__ and changed:
        debug('CLF_', "Changed %s from %s to %s.%s"
              % (key, __idhashes[key], idhash_,
                 ('', 'updated')[int(update)]))
    if update:
        __idhashes[key] = idhash_

    return changed
Author: geeragh | Project: PyMVPA | Lines: 31 | Source: base.py
Example 17: _train
def _train(self, dataset):
    """Train SVM
    """
    targets_sa_name = self.params.targets_attr    # name of targets sa
    targets_sa = dataset.sa[targets_sa_name]      # actual targets sa

    # libsvm needs doubles
    src = _data2ls(dataset)

    # libsvm cannot handle literal labels
    labels = self._attrmap.to_numeric(targets_sa.value).tolist()

    svmprob = _svm.SVMProblem(labels, src)

    # Translate few params
    TRANSLATEDICT = {'epsilon': 'eps',
                     'tube_epsilon': 'p'}
    args = []
    for paramname, param in self.params.items() \
            + self.kernel_params.items():
        if paramname in TRANSLATEDICT:
            argname = TRANSLATEDICT[paramname]
        elif paramname in _svm.SVMParameter.default_parameters:
            argname = paramname
        else:
            if __debug__:
                debug("SVM_", "Skipping parameter %s since it is not known "
                      "to libsvm" % paramname)
            continue
        args.append((argname, param.value))

    # ??? All those parameters should be fetched if present from
    # **kwargs and create appropriate parameters within .params or
    # .kernel_params
    libsvm_param = _svm.SVMParameter(
        kernel_type=self.params.kernel.as_raw_ls(),  # Just an integer ID
        svm_type=self._svm_type,
        **dict(args))
    """Store SVM parameters in libSVM compatible format."""

    if self.params.has_key('C'):  # svm_type in [_svm.svmc.C_SVC]:
        Cs = self._get_cvec(dataset)
        if len(Cs) > 1:
            C0 = abs(Cs[0])
            scale = 1.0 / (C0)  # *np.sqrt(C0))
            # so we got 1 C per label
            uls = self._attrmap.to_numeric(targets_sa.unique)
            if len(Cs) != len(uls):
                raise ValueError, "SVM was parameterized with %d Cs but " \
                      "there are %d labels in the dataset" % \
                      (len(Cs), len(targets_sa.unique))
            weight = [c * scale for c in Cs]
            # All 3 need to be set to take an effect
            libsvm_param._set_parameter('weight', weight)
            libsvm_param._set_parameter('nr_weight', len(weight))
            libsvm_param._set_parameter('weight_label', uls)
        libsvm_param._set_parameter('C', Cs[0])

    self.__model = _svm.SVMModel(svmprob, libsvm_param)
Author: arokem | Project: PyMVPA | Lines: 60 | Source: svm.py
Example 18: _call
def _call(self, dataset, testdataset=None, **kwargs):
    """Invocation of the feature selection
    """
    wdataset = dataset
    wtestdataset = testdataset

    self.ca.selected_ids = None

    self.ca.nfeatures = []
    """Number of features at each step (before running selection)"""

    for fs in self.__feature_selections:
        # enable selected_ids state if it was requested from this class
        fs.ca.change_temporarily(
            enable_ca=["selected_ids"], other=self)
        if self.ca.is_enabled("nfeatures"):
            self.ca.nfeatures.append(wdataset.nfeatures)

        if __debug__:
            debug('FSPL', 'Invoking %s on (%s, %s)' %
                  (fs, wdataset, wtestdataset))

        wdataset, wtestdataset = fs(wdataset, wtestdataset, **kwargs)

        if self.ca.is_enabled("selected_ids"):
            if self.ca.selected_ids is None:
                self.ca.selected_ids = fs.ca.selected_ids
            else:
                self.ca.selected_ids = \
                    self.ca.selected_ids[fs.ca.selected_ids]

        fs.ca.reset_changed_temporarily()

    return (wdataset, wtestdataset)
Author: arokem | Project: PyMVPA | Lines: 33 | Source: base.py
Example 19: forward
def forward(self, data):
    """Map data from input to output space.

    Parameters
    ----------
    data : Dataset-like, (at least 2D)-array-like
      Typically this is a `Dataset`, but it might also be a plain data
      array, or even something completely different(TM) that is supported
      by a subclass' implementation. If such an object is Dataset-like it
      is handled by a dedicated method that also transforms dataset
      attributes if necessary. If an array-like is passed, it has to be
      at least two-dimensional, with the first axis separating samples
      or observations. For single samples `forward1()` might be more
      appropriate.
    """
    if is_datasetlike(data):
        if __debug__:
            debug('MAP', "Forward-map %s-shaped dataset through '%s'."
                  % (data.shape, self))
        return self._forward_dataset(data)
    else:
        if hasattr(data, 'ndim') and data.ndim < 2:
            raise ValueError(
                'Mapper.forward() only supports mapping of data with '
                'at least two dimensions, where the first axis '
                'separates samples/observations. Consider using '
                'Mapper.forward1() instead.')
        if __debug__:
            debug('MAP', "Forward-map data through '%s'." % (self))
        return self._forward_data(data)
Author: esc | Project: PyMVPA | Lines: 30 | Source: base.py
Example 20: _forward_dataset
def _forward_dataset(self, dataset):
    """Forward-map a dataset.

    This is a private method that can be reimplemented in derived
    classes. The default implementation forward-maps the dataset samples
    and returns a new dataset that is a shallow copy of the input with
    the mapped samples.

    Parameters
    ----------
    dataset : Dataset-like
    """
    if __debug__:
        debug('MAP_', "Forward-map %s-shaped samples in dataset with '%s'."
              % (dataset.samples.shape, self))
    msamples = self._forward_data(dataset.samples)
    if __debug__:
        debug('MAP_', "Make shallow copy of to-be-forward-mapped dataset "
              "and assigned forward-mapped samples ({sf}a_filters: "
              "%s, %s, %s)." % (self._sa_filter, self._fa_filter,
                                self._a_filter))
    mds = dataset.copy(deep=False,
                       sa=self._sa_filter,
                       fa=self._fa_filter,
                       a=self._a_filter)
    mds.samples = msamples
    return mds
Author: esc | Project: PyMVPA | Lines: 27 | Source: base.py
Note: The mvpa.base.debug examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution is subject to each project's license. Do not republish without permission.