This article collects typical usage examples of the Python function mvpa2.base.warning. If you are wondering how the warning function is used, how to call it, or what it looks like in real code, the curated examples below may help.
Twenty code examples of the warning function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
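Before the examples, here is a minimal sketch of how mvpa2.base.warning is typically imported and called, matching the pattern used throughout the snippets below (the message text itself is illustrative, not taken from PyMVPA):

import numpy as np
from mvpa2.base import warning

n_skipped = 3
# warning() takes a single message string; %-formatting is done by the caller
warning("Skipped %d features since they are not finite" % n_skipped)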
Example 1: _verified_reverse1
def _verified_reverse1(mapper, onesample):
    """Replacement of Mapper.reverse1 with safety net

    This function can be called instead of a direct call to a mapper's
    ``reverse1()``. It wraps a single sample into a dummy axis and calls
    ``reverse()``. Afterwards it verifies that the first axis of the
    returned array has one item only, otherwise it will issue a warning.

    This function is useful in any context where it is critical to ensure
    that reverse mapping a single sample, yields exactly one sample -- which
    isn't guaranteed due to the flexible nature of mappers.

    Parameters
    ----------
    mapper : Mapper instance
    onesample : array-like
      Single sample (in terms of the supplied mapper).

    Returns
    -------
    array
      Shape matches a single sample in terms of the mappers input space.
    """
    dummy_axis_sample = np.asanyarray(onesample)[None]
    rsample = mapper.reverse(dummy_axis_sample)
    if not len(rsample) == 1:
        warning("Reverse mapping single sample yielded multiple -- can lead to unintended behavior!")
    return rsample[0]
Author: Anhmike; Project: PyMVPA; Lines: 27; Source file: base.py
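The core trick in this helper is adding a leading dummy axis with [None] so a single sample can be passed through an API that expects a batch. A NumPy-only sketch of that pattern, with the reverse step simulated by a plain function instead of an actual mvpa2 mapper:

import numpy as np

def fake_reverse(batch):
    # stand-in for mapper.reverse(); operates on a batch of samples
    return batch.reshape(len(batch), 2, 2)

onesample = np.arange(4)
dummy_axis_sample = np.asanyarray(onesample)[None]   # shape (1, 4)
rbatch = fake_reverse(dummy_axis_sample)             # shape (1, 2, 2)
assert len(rbatch) == 1                              # the condition the helper warns about
rsample = rbatch[0]                                  # back to a single sample, shape (2, 2)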
Example 2: _get_increments
def _get_increments(self, ndim):
    """Creates a list of increments for a given dimensionality

    RF: lame yoh just cut-pasted and tuned up because everything
        depends on ndim...
    """
    # Set element_sizes
    element_sizes = self._element_sizes
    if element_sizes is None:
        element_sizes = np.ones(ndim)
    else:
        if (ndim != len(element_sizes)):
            raise ValueError, \
                  "Dimensionality mismatch: element_sizes %s provided " \
                  "to constructor had %i dimensions, whenever queried " \
                  "coordinate had %i" \
                  % (element_sizes, len(element_sizes), ndim)
    center = np.zeros(ndim)
    element_sizes = np.asanyarray(element_sizes)
    # What range for each dimension
    erange = np.ceil(self._radius / element_sizes).astype(int)

    tentative_increments = np.array(list(np.ndindex(tuple(erange*2 + 1)))) \
                           - erange
    # Filter out the ones beyond the "sphere"
    res = array([x for x in tentative_increments
                 if self._inner_radius
                 < self._distance_func(x * element_sizes, center)
                 <= self._radius])
    if not len(res):
        warning("%s defines no neighbors" % self)
    return res
Author: andreirusu; Project: PyMVPA; Lines: 34; Source file: neighborhood.py
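The increment generation above can be reproduced with plain NumPy for a concrete case. The sketch below assumes a Euclidean distance, unit element sizes, and made-up radius values; it is only meant to show the np.ndindex trick, not PyMVPA's actual neighborhood class:

import numpy as np

radius, inner_radius = 2.0, 0.0
element_sizes = np.ones(3)
center = np.zeros(3)

erange = np.ceil(radius / element_sizes).astype(int)
candidates = np.array(list(np.ndindex(tuple(erange * 2 + 1)))) - erange
increments = np.array([x for x in candidates
                       if inner_radius
                       < np.linalg.norm(x * element_sizes - center)
                       <= radius])
# increments now lists integer voxel offsets within a sphere of the given radius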
Example 3: _check_cosmo_dataset
def _check_cosmo_dataset(cosmo):
    '''
    Helper function to ensure a cosmo input for cosmo_dataset is valid.

    Currently does two things:
    (1) raise an error if there are no samples
    (2) raise a warning if samples have very large or very small values. A use
        case is certain MEEG datasets with very small sample values
        (in the order of 1e-25) which affects some classifiers
    '''
    samples = cosmo.get('samples', None)

    if samples is None:
        raise KeyError("Missing field .samples in %s" % cosmo)

    # check for extreme values
    warn_for_extreme_values_decimals = 10

    # ignore NaNs and infinity
    nonzero_msk = np.logical_and(np.isfinite(samples), samples != 0)

    if np.any(nonzero_msk):
        max_nonzero = np.max(np.abs(samples[nonzero_msk]))

        # see how many decimals in the largest absolute value
        decimals_nonzero = np.log10(max_nonzero)

        if abs(decimals_nonzero) > warn_for_extreme_values_decimals:
            msg = (
                'Samples have extreme values, maximum absolute value is %s; '
                'This may affect some analyses. Considering scaling the samples, '
                'e.g. by a factor of 10**%d ' % (
                    max_nonzero, -decimals_nonzero))
            warning(msg)
Author: PyMVPA; Project: PyMVPA; Lines: 34; Source file: cosmo.py
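The magnitude check can be illustrated on its own: it looks at the base-10 order of the largest finite, non-zero absolute value. A standalone sketch with made-up data (the threshold of 10 decimals matches the snippet above):

import numpy as np

samples = np.array([[1e-25, 2e-25], [np.nan, 0.0]])
nonzero_msk = np.logical_and(np.isfinite(samples), samples != 0)
if np.any(nonzero_msk):
    max_nonzero = np.max(np.abs(samples[nonzero_msk]))
    decimals_nonzero = np.log10(max_nonzero)          # roughly -24.7 for this data
    if abs(decimals_nonzero) > 10:
        print("extreme values; consider scaling by 10**%d" % -decimals_nonzero)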
Example 4: append
def append(self, other):
    """This method should not be used and will be removed in the future"""
    warning(
        "AttrDataset.append() is deprecated and will be removed. "
        "Instead of ds.append(x) use: ds = vstack((ds, x), a=0)"
    )
    if not self.nfeatures == other.nfeatures:
        raise DatasetError("Cannot merge datasets, because the number of "
                           "features does not match.")

    if not sorted(self.sa.keys()) == sorted(other.sa.keys()):
        raise DatasetError(
            "Cannot merge dataset. This datasets samples "
            "attributes %s cannot be mapped into the other "
            "set %s" % (self.sa.keys(), other.sa.keys())
        )

    # concat the samples as well
    self.samples = np.concatenate((self.samples, other.samples), axis=0)

    # tell the collection the new desired length of all attributes
    self.sa.set_length_check(len(self.samples))

    # concat all samples attributes
    for k, v in other.sa.iteritems():
        self.sa[k].value = np.concatenate((self.sa[k].value, v.value), axis=0)
Author: neurosbh; Project: PyMVPA; Lines: 25; Source file: dataset.py
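The guard before concatenation is essentially a key comparison followed by per-attribute concatenation. The same idea can be sketched with plain dicts of NumPy arrays (the attribute names here are made up, not PyMVPA's):

import numpy as np

sa_a = {'targets': np.array([0, 1]), 'chunks': np.array([0, 0])}
sa_b = {'targets': np.array([1, 0]), 'chunks': np.array([1, 1])}

if sorted(sa_a) != sorted(sa_b):
    raise ValueError("sample attributes do not match: %s vs %s"
                     % (sorted(sa_a), sorted(sa_b)))
merged = {k: np.concatenate((sa_a[k], sa_b[k]), axis=0) for k in sa_a}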
Example 5: _forward_data
def _forward_data(self, data):
    params = self.params
    try:
        mapped = filtfilt(self.__iir_num,
                          self.__iir_denom,
                          data,
                          axis=params.axis,
                          padtype=params.padtype,
                          padlen=params.padlen)
    except TypeError:
        # we have an ancient scipy, do manually
        # but is will only support 2d arrays
        if params.axis == 0:
            data = data.T
        if params.axis > 1:
            raise ValueError("this version of scipy does not "
                             "support nd-arrays for filtfilt()")
        if not (params['padlen'].is_default and params['padtype'].is_default):
            warning("this version of scipy.signal.filtfilt() does not "
                    "support `padlen` and `padtype` arguments -- ignoring "
                    "them")
        mapped = [filtfilt(self.__iir_num,
                           self.__iir_denom,
                           x)
                  for x in data]
        mapped = np.array(mapped)
        if params.axis == 0:
            mapped = mapped.T
    return mapped
Author: armaneshaghi; Project: PyMVPA; Lines: 29; Source file: filters.py
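The fallback branch applies scipy.signal.filtfilt row by row when the installed SciPy cannot handle n-dimensional input. A standalone sketch of that per-row pattern; the filter coefficients and data shapes are illustrative only:

import numpy as np
from scipy.signal import butter, filtfilt

b, a = butter(4, 0.2)                   # illustrative low-pass coefficients
data = np.random.randn(5, 200)          # 5 signals, 200 samples each

try:
    filtered = filtfilt(b, a, data, axis=1)                         # modern scipy: filter along axis 1
except TypeError:
    filtered = np.array([filtfilt(b, a, row) for row in data])      # ancient scipy fallback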
Example 6: __init__
def __init__(self, **kwargs):
    """Initialize an SMLR classifier.
    """
    """
    TODO:
     # Add in likelihood calculation
     # Add kernels, not just direct methods.
     """
    # init base class first
    Classifier.__init__(self, **kwargs)

    if _cStepwiseRegression is None and self.params.implementation == 'C':
        warning('SMLR: C implementation is not available.'
                ' Using pure Python one')
        self.params.implementation = 'Python'

    # pylint friendly initializations
    self._ulabels = None
    """Unigue labels from the training set."""
    self.__weights_all = None
    """Contains all weights including bias values"""
    self.__weights = None
    """Just the weights, without the biases"""
    self.__biases = None
    """The biases, will remain none if has_bias is False"""
Author: arnaudsj; Project: PyMVPA; Lines: 26; Source file: smlr.py
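The fall back from the C implementation to pure Python follows a common optional-extension pattern. A generic sketch using the standard library's warnings module; the module and variable names are hypothetical, not PyMVPA's:

import warnings

try:
    import _fast_backend            # hypothetical compiled extension
except ImportError:
    _fast_backend = None

implementation = 'C'
if _fast_backend is None and implementation == 'C':
    # mirror the SMLR behaviour: warn and fall back to the pure Python path
    warnings.warn("C implementation is not available, using pure Python one")
    implementation = 'Python'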
Example 7: to_npz
def to_npz(self, filename, compress=True):
    """Save dataset to a .npz file storing all fa/sa/a which are ndarrays

    Parameters
    ----------
    filename : str
    compress : bool, optional
      If True, savez_compressed is used
    """
    savez = np.savez_compressed if compress else np.savez
    if not filename.endswith('.npz'):
        filename += '.npz'

    entries = {'samples': self.samples}
    skipped = []
    for c in ('a', 'fa', 'sa'):
        col = getattr(self, c)
        for k in col:
            v = col[k].value
            e = '%s.%s' % (c, k)
            if isinstance(v, np.ndarray):
                entries[e] = v
            else:
                skipped.append(e)
    if skipped:
        warning("Skipping %s since not ndarrays" % (', '.join(skipped)))
    return savez(filename, **entries)
Author: PyMVPA; Project: PyMVPA; Lines: 26; Source file: dataset.py
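The serialization logic boils down to collecting every ndarray attribute into a flat dict keyed as "collection.name" and handing it to numpy.savez_compressed. A minimal standalone sketch with made-up attributes:

import numpy as np

samples = np.random.randn(4, 3)
collections = {'sa': {'targets': np.array([0, 1, 0, 1]), 'notes': 'not an array'},
               'fa': {'voxel_indices': np.arange(3)}}

entries = {'samples': samples}
skipped = []
for cname, col in collections.items():
    for key, value in col.items():
        entry = '%s.%s' % (cname, key)
        if isinstance(value, np.ndarray):
            entries[entry] = value
        else:
            skipped.append(entry)     # e.g. 'sa.notes' is not an ndarray

np.savez_compressed('dataset.npz', **entries)
# keys like 'sa.targets' come back via np.load('dataset.npz')['sa.targets']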
Example 8: run
def run(args):
    if not args.store is None and args.output is None:
        raise ValueError("--output is require for result storage")
    if not args.data is None:
        dss = [arg2ds(d) for d in args.data]
        if len(dss):
            # convenience short-cut
            ds = dss[0]
    try:
        import nose.tools as nt
    except ImportError:
        pass
    for expr in args.eval:
        if expr == '-':
            exec sys.stdin
        elif os.path.isfile(expr):
            execfile(expr, globals(), locals())
        else:
            exec expr
    if not args.store is None:
        out = {}
        for var in args.store:
            try:
                out[var] = locals()[var]
            except KeyError:
                warning("'%s' not found in local name space -- skipped." % var)
        if len(out):
            ds2hdf5(out, args.output, compression=args.hdf5_compression)
Author: Arthurkorn; Project: PyMVPA; Lines: 28; Source file: cmd_exec.py
Example 9: stability_assurance
def stability_assurance(cdf):
    if __debug__ and 'CHECK_STABILITY' in debug.active:
        cdf_min, cdf_max = np.min(cdf), np.max(cdf)
        if cdf_min < 0 or cdf_max > 1.0:
            s = ('', ' for %s' % name)[int(name is not None)]
            warning('Stability check of cdf %s failed%s. Min=%s, max=%s' % \
                    (cdf_func, s, cdf_min, cdf_max))
Author: Arthurkorn; Project: PyMVPA; Lines: 7; Source file: stats.py
Example 10: _SLcholesky_autoreg
def _SLcholesky_autoreg(C, nsteps=None, **kwargs):
    """Simple wrapper around cholesky to incrementally regularize the
    matrix until successful computation.

    For `nsteps` we boost diagonal 10-fold each time from the
    'epsilon' of the respective dtype. If None -- would proceed until
    reaching 1.
    """
    if nsteps is None:
        nsteps = -int(np.floor(np.log10(np.finfo(float).eps)))
    result = None
    for step in xrange(nsteps):
        epsilon_value = (10**step) * np.finfo(C.dtype).eps
        epsilon = epsilon_value * np.eye(C.shape[0])
        try:
            result = SLcholesky(C + epsilon, lower=True)
        except SLAError, e:
            warning("Cholesky decomposition lead to failure: %s. "
                    "As requested, performing auto-regularization but "
                    "for better control you might prefer to regularize "
                    "yourself by providing lm parameter to GPR" % e)
            if step < nsteps-1:
                if __debug__:
                    debug("GPR", "Failed to obtain cholesky on "
                          "auto-regularization step %d value %g. Got %s."
                          " Boosting lambda more to reg. C."
                          % (step, epsilon_value, e))
                continue
            else:
                raise
Author: Anhmike; Project: PyMVPA; Lines: 30; Source file: gpr.py
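The same incremental regularization can be sketched with numpy.linalg.cholesky, which raises LinAlgError instead of SciPy's error class; the diagonal jitter grows tenfold per attempt. This is only a simplified sketch, not PyMVPA's GPR code:

import numpy as np

def cholesky_autoreg(C, nsteps=16):
    """Retry Cholesky with ten-fold growing diagonal jitter on failure."""
    eps = np.finfo(C.dtype).eps
    for step in range(nsteps):
        jitter = (10 ** step) * eps * np.eye(C.shape[0])
        try:
            return np.linalg.cholesky(C + jitter)
        except np.linalg.LinAlgError:
            if step == nsteps - 1:
                raise

L = cholesky_autoreg(np.array([[1.0, 1.0], [1.0, 1.0]]))   # nearly singular, still succeeds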
Example 11: _check
def _check(self):
    '''ensures that different fields are sort of consistent'''
    fields = ['_v', '_f', '_nv', '_nf']
    if not all(hasattr(self, field) for field in fields):
        raise Exception("Incomplete surface!")

    if self._v.shape != (self._nv, 3):
        raise Exception("Wrong shape for vertices")

    if self._f.shape != (self._nf, 3):
        raise Exception("Wrong shape for faces")

    # see if all faces have a corresponding node.
    # actually this would not invalidate the surface, so
    # we only give a warning
    unqf = np.unique(self._f)
    if unqf.size != self._nv:
        from mvpa2.base import warning
        warning("Count mismatch for face range (%d!=%d), "
                "faces without node: %r" % (unqf.size, self._nv,
                                            len(set(range(self._nv)) - set(unqf))))

    if np.any(unqf != np.arange(self._nv)):
        from mvpa2.base import warning
        warning("Missing values in faces")
Author: kirty; Project: PyMVPA; Lines: 26; Source file: surf.py
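The face/vertex consistency check relies on np.unique over the face array covering every vertex index exactly once. A standalone sketch with a toy mesh where one vertex is never referenced (the mesh itself is made up):

import numpy as np

n_vertices = 5
faces = np.array([[0, 1, 2], [0, 2, 3]])   # toy mesh: vertex 4 is never used

unique_refs = np.unique(faces)
if unique_refs.size != n_vertices:
    missing = set(range(n_vertices)) - set(unique_refs)
    print("vertices never referenced by any face: %r" % missing)
elif np.any(unique_refs != np.arange(n_vertices)):
    print("face indices are not a clean 0..n_vertices-1 range")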
Example 12: handle_arg
def handle_arg(arg):
    """Helper which would read in SpatialImage if necessary
    """
    if arg is None:
        return arg
    if isinstance(arg, basestring):
        arg = nb.load(arg)
        argshape = arg.get_shape()
        # Assure that we have 3D (at least)
        if len(argshape) < 3:
            arg = nb.Nifti1Image(
                arg.get_data().reshape(argshape + (1,)*(3-len(argshape))),
                arg.get_affine(),
                arg.get_header())
    else:
        argshape = arg.shape

    if len(argshape) == 4:
        if argshape[-1] > 1:
            warning("For now plot_lightbox can handle only 3d, 4d data was provided."
                    " Plotting only the first volume")
        if isinstance(arg, SpatialImage):
            arg = nb.Nifti1Image(arg.get_data()[..., 0], arg.get_affine(), arg.get_header())
        else:
            arg = arg[..., 0]
    elif len(argshape) != 3:
        raise ValueError, "For now just handling 3D volumes"
    return arg
Author: Arthurkorn; Project: PyMVPA; Lines: 28; Source file: lightbox.py
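Padding an array up to three dimensions, as done above for 1D/2D images, is a one-line reshape. A NumPy-only sketch without the nibabel wrapping (the array here is arbitrary example data):

import numpy as np

arr = np.random.randn(10, 10)                      # a 2D "image"
if arr.ndim < 3:
    arr = arr.reshape(arr.shape + (1,) * (3 - arr.ndim))
# arr.shape is now (10, 10, 1)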
Example 13: label_voxel
def label_voxel(self, c, levels=None):
    if self.__referenceLevel is None:
        warning("You did not provide what level to use "
                "for reference. Assigning 0th level -- '%s'"
                % (self._levels[0],))
        self.set_reference_level(0)
        # return self.__referenceAtlas.label_voxel(c, levels)

    c = self._check_range(c)

    # obtain coordinates of the closest voxel
    cref = self._data[self.__referenceLevel.indexes, c[0], c[1], c[2]]
    dist = norm((cref - c) * self.voxdim)
    if __debug__:
        debug('ATL__', "Closest referenced point for %r is "
              "%r at distance %3.2f" % (c, cref, dist))
    if (self.distance - dist) >= 1e-3:  # neglect everything smaller
        result = self.__referenceAtlas.label_voxel(cref, levels)
        result['voxel_referenced'] = c
        result['distance'] = dist
    else:
        result = self.__referenceAtlas.label_voxel(c, levels)
        if __debug__:
            debug('ATL__', "Closest referenced point is "
                  "further than desired distance %.2f" % self.distance)
        result['voxel_referenced'] = None
        result['distance'] = 0
    return result
Author: Arthurkorn; Project: PyMVPA; Lines: 29; Source file: base.py
Example 14: __init__
def __init__(self, generator, queryengine, errorfx=mean_mismatch_error,
             indexsum=None,
             reuse_neighbors=False,
             splitter=None,
             **kwargs):
    """Initialize the base class for "naive" searchlight classifiers

    Parameters
    ----------
    generator : `Generator`
      Some `Generator` to prepare partitions for cross-validation.
      It must not change "targets", thus e.g. no AttributePermutator's
    errorfx : func, optional
      Functor that computes a scalar error value from the vectors of
      desired and predicted values (e.g. subclass of `ErrorFunction`).
    indexsum : ('sparse', 'fancy'), optional
      What use to compute sums over arbitrary columns. 'fancy'
      corresponds to regular fancy indexing over columns, whenever
      in 'sparse', product of sparse matrices is used (usually
      faster, so is default if `scipy` is available).
    reuse_neighbors : bool, optional
      Compute neighbors information only once, thus allowing for
      efficient reuse on subsequent calls where dataset's feature
      attributes remain the same (e.g. during permutation testing)
    splitter : Splitter, optional
      Which will be used to split partitioned datasets. If None specified
      then standard one operating on partitions will be used
    """
    # init base class first
    BaseSearchlight.__init__(self, queryengine, **kwargs)

    self._errorfx = errorfx
    self._generator = generator
    self._splitter = splitter

    # TODO: move into _call since resetting over default None
    #       obscures __repr__
    if indexsum is None:
        if externals.exists('scipy'):
            indexsum = 'sparse'
        else:
            indexsum = 'fancy'
    else:
        if indexsum == 'sparse' and not externals.exists('scipy'):
            warning("Scipy.sparse isn't available so taking 'fancy' as "
                    "'indexsum' method.")
            indexsum = 'fancy'
    self._indexsum = indexsum

    if not self.nproc in (None, 1):
        raise NotImplementedError, "For now only nproc=1 (or None for " \
            "autodetection) is supported by GNBSearchlight"

    self.__pb = None            # statistics per each block/label
    self.__reuse_neighbors = reuse_neighbors

    # Storage to be used for neighborhood information
    self.__roi_fids = None
Author: hanke; Project: PyMVPA; Lines: 59; Source file: adhocsearchlightbase.py
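Choosing the 'sparse' strategy only when SciPy is importable mirrors a common capability check. A generic sketch using importlib from the standard library instead of PyMVPA's externals.exists (the helper name is made up):

import importlib

def module_available(name):
    try:
        importlib.import_module(name)
        return True
    except ImportError:
        return False

indexsum = 'sparse' if module_available('scipy') else 'fancy'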
Example 15: train
def train(self, ds):
    """
    The default implementation calls ``_pretrain()``, ``_train()``, and
    finally ``_posttrain()``.

    Parameters
    ----------
    ds: Dataset
      Training dataset.

    Returns
    -------
    None
    """
    got_ds = is_datasetlike(ds)

    # TODO remove first condition if all Learners get only datasets
    if got_ds and (ds.nfeatures == 0 or len(ds) == 0):
        raise DegenerateInputError(
            "Cannot train learner on degenerate data %s" % ds)
    if __debug__:
        debug(
            "LRN",
            "Training learner %(lrn)s on dataset %(dataset)s",
            msgargs={'lrn': self, 'dataset': ds})

    self._pretrain(ds)

    # remember the time when started training
    t0 = time.time()
    if got_ds:
        # things might have happened during pretraining
        if ds.nfeatures > 0:
            self._train(ds)
        else:
            warning("Trying to train on dataset with no features present")
            if __debug__:
                debug("LRN",
                      "No features present for training, no actual training "
                      "is called")
    else:
        # in this case we claim to have no idea and simply try to train
        self._train(ds)

    # store timing
    self.ca.training_time = time.time() - t0

    # and post-proc
    self._posttrain(ds)

    # finally flag as trained
    self._set_trained()
    if __debug__:
        debug(
            "LRN",
            "Finished training learner %(lrn)s on dataset %(dataset)s",
            msgargs={'lrn': self, 'dataset': ds})
Author: Anhmike; Project: PyMVPA; Lines: 59; Source file: learner.py
Example 16: _pvalue
def _pvalue(x, cdf_func, tail, return_tails=False, name=None):
    """Helper function to return p-value(x) given cdf and tail

    Parameters
    ----------
    cdf_func : callable
      Function to be used to derive cdf values for x
    tail : str ('left', 'right', 'any', 'both')
      Which tail of the distribution to report. For 'any' and 'both'
      it chooses the tail it belongs to based on the comparison to
      p=0.5. In the case of 'any' significance is taken like in a
      one-tailed test.
    return_tails : bool
      If True, a tuple return (pvalues, tails), where tails contain
      1s if value was from the right tail, and 0 if the value was
      from the left tail.
    """
    is_scalar = np.isscalar(x)
    if is_scalar:
        x = [x]

    cdf = cdf_func(x)

    if __debug__ and "CHECK_STABILITY" in debug.active:
        cdf_min, cdf_max = np.min(cdf), np.max(cdf)
        if cdf_min < 0 or cdf_max > 1.0:
            s = ("", " for %s" % name)[int(name is not None)]
            warning("Stability check of cdf %s failed%s. Min=%s, max=%s" % (cdf_func, s, cdf_min, cdf_max))

    # no escape but to assure that CDF is in the right range. Some
    # distributions from scipy tend to jump away from [0,1]
    cdf = np.clip(cdf, 0, 1.0)

    if tail == "left":
        if return_tails:
            right_tail = np.zeros(cdf.shape, dtype=bool)
    elif tail == "right":
        cdf = 1 - cdf
        if return_tails:
            right_tail = np.ones(cdf.shape, dtype=bool)
    elif tail in ("any", "both"):
        right_tail = cdf >= 0.5
        cdf[right_tail] = 1.0 - cdf[right_tail]
        if tail == "both":
            # we need report the area under both tails
            # XXX this is only meaningful for symetric distributions
            cdf *= 2

    # Assure that NaNs didn't get significant value
    cdf[np.isnan(x)] = 1.0
    if is_scalar:
        res = cdf[0]
    else:
        res = cdf

    if return_tails:
        return (res, right_tail)
    else:
        return res
Author: psederberg; Project: PyMVPA; Lines: 59; Source file: stats.py
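How the tail handling plays out can be seen with a concrete CDF. The sketch below uses scipy.stats.norm.cdf on a few values chosen only for illustration and reproduces the two-tailed branch:

import numpy as np
from scipy.stats import norm

x = np.array([-2.0, 0.5, 3.0])
cdf = np.clip(norm.cdf(x), 0, 1.0)

# two-tailed p-values: fold the upper tail onto the lower one, then double
right_tail = cdf >= 0.5
p = cdf.copy()
p[right_tail] = 1.0 - p[right_tail]
p *= 2
# p is approximately [0.046, 0.617, 0.003]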
Example 17: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]

    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace
    # Fixing nproc=0
    if params.nproc == 0:
        from mvpa2.base import warning
        warning("nproc of 0 doesn't make sense. Setting nproc to 1.")
        params.nproc = 1

    # Checking for joblib, if not, set nproc to 1
    if params.nproc != 1:
        from mvpa2.base import externals, warning
        if not externals.exists('joblib'):
            warning("Setting nproc different from 1 requires joblib package, which "
                    "does not seem to exist. Setting nproc to 1.")
            params.nproc = 1

    # start from original input datasets again
    if params.nproc == 1:
        residuals = []
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 3: ds #%i" % i)
            m, residual = get_trained_mapper(ds_new, self.commonspace, m,
                                             self.ca['residual_errors'].enabled)
            if self.ca['residual_errors'].enabled:
                residuals.append(residual)
    else:
        if __debug__:
            debug('HPAL_', "Level 3: Using joblib with nproc = %d " % params.nproc)
        verbose_level_parallel = 20 \
            if (__debug__ and 'HPAL' in debug.active) else 0
        from joblib import Parallel, delayed
        import sys
        # joblib's 'multiprocessing' backend has known issues of failure on OSX
        # Tested with MacOS 10.12.13, python 2.7.13, joblib v0.10.3
        if params.joblib_backend is None:
            params.joblib_backend = 'threading' if sys.platform == 'darwin' \
                                    else 'multiprocessing'
        res = Parallel(
            n_jobs=params.nproc, pre_dispatch=params.nproc,
            backend=params.joblib_backend,
            verbose=verbose_level_parallel
        )(
            delayed(get_trained_mapper)
            (ds, self.commonspace, mapper, self.ca['residual_errors'].enabled)
            for ds, mapper in zip(datasets, mappers)
        )
        mappers = [m for m, r in res]
        if self.ca['residual_errors'].enabled:
            residuals = [r for m, r in res]

    if self.ca['residual_errors'].enabled:
        self.ca.residual_errors = Dataset(samples=np.array(residuals)[None, :])

    return mappers
Author: PyMVPA; Project: PyMVPA; Lines: 59; Source file: hyperalignment.py
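The parallel branch is plain joblib Parallel/delayed usage with an explicit backend. A minimal standalone sketch where the worker function is a stand-in for get_trained_mapper, including the same macOS backend workaround:

import sys
from joblib import Parallel, delayed

def train_one(item):
    # stand-in for get_trained_mapper(ds, commonspace, mapper, ...)
    return item, item ** 2

if __name__ == '__main__':
    backend = 'threading' if sys.platform == 'darwin' else 'multiprocessing'
    results = Parallel(n_jobs=2, backend=backend)(
        delayed(train_one)(i) for i in range(4))
    mappers = [m for m, _ in results]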
Example 18: seed
def seed(random_seed):
    if __debug__:
        debug('SG', "Seeding shogun's RNG with %s" % random_seed)
    try:
        # reuse the same seed for shogun
        shogun.Library.Math_init_random(random_seed)
    except Exception, e:
        warning('Shogun cannot be seeded due to %s' % (e,))
Author: Arthurkorn; Project: PyMVPA; Lines: 8; Source file: svm.py
Example 19: corr_error_prob
def corr_error_prob(predicted, target):
    """Computes p-value of correlation between the target and the predicted
    values.
    """
    from mvpa2.base import warning
    warning("p-value for correlation is implemented only when scipy is "
            "available. Bogus value -1.0 is returned otherwise")
    return -1.0
Author: PyMVPA; Project: PyMVPA; Lines: 8; Source file: errorfx.py
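This function is only the fallback used when SciPy is missing; with SciPy present, the correlation p-value would typically come from scipy.stats.pearsonr, roughly as in the sketch below (this is not PyMVPA's actual implementation, and the data is made up):

import numpy as np
from scipy.stats import pearsonr

predicted = np.array([0.1, 0.4, 0.35, 0.8])
target = np.array([0.0, 0.5, 0.3, 1.0])

r, p_value = pearsonr(predicted, target)   # p-value of the correlation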
Example 20: _extract_boxcar_events
def _extract_boxcar_events(ds, events=None, time_attr=None, match="prev",
                           eprefix="event", event_mapper=None):
    """see eventrelated_dataset() for docs"""
    # relabel argument
    conv_strategy = {"prev": "floor", "next": "ceil", "closest": "round"}[match]

    if not time_attr is None:
        tvec = ds.sa[time_attr].value
        # we are asked to convert onset time into sample ids
        descr_events = []
        for ev in events:
            # do not mess with the input data
            ev = copy.deepcopy(ev)
            # best matching sample
            idx = value2idx(ev["onset"], tvec, conv_strategy)
            # store offset of sample time and real onset
            ev["orig_offset"] = ev["onset"] - tvec[idx]
            # rescue the real onset into a new attribute
            ev["orig_onset"] = ev["onset"]
            ev["orig_duration"] = ev["duration"]
            # figure out how many samples we need
            ev["duration"] = len(tvec[idx:][tvec[idx:] < ev["onset"] + ev["duration"]])
            # new onset is sample index
            ev["onset"] = idx
            descr_events.append(ev)
    else:
        descr_events = events
    # convert the event specs into the format expected by BoxcarMapper
    # take the first event as an example of contained keys
    evvars = _events2dict(descr_events)
    # checks
    for p in ["onset", "duration"]:
        if not p in evvars:
            raise ValueError("'%s' is a required property for all events." % p)
    boxlength = max(evvars["duration"])
    if __debug__:
        if not max(evvars["duration"]) == min(evvars["duration"]):
            warning("Boxcar mapper will use maximum boxlength (%i) of all "
                    "provided Events." % boxlength)

    # finally create, train und use the boxcar mapper
    bcm = BoxcarMapper(evvars["onset"], boxlength, space=eprefix)
    bcm.train(ds)
    ds = ds.get_mapped(bcm)
    if event_mapper is None:
        # at last reflatten the dataset
        # could we add some meaningful attribute during this mapping, i.e. would
        # assigning 'inspace' do something good?
        ds = ds.get_mapped(FlattenMapper(shape=ds.samples.shape[1:]))
    else:
        ds = ds.get_mapped(event_mapper)
    # add samples attributes for the events, simply dump everything as a samples
    # attribute
    # special case onset and duration in case of conversion into descrete time
    if not time_attr is None:
        for attr in ("onset", "duration"):
            evvars[attr] = [e[attr] for e in events]
    ds = _evvars2ds(ds, evvars, eprefix)

    return ds
Author: neurosbh; Project: PyMVPA; Lines: 58; Source file: eventrelated.py
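The onset-to-sample conversion can be sketched with NumPy alone. With the "prev" strategy each onset is mapped to the last acquisition time at or before it; np.searchsorted stands in for PyMVPA's value2idx here, and the TR and event values are made up:

import numpy as np

tvec = np.arange(0.0, 20.0, 2.5)        # acquisition times, TR = 2.5 s
onset, duration = 6.0, 5.0

idx = np.searchsorted(tvec, onset, side='right') - 1          # 'prev'/floor match -> 2
orig_offset = onset - tvec[idx]                               # 6.0 - 5.0 = 1.0
n_samples = len(tvec[idx:][tvec[idx:] < onset + duration])    # samples covered -> 3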
Note: the mvpa2.base.warning examples in this article were compiled by 纯净天空 from source and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use must follow the license of the corresponding project. Do not republish without permission.