This article collects typical usage examples of the Python function mvpa2.support.copy.deepcopy. If you have been wondering what exactly deepcopy does, how to call it, or where it is useful, the hand-picked code samples below should help.
Twenty code examples of the deepcopy function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
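Before the examples, here is a minimal, self-contained sketch of the core pattern they all share (assuming PyMVPA is installed; for ordinary objects mvpa2.support.copy.deepcopy behaves like the standard library's copy.deepcopy):

import numpy as np
from mvpa2.support.copy import deepcopy

original = {'samples': np.arange(6).reshape(2, 3)}
duplicate = deepcopy(original)

# the copy is fully independent: mutating it leaves the original intact
duplicate['samples'][0, 0] = 99
assert original['samples'][0, 0] == 0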
Example 1: test_mapper_vs_zscore
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function"""
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
Developer: Anhmike | Project: PyMVPA | Lines: 35 | Source: test_zscoremapper.py
Example 2: test_all_equal
def test_all_equal():
    # all these values are supposed to be different from each other
    # but equal to themselves
    a = np.random.normal(size=(10, 10)) + 1000.
    b = np.zeros((10, 10))
    c = np.zeros(10)
    d = np.zeros(11)
    e = 0
    f = None
    g = True
    h = ''
    i = 'a'
    j = dict(bummer=np.arange(5))

    values = [a, b, c, d, e, f, g, h, i, j]
    for ii, v in enumerate(values):
        for jj, w in enumerate(values):
            # make deepcopy so == operator cannot cheat by checking id()
            assert_equal(all_equal(copy.deepcopy(v),
                                   copy.deepcopy(w)),
                         ii == jj,
                         msg='cmp(%s, %s)' % (type(v), type(w)))

    # ensure that this function behaves like the
    # standard python '==' comparator for singulars
    singulars = [0, None, True, False, '', 1, 'a']
    for v in singulars:
        for w in singulars:
            assert_equal(all_equal(v, w), v == w)
Developer: StevenLOL | Project: PyMVPA | Lines: 29 | Source: test_datasetng.py
Example 3: clone
def clone(self):
    """Create full copy of the classifier.

    It might require classifier to be untrained first due to
    present SWIG bindings.

    TODO: think about proper re-implementation, without enrollment of deepcopy
    """
    if __debug__:
        debug("CLF", "Cloning %s%s", (self, _strid(self)))
    try:
        return deepcopy(self)
    except:
        self.untrain()
        return deepcopy(self)
Developer: adamatus | Project: PyMVPA | Lines: 15 | Source: base.py
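A hypothetical usage sketch of clone() (the classifier and dataset names are assumed for illustration, not taken from the source): note that when the first deepcopy fails, the fallback untrains the original classifier before copying, so a clone of a SWIG-backed classifier may come back untrained.

# hypothetical names: clf is any PyMVPA classifier, training_ds a dataset
clf.train(training_ds)
clf2 = clf.clone()       # deepcopy, or untrain-then-deepcopy as a fallback
clf2.train(training_ds)  # retraining the clone leaves clf untouched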
Example 4: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]

    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace

    residuals = None
    if self.ca['residual_errors'].enabled:
        residuals = np.zeros((1, len(datasets)))
        self.ca.residual_errors = Dataset(samples=residuals)

    # start from original input datasets again
    for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
        if __debug__:
            debug('HPAL_', "Level 3: ds #%i" % i)

        # retrain mapper on final common space
        ds_new.sa[m.get_space()] = self.commonspace
        m.train(ds_new)
        # remove common space attribute again to save on memory
        del ds_new.sa[m.get_space()]

        if residuals is not None:
            # obtain final projection
            data_mapped = m.forward(ds_new.samples)
            residuals[0, i] = np.linalg.norm(data_mapped - self.commonspace)

    return mappers
Developer: adamatus | Project: PyMVPA | Lines: 30 | Source: hyperalignment.py
Example 5: _forward_data
def _forward_data(self, data):
    if self.__chunks_attr is not None:
        raise RuntimeError(
            "%s cannot do chunk-wise Z-scoring of plain data "
            "since it has to be parameterized with chunks_attr." % self)
    if self.__param_est is not None:
        raise RuntimeError("%s cannot do Z-scoring with estimating "
                           "parameters on some attributes of plain "
                           "data." % self)

    params = self.__params_dict
    if params is None:
        raise RuntimeError(
            "ZScoreMapper needs to be trained before call to forward")

    # mappers should not modify the input data
    # cast the data to float, since in-place operations below do not upcast!
    if np.issubdtype(data.dtype, np.integer):
        if self._secret_inplace_zscore:
            raise TypeError(
                "Cannot perform inplace z-scoring since data is of integer "
                "type. Please convert to float before calling zscore")
        mdata = data.astype(self.__dtype)
    elif self._secret_inplace_zscore:
        mdata = data
    else:
        # do not call .copy() directly, since it might not be an array
        mdata = copy.deepcopy(data)

    self._zscore(mdata, *params['__all__'])
    return mdata
Developer: Anhmike | Project: PyMVPA | Lines: 31 | Source: zscore.py
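The guard clauses in _forward_data imply a calling protocol; here is a minimal sketch of the happy path (the import path follows PyMVPA 2.x conventions and the dataset name ds is an assumption):

from mvpa2.mappers.zscore import ZScoreMapper

zsm = ZScoreMapper(chunks_attr=None)  # plain-data mode, no chunk-wise scoring
zsm.train(ds)                         # calling forward() first raises RuntimeError
zscored = zsm.forward(ds.samples.astype(float))  # integer input would raise TypeError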
Example 6: _level3
def _level3(self, datasets):
    params = self.params            # for quicker access ;)
    # create a mapper per dataset
    mappers = [deepcopy(params.alignment) for ds in datasets]

    # key different from level-2; the common space is uniform
    #temp_commonspace = commonspace

    # Fixing nproc=0
    if params.nproc == 0:
        from mvpa2.base import warning
        warning("nproc of 0 doesn't make sense. Setting nproc to 1.")
        params.nproc = 1

    # Checking for joblib, if not, set nproc to 1
    if params.nproc != 1:
        from mvpa2.base import externals, warning
        if not externals.exists('joblib'):
            warning("Setting nproc different from 1 requires joblib package, which "
                    "does not seem to exist. Setting nproc to 1.")
            params.nproc = 1

    # start from original input datasets again
    if params.nproc == 1:
        residuals = []
        for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 3: ds #%i" % i)
            m, residual = get_trained_mapper(ds_new, self.commonspace, m,
                                             self.ca['residual_errors'].enabled)
            if self.ca['residual_errors'].enabled:
                residuals.append(residual)
    else:
        if __debug__:
            debug('HPAL_', "Level 3: Using joblib with nproc = %d " % params.nproc)
        verbose_level_parallel = 20 \
            if (__debug__ and 'HPAL' in debug.active) else 0
        from joblib import Parallel, delayed
        import sys

        # joblib's 'multiprocessing' backend has known issues of failure on OSX
        # Tested with MacOS 10.12.13, python 2.7.13, joblib v0.10.3
        if params.joblib_backend is None:
            params.joblib_backend = 'threading' if sys.platform == 'darwin' \
                                    else 'multiprocessing'

        res = Parallel(
            n_jobs=params.nproc, pre_dispatch=params.nproc,
            backend=params.joblib_backend,
            verbose=verbose_level_parallel
        )(
            delayed(get_trained_mapper)
            (ds, self.commonspace, mapper, self.ca['residual_errors'].enabled)
            for ds, mapper in zip(datasets, mappers)
        )
        mappers = [m for m, r in res]
        if self.ca['residual_errors'].enabled:
            residuals = [r for m, r in res]

    if self.ca['residual_errors'].enabled:
        self.ca.residual_errors = Dataset(samples=np.array(residuals)[None, :])

    return mappers
Developer: PyMVPA | Project: PyMVPA | Lines: 59 | Source: hyperalignment.py
Example 7: setUp
def setUp(self):
    self.backup = []
    # paranoid check
    self.cfgstr = str(cfg)
    # clean up externals cfg for proper testing
    if cfg.has_section('externals'):
        self.backup = copy.deepcopy(cfg.items('externals'))
    cfg.remove_section('externals')
Developer: Arthurkorn | Project: PyMVPA | Lines: 8 | Source: test_externals.py
Example 8: test_deep_copying_state_variable
def test_deep_copying_state_variable(self):
    for v in (True, False):
        sv = ConditionalAttribute(enabled=v,
                                  doc="Testing")
        sv.enabled = not v
        sv_dc = copy.deepcopy(sv)
        self.failUnlessEqual(sv.enabled, sv_dc.enabled)
        self.failUnlessEqual(sv.name, sv_dc.name)
        self.failUnlessEqual(sv._instance_index, sv_dc._instance_index)
Developer: psederberg | Project: PyMVPA | Lines: 9 | Source: test_state.py
Example 9: test_deep_copying_state_variable
def test_deep_copying_state_variable(self):
    for v in (True, False):
        sv = ConditionalAttribute(enabled=v, doc="Testing")
        sv.enabled = not v
        sv_dc = copy.deepcopy(sv)
        if not (__debug__ and "ENFORCE_CA_ENABLED" in debug.active):
            self.assertEqual(sv.enabled, sv_dc.enabled)
        self.assertEqual(sv.name, sv_dc.name)
        self.assertEqual(sv._instance_index, sv_dc._instance_index)
Developer: schoeke | Project: PyMVPA | Lines: 9 | Source: test_state.py
Example 10: select_samples
def select_samples(self, selection):
    """Return new ColumnData with selected samples"""
    data = copy.deepcopy(self)
    for k, v in data.iteritems():
        data[k] = [v[x] for x in selection]

    data._check()
    return data
Developer: arnaudsj | Project: PyMVPA | Lines: 9 | Source: base.py
Example 11: test_id_hash
def test_id_hash(self, pair):
    a, b = pair
    a1 = deepcopy(a)
    a_1 = idhash(a)
    self.assertTrue(a_1 == idhash(a), msg="Must be of the same idhash")
    self.assertTrue(a_1 != idhash(b), msg="Must be of different idhash")
    if isinstance(a, np.ndarray):
        self.assertTrue(a_1 != idhash(a.T), msg=".T must be of different idhash")
    if not isinstance(a, tuple):
        self.assertTrue(a_1 != idhash(a1), msg="Must be of different idhash")
        a[2] += 1
        a_2 = idhash(a)
        self.assertTrue(a_1 != a_2, msg="Idhash must change")
    else:
        a_2 = a_1
    a = a[2:]
    a_3 = idhash(a)
    self.assertTrue(a_2 != a_3, msg="Idhash must change after slicing")
示例12: is_sorted
def is_sorted(items):
"""Check if listed items are in sorted order.
Parameters
----------
`items`: iterable container
:return: `True` if were sorted. Otherwise `False` + Warning
"""
items_sorted = deepcopy(items)
items_sorted.sort()
equality = items_sorted == items
# XXX yarik forgotten analog to isiterable
if hasattr(equality, '__iter__'):
equality = np.all(equality)
return equality
Developer: JohnGriffiths | Project: nidata | Lines: 16 | Source: support.py
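A short usage sketch: because is_sorted sorts a deepcopy, the caller's list is never reordered by the check.

items = [3, 1, 2]
assert not is_sorted(items)
assert items == [3, 1, 2]   # input order is preserved
assert is_sorted([1, 2, 3])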
Example 13: __new__
def __new__(cls, *args, **kwargs):
    """Instantiate ClassWithCollections object"""
    self = super(ClassWithCollections, cls).__new__(cls)

    s__dict__ = self.__dict__

    # init variable
    # XXX: Added as pylint complained (rightfully) -- not sure if false
    #      is the proper default
    self.__params_set = False

    # need to check to avoid override of enabled ca in the case
    # of multiple inheritance, like both ClassWithCollections and
    # Harvestable
    if '_collections' not in s__dict__:
        s__class__ = self.__class__

        collections = copy.deepcopy(s__class__._collections_template)
        s__dict__['_collections'] = collections
        s__dict__['_known_attribs'] = {}
        """Dictionary to contain 'links' to the collections from each
        known attribute. Is used to gain some speed up in lookup within
        __getattribute__ and __setattr__
        """

        # Assign owner to all collections
        for col, collection in collections.iteritems():
            if col in s__dict__:
                raise ValueError(
                    "Object %s has already attribute %s"
                    % (self, col))
            s__dict__[col] = collection
            collection.name = col

        self.__params_set = False

    if __debug__:
        descr = kwargs.get('descr', None)
        debug("COL", "ClassWithCollections.__new__ was done "
              "for %s%s with descr=%s",
              (s__class__.__name__, _strid(self), descr))

    return self
Developer: pckillerbrici | Project: PyMVPA | Lines: 44 | Source: state.py
Example 14: test_generic_tests
def test_generic_tests(self):
    """Test all classifiers for conformant behavior"""
    for clf_, traindata in \
            [(clfswh['binary'], datasets['dumb2']),
             (clfswh['multiclass'], datasets['dumb'])]:
        traindata_copy = deepcopy(traindata)  # full copy of dataset
        for clf in clf_:
            clf.train(traindata)
            self.assertTrue(
                (traindata.samples == traindata_copy.samples).all(),
                "Training of a classifier shouldn't change original dataset")

            # TODO: enforce uniform return from predict??
            #predicted = clf.predict(traindata.samples)
            #self.assertTrue(isinstance(predicted, np.ndarray))

            # Just simple test that all of them are syntaxed correctly
            self.assertTrue(str(clf) != "")
            self.assertTrue(repr(clf) != "")
Developer: Anhmike | Project: PyMVPA | Lines: 20 | Source: test_clf.py
Example 15: test_more_svd
def test_more_svd(self):
    pm = SVDMapper()
    # train SVD
    pm.train(self.largefeat)

    # mixing matrix cannot be square
    self.failUnlessEqual(pm.proj.shape, (40, 10))

    # only first singular value significant
    self.failUnless(pm.sv[:1] > 10)
    self.failUnless((pm.sv[1:] < 10).all())

    # now project data into SVD space
    p = pm.forward(self.largefeat)

    # only variance of first component significant
    var = p.var(axis=0)

    # test that only one component has variance
    self.failUnless(var[:1] > 1.0)
    self.failUnless((var[1:] < 0.0001).all())

    # check that the mapped data can be fully recovered by 'reverse()'
    rp = pm.reverse(p)
    self.failUnlessEqual(rp.shape, self.largefeat.shape)
    self.failUnless((np.round(rp) == self.largefeat).all())

    # copy mapper
    pm2 = deepcopy(pm)

    # now make new random data and do forward->reverse check
    data = np.random.normal(size=(98, 40))
    data_f = pm.forward(data)
    self.failUnlessEqual(data_f.shape, (98, 10))
    data_r = pm.reverse(data_f)
    self.failUnlessEqual(data_r.shape, (98, 40))
Developer: psederberg | Project: PyMVPA | Lines: 38 | Source: test_svdmapper.py
Example 16: __call__
def __call__(self, datasets):
    """Estimate mappers for each dataset

    Parameters
    ----------
    datasets : list or tuple of datasets

    Returns
    -------
    A list of trained Mappers of the same length as datasets
    """
    params = self.params            # for quicker access ;)
    ca = self.ca
    ndatasets = len(datasets)
    nfeatures = [ds.nfeatures for ds in datasets]

    residuals = None
    if ca['residual_errors'].enabled:
        residuals = np.zeros((2 + params.level2_niter, ndatasets))
        ca.residual_errors = Dataset(
            samples=residuals,
            sa={'levels':
                ['1'] +
                ['2:%i' % i for i in xrange(params.level2_niter)] +
                ['3']})

    if __debug__:
        debug('HPAL', "Hyperalignment %s for %i datasets"
              % (self, ndatasets))

    if params.ref_ds is None:
        ref_ds = np.argmax(nfeatures)
    else:
        ref_ds = params.ref_ds
    if ref_ds < 0 or ref_ds >= ndatasets:
        raise ValueError("Requested reference dataset %i is out of "
                         "bounds. We have only %i datasets provided"
                         % (ref_ds, ndatasets))
    ca.choosen_ref_ds = ref_ds
    # might prefer some other way to initialize... later
    mappers = [deepcopy(params.alignment) for ds in datasets]
    # zscore all data sets
    # ds = [ zscore(ds, chunks_attr=None) for ds in datasets]

    # Level 1 (first)

    # TODO since we are doing in-place zscoring create deep copies
    # of the datasets with pruned targets and shallow copies of
    # the collections (if they would come needed in the transformation)
    # TODO: handle floats and non-floats differently to prevent
    #       waste of memory if there is no need (e.g. no z-scoring)
    #otargets = [ds.sa.targets for ds in datasets]
    datasets = [ds.copy(deep=False) for ds in datasets]
    #datasets = [Dataset(ds.samples.astype(float), sa={'targets': [None] * len(ds)})
    #datasets = [Dataset(ds.samples, sa={'targets': [None] * len(ds)})
    #            for ds in datasets]

    if params.zscore_all:
        if __debug__:
            debug('HPAL', "Z-scoring all datasets")
        # zscore them once while storing corresponding ZScoreMapper's
        zmappers = []
        for ids in xrange(len(datasets)):
            zmapper = ZScoreMapper(chunks_attr=None)
            zmappers.append(zmapper)
            zmapper.train(datasets[ids])
            datasets[ids] = zmapper.forward(datasets[ids])

    commonspace = np.asanyarray(datasets[ref_ds])
    if params.zscore_common and not params.zscore_all:
        if __debug__:
            debug('HPAL_',
                  "Creating copy of a commonspace and assuring "
                  "it is of a floating type")
        commonspace = commonspace.astype(float)
        zscore(commonspace, chunks_attr=None)

    data_mapped = [np.asanyarray(ds) for ds in datasets]
    #zscore(data_mapped[ref_ds], chunks_attr=None)
    for i, (m, ds_new) in enumerate(zip(mappers, datasets)):
        if __debug__:
            debug('HPAL_', "Level 1: ds #%i" % i)
        if i == ref_ds:
            continue
        #ds_new = ds.copy()
        #zscore(ds_new, chunks_attr=None);
        ds_new.targets = commonspace
        m.train(ds_new)
        ds_ = m.forward(np.asanyarray(ds_new))
        if params.zscore_common:
            zscore(ds_, chunks_attr=None)
        data_mapped[i] = ds_

        if residuals is not None:
            residuals[0, i] = np.linalg.norm(ds_ - commonspace)

        ## if ds_mapped == []:
        ##     ds_mapped = [zscore(m.forward(d), chunks_attr=None)]
        ## else:
        ##     ds_mapped += [zscore(m.forward(d), chunks_attr=None)]
# ......... the rest of this code is omitted .........
Developer: arnaudsj | Project: PyMVPA | Lines: 101 | Source: hyperalignment.py
Example 17: timesegments_classification
def timesegments_classification(
        dss,
        hyper=None,
        part1=HalfPartitioner(),
        part2=NFoldPartitioner(attr='subjects'),
        window_size=6,
        overlapping_windows=True,
        distance='correlation',
        do_zscore=True):
    """Time-segment classification across subjects using Hyperalignment

    Parameters
    ----------
    dss : list of datasets
        Datasets to benchmark on. Usually a single dataset per subject.
    hyper : Hyperalignment-like, optional
        Beast which if called on a list of datasets should spit out trained
        mappers. If not specified, `IdentityMapper`s will be used
    part1 : Partitioner, optional
        Partitioner to split data for hyperalignment "cross-validation"
    part2 : Partitioner, optional
        Partitioner for CV within the hyperalignment test split
    window_size : int, optional
        How many temporal points to consider for a classification sample
    overlapping_windows : bool, optional
        Strategy of how to create and classify "samples" for classification.
        If True -- `window_size` samples from each time point (but trailing
        ones) constitute a sample, and upon "predict" `window_size` of
        samples around each test point is not considered. If False --
        samples are just taken (with training and testing splits) at
        `window_size` step from one to another.
    do_zscore : bool, optional
        Perform zscoring (overall, not per-chunk) for each dataset upon
        partitioning with part1
    ...
    """
    # Generate outer-most partitioning ()
    parts = [copy.deepcopy(part1).generate(ds) for ds in dss]

    iter = 1
    errors = []

    while True:
        try:
            dss_partitioned = [p.next() for p in parts]
        except StopIteration:
            # we are done -- no more partitions
            break
        if __debug__:
            debug("BM", "Iteration %d", iter)

        dss_train, dss_test = zip(*[list(Splitter("partitions").generate(ds))
                                    for ds in dss_partitioned])

        # TODO: allow for doing feature selection
        if do_zscore:
            for ds in dss_train + dss_test:
                zscore(ds, chunks_attr=None)

        if hyper is not None:
            # since otherwise it would remember previous loop dataset as the "commonspace"
            # Now let's do hyperalignment but on a copy in each loop iteration
            hyper_ = copy.deepcopy(hyper)
            mappers = hyper_(dss_train)
        else:
            mappers = [IdentityMapper() for ds in dss_train]

        dss_test_aligned = [mapper.forward(ds) for mapper, ds in zip(mappers, dss_test)]

        # assign .sa.subjects to those datasets
        for i, ds in enumerate(dss_test_aligned):
            # part2.attr is by default "subjects"
            ds.sa[part2.attr] = [i]

        dss_test_bc = []
        for ds in dss_test_aligned:
            if overlapping_windows:
                startpoints = range(len(ds) - window_size + 1)
            else:
                startpoints = _get_nonoverlapping_startpoints(len(ds), window_size)
            bm = BoxcarMapper(startpoints, window_size)
            bm.train(ds)
            ds_ = bm.forward(ds)
            ds_.sa['startpoints'] = startpoints

            # reassign subjects so they are not arrays
            def assign_unique(ds, sa):
                ds.sa[sa] = [np.asscalar(np.unique(x)) for x in ds.sa[sa].value]
            assign_unique(ds_, part2.attr)

            fm = FlattenMapper()
            fm.train(ds_)
            dss_test_bc.append(ds_.get_mapped(fm))

        ds_test = vstack(dss_test_bc)
        # Perform classification across subjects comparing against mean
        # spatio-temporal pattern of other subjects
        errors_across_subjects = []
        for ds_test_part in part2.generate(ds_test):
            ds_train_, ds_test_ = list(Splitter("partitions").generate(ds_test_part))
# ......... the rest of this code is omitted .........
Developer: Anhmike | Project: PyMVPA | Lines: 101 | Source: hyperalignment.py
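For illustration, a plausible reimplementation of the `_get_nonoverlapping_startpoints` helper used above; the real function lives elsewhere in PyMVPA, so treat this sketch as an assumption about its behavior rather than the actual implementation:

def _get_nonoverlapping_startpoints(n, window_size):
    # assumed behavior: start indices of consecutive, non-overlapping
    # windows of `window_size` time points within `n` samples
    return range(0, n - window_size + 1, window_size)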
Example 18: __call__
def __call__(self, datasets):
    """Estimate mappers for each dataset

    Parameters
    ----------
    datasets : list or tuple of datasets

    Returns
    -------
    A list of trained Mappers of the same length as datasets
    """
    params = self.params            # for quicker access ;)
    ca = self.ca
    ndatasets = len(datasets)
    nfeatures = [ds.nfeatures for ds in datasets]

    residuals = None
    if ca['residual_errors'].enabled:
        residuals = np.zeros((2 + params.level2_niter, ndatasets))
        ca.residual_errors = Dataset(
            samples=residuals,
            sa={'levels':
                ['1'] +
                ['2:%i' % i for i in xrange(params.level2_niter)] +
                ['3']})

    if __debug__:
        debug('HPAL', "Hyperalignment %s for %i datasets"
              % (self, ndatasets))

    if params.ref_ds is None:
        ref_ds = np.argmax(nfeatures)
    else:
        ref_ds = params.ref_ds
    if ref_ds < 0 or ref_ds >= ndatasets:
        raise ValueError("Requested reference dataset %i is out of "
                         "bounds. We have only %i datasets provided"
                         % (ref_ds, ndatasets))
    ca.choosen_ref_ds = ref_ds
    # might prefer some other way to initialize... later
    mappers = [deepcopy(params.alignment) for ds in datasets]
    # zscore all data sets
    # ds = [ zscore(ds, chunks_attr=None) for ds in datasets]

    # Level 1 (first)
    commonspace = np.asanyarray(datasets[ref_ds])
    if params.zscore_common:
        zscore(commonspace, chunks_attr=None)
    data_mapped = [np.asanyarray(ds) for ds in datasets]
    for i, (m, data) in enumerate(zip(mappers, data_mapped)):
        if __debug__:
            debug('HPAL_', "Level 1: ds #%i" % i)
        if i == ref_ds:
            continue
        #ZSC zscore(data, chunks_attr=None)
        ds = dataset_wizard(samples=data, targets=commonspace)
        #ZSC zscore(ds, chunks_attr=None)
        m.train(ds)
        data_temp = m.forward(data)
        #ZSC zscore(data_temp, chunks_attr=None)
        data_mapped[i] = data_temp

        if residuals is not None:
            residuals[0, i] = np.linalg.norm(data_temp - commonspace)

        ## if ds_mapped == []:
        ##     ds_mapped = [zscore(m.forward(d), chunks_attr=None)]
        ## else:
        ##     ds_mapped += [zscore(m.forward(d), chunks_attr=None)]

        # zscore before adding
        # TODO: make just a function so we dont' waste space
        commonspace = params.combiner1(data_mapped[i], commonspace)
        if params.zscore_common:
            zscore(commonspace, chunks_attr=None)

    # update commonspace to mean of ds_mapped
    commonspace = params.combiner2(data_mapped)
    if params.zscore_common:
        zscore(commonspace, chunks_attr=None)

    # Level 2 -- might iterate multiple times
    for loop in xrange(params.level2_niter):
        for i, (m, ds) in enumerate(zip(mappers, datasets)):
            if __debug__:
                debug('HPAL_', "Level 2 (%i-th iteration): ds #%i" % (loop, i))

            ## ds_temp = zscore( (commonspace*ndatasets - ds_mapped[i])
            ##                   /(ndatasets-1), chunks_attr=None )
            ds_new = ds.copy()
            #ZSC zscore(ds_new, chunks_attr=None)
            #PRJ ds_temp = (commonspace*ndatasets - ds_mapped[i])/(ndatasets-1)
            #ZSC zscore(ds_temp, chunks_attr=None)
            ds_new.targets = commonspace  #PRJ ds_temp
            m.train(ds_new)  # ds_temp)
            data_mapped[i] = m.forward(np.asanyarray(ds))
            if residuals is not None:
                residuals[1 + loop, i] = np.linalg.norm(data_mapped[i] - commonspace)

            #ds_mapped[i] = zscore( m.forward(ds_temp), chunks_attr=None)
# ......... the rest of this code is omitted .........
Developer: psederberg | Project: PyMVPA | Lines: 101 | Source: hyperalignment.py
Example 19: _get_transformer
def _get_transformer(self):
    if self._transformer is None:
        self._transformer = deepcopy(self._pristine_transformer)
    return self._transformer
Developer: Guenx | Project: PyMVPA | Lines: 4 | Source: skl_adaptor.py
Example 20: test_multivariate
def test_multivariate(self):
    mv_perf = []
    mv_lin_perf = []
    uv_perf = []

    l_clf = clfswh['linear', 'svm'][0]
    nl_clf = clfswh['non-linear', 'svm'][0]
    #orig_keys = nl_clf.param._params.keys()
    #nl_param_orig = nl_clf.param._params.copy()
    # l_clf = LinearNuSVMC()

    # XXX ??? not sure what below meant and it is obsolete if
    # using SG... commenting out for now
    # for some reason order is not preserved thus dictionaries are not
    # the same any longer -- lets compare values
    #self.assertEqual([nl_clf.param._params[k] for k in orig_keys],
    #                 [nl_param_orig[k] for k in orig_keys],
    #   msg="New instance mustn't override values in previously created")
    ## and keys separately
    #self.assertEqual(set(nl_clf.param._params.keys()),
    #                 set(orig_keys),
    #   msg="New instance doesn't change set of parameters in original")

    # We must be able to deepcopy not yet trained SVMs now
    import mvpa2.support.copy as copy
    try:
        nl_clf.untrain()
        nl_clf_copy = copy.deepcopy(nl_clf)
    except:
        self.fail(msg="Failed to deepcopy not-yet trained SVM %s" % nl_clf)

    for i in xrange(20):
        train = pure_multivariate_signal(20, 3)
        test = pure_multivariate_signal(20, 3)

        # use non-linear CLF on 2d data
        nl_clf.train(train)
        p_mv = nl_clf.predict(test.samples)
        mv_perf.append(np.mean(p_mv == test.targets))

        # use linear CLF on 2d data
        l_clf.train(train)
        p_lin_mv = l_clf.predict(test.samples)
        mv_lin_perf.append(np.mean(p_lin_mv == test.targets))

        # use non-linear CLF on 1d data
        nl_clf.train(train[:, 0])
        p_uv = nl_clf.predict(test[:, 0].samples)
        uv_perf.append(np.mean(p_uv == test.targets))

    mean_mv_perf = np.mean(mv_perf)
    mean_mv_lin_perf = np.mean(mv_lin_perf)
    mean_uv_perf = np.mean(uv_perf)

    # non-linear CLF has to be close to perfect
    self.assertTrue(mean_mv_perf > 0.9)
    # linear CLF cannot learn this problem!
    self.assertTrue(mean_mv_perf > mean_mv_lin_perf)
    # univariate has insufficient information
    self.assertTrue(mean_uv_perf < mean_mv_perf)
Developer: Arthurkorn | Project: PyMVPA | Lines: 62 | Source: test_svm.py
Note: The mvpa2.support.copy.deepcopy function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the code remains with its original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.