This article collects typical usage examples of the Python function mvpa2.testing.tools.assert_array_equal. If you have been wondering what exactly assert_array_equal does, how to use it, and what real code that calls it looks like, the curated examples below should help.
The 20 code examples of assert_array_equal that follow are sorted by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python samples.
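Before the examples, a quick orientation: as used throughout PyMVPA's test suite, assert_array_equal behaves like numpy.testing.assert_array_equal — it converts both arguments to arrays and raises an AssertionError pinpointing the mismatch if they differ in shape or in any element. A minimal illustrative sketch (standalone, not taken from PyMVPA):

import numpy as np
from numpy.testing import assert_array_equal

# Lists, tuples and arrays are all accepted; comparison is element-wise.
assert_array_equal([1, 2, 3], np.array([1, 2, 3]))  # passes silently

try:
    assert_array_equal(np.zeros(3), np.array([0, 1, 0]))
except AssertionError as e:
    print(e)  # reports the mismatching positions and values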
Example 1: test_sifter_with_balancing
def test_sifter_with_balancing():
    # extended previous test which was already
    # "... somewhat duplicating the doctest"
    ds = Dataset(samples=np.arange(12).reshape((-1, 2)),
                 sa={'chunks': [0, 1, 2, 3, 4, 5],
                     'targets': ['c', 'c', 'c', 'p', 'p', 'p']})
    # Without the sifter -- just to assure that we do get all of them,
    # i.e. 6*5*4*3/(4!) = 15
    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks')])
    assert_equal(len(list(par.generate(ds))), 15)

    # so we will take 4 chunks out of the available 6, but care only
    # about those partitions where we have a balanced number of 'c' and 'p'
    # entries
    assert_raises(ValueError,
                  lambda x: list(Sifter([('targets', dict(wrong=1))]).generate(x)),
                  ds)

    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks'),
                     Sifter([('partitions', 2),
                             ('targets',
                              dict(uvalues=['c', 'p'],
                                   balanced=True))])
                     ])
    dss = list(par.generate(ds))
    # print [x[x.sa.partitions == 2].sa.targets for x in dss]
    assert_equal(len(dss), 9)
    for ds_ in dss:
        testing = ds[ds_.sa.partitions == 2]
        assert_array_equal(np.unique(testing.sa.targets), ['c', 'p'])
        # and we still have both targets present in training
        training = ds[ds_.sa.partitions == 1]
        assert_array_equal(np.unique(training.sa.targets), ['c', 'p'])
Author: Soletmons, Project: PyMVPA, Lines: 34, Source: test_generators.py
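A pattern worth noting in this example: combining np.unique with assert_array_equal asserts the set of values present, independent of order and multiplicity, since np.unique returns the distinct values sorted. A standalone illustration with made-up labels:

import numpy as np
from numpy.testing import assert_array_equal

targets = np.array(['p', 'c', 'c', 'p'])
# np.unique sorts its output, so the expected list can be written in canonical order
assert_array_equal(np.unique(targets), ['c', 'p'])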
Example 2: test_sphere_scaled
def test_sphere_scaled():
    s1 = ne.Sphere(3)
    s = ne.Sphere(3, element_sizes=(1, 1))
    # Should give exactly the same results since element_sizes are 1s
    for p in ((0, 0), (-23, 1)):
        assert_array_equal(s1(p), s(p))
        ok_(len(s(p)) == len(set(s(p))))
    # Raise an exception if query dimensionality does not match element_sizes
    assert_raises(ValueError, s, (1,))

    s = ne.Sphere(3, element_sizes=(1.5, 2))
    assert_array_equal(s((0, 0)),
                       [(-2, 0), (-1, -1), (-1, 0), (-1, 1),
                        (0, -1), (0, 0), (0, 1),
                        (1, -1), (1, 0), (1, 1), (2, 0)])

    s = ne.Sphere(1.5, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= 1.5 for x in res]))
    ok_(len(res) == 7)

    # all neighbors are no more than 1 voxel away -- just a cube; for
    # some "sphere" effect the radius had to be 3.0 ;)
    td = np.sqrt(3 * 1.5**2)
    s = ne.Sphere(td, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= td for x in res]))
    ok_(np.all([np.sum(np.abs(x) > 1) == 0 for x in res]))
    ok_(len(res) == 27)
Author: Anhmike, Project: PyMVPA, Lines: 31, Source: test_neighborhood.py
Example 3: test_identity
def test_identity():
    # IdentityNeighborhood() behaves like Sphere(0.5) without all of the
    # computation. Test on a few different coordinates.
    neighborhood = ne.IdentityNeighborhood()
    sphere = ne.Sphere(0.5)
    for center in ((0, 0, 0), (1, 1, 1), (0, 0), (0,)):
        assert_array_equal(neighborhood(center), sphere(center))
Author: Anhmike, Project: PyMVPA, Lines: 7, Source: test_neighborhood.py
Example 4: test_mapper_vs_zscore
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
Author: Anhmike, Project: PyMVPA, Lines: 35, Source: test_zscoremapper.py
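The idhash bookkeeping above verifies that forward() never modifies its input in place. The same property can be checked without PyMVPA's idhash helper by comparing against a defensive copy; a minimal sketch using plain z-scoring as a stand-in for the mapper (illustrative only):

import numpy as np
from numpy.testing import assert_array_equal

samples = np.random.normal(size=(5, 3))
before = samples.copy()
# out-of-place z-scoring: builds a new array, leaves `samples` alone
zscored = (samples - samples.mean(axis=0)) / samples.std(axis=0)
assert_array_equal(samples, before)  # the input is untouched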
Example 5: test_corrstability_smoketest
def test_corrstability_smoketest(ds):
    if 'chunks' not in ds.sa:
        return
    if len(ds.sa['targets'].unique) > 30:
        # was a regression dataset
        return
    # very basic testing since
    cs = CorrStability()
    #ds = datasets['uni2small']
    out = cs(ds)

    assert_equal(out.shape, (ds.nfeatures,))
    ok_(np.all(out >= -1.001))  # it should be a correlation after all
    ok_(np.all(out <= 1.001))

    # and theoretically those nonbogus features should have higher values
    if 'nonbogus_targets' in ds.fa:
        bogus_features = np.array([x is None for x in ds.fa.nonbogus_targets])
        assert_array_less(np.mean(out[bogus_features]),
                          np.mean(out[~bogus_features]))

    # and if we move targets to an alternative location
    ds = ds.copy(deep=True)
    ds.sa['alt'] = ds.T
    ds.sa.pop('targets')
    assert_raises(KeyError, cs, ds)
    cs = CorrStability('alt')
    out_ = cs(ds)
    assert_array_equal(out, out_)
Author: Anhmike, Project: PyMVPA, Lines: 26, Source: test_corrstability.py
Example 6: test_basic
def test_basic(self):
    dataset = data_generators.linear1d_gaussian_noise()
    k = GeneralizedLinearKernel()
    clf = GPR(k)
    clf.train(dataset)
    y = clf.predict(dataset.samples)
    assert_array_equal(y.shape, dataset.targets.shape)
Author: Anhmike, Project: PyMVPA, Lines: 7, Source: test_gpr.py
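Note that assert_array_equal is not restricted to arrays proper: any array-likes are converted first, so comparing shape tuples, as done here, works directly. An illustrative one-liner:

import numpy as np
from numpy.testing import assert_array_equal

y = np.zeros((10, 1))
assert_array_equal(y.shape, (10, 1))  # shape tuples compare element-wise, too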
Example 7: test_cv_no_generator_custom_splitter
def test_cv_no_generator_custom_splitter(self):
    ds = Dataset(np.arange(4),
                 sa={'category': ['to', 'to', 'from', 'from'],
                     'targets': ['a', 'b', 'c', 'd']})

    class Measure(Classifier):
        def _train(self, ds_):
            assert_array_equal(ds_.samples, ds.samples[2:])
            assert_array_equal(ds_.sa.category, ['from'] * len(ds_))

        def _predict(self, ds_):
            assert(ds_ is not ds)  # we pass a shallow copy
            # could be called to predict training or testing data
            if np.all(ds_.sa.targets != ['c', 'd']):
                assert_array_equal(ds_.samples, ds.samples[:2])
                assert_array_equal(ds_.sa.category, ['to'] * len(ds_))
            else:
                assert_array_equal(ds_.sa.category, ['from'] * len(ds_))
            return ['c', 'd']

    measure = Measure()
    cv = CrossValidation(measure, splitter=Splitter('category', ['from', 'to']))
    res = cv(ds)
    assert_array_equal(res, [[1]])  # failed perfectly ;-)
Author: Anhmike, Project: PyMVPA, Lines: 25, Source: test_clfcrossval.py
Example 8: test_chained_crossvalidation_searchlight
def test_chained_crossvalidation_searchlight():
    from mvpa2.clfs.gnb import GNB
    from mvpa2.clfs.meta import MappedClassifier
    from mvpa2.generators.partition import NFoldPartitioner
    from mvpa2.mappers.base import ChainMapper
    from mvpa2.mappers.base import Mapper
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.testing.datasets import datasets

    dataset = datasets['3dlarge'].copy()
    dataset.fa['voxel_indices'] = dataset.fa.myspace
    sample_clf = GNB()  # fast and deterministic

    class ZScoreFeaturesMapper(Mapper):
        """Very basic mapper which would take care about standardizing
        all features within each sample separately
        """
        def _forward_data(self, data):
            return (data - np.mean(data, axis=1)[:, None]) / np.std(data, axis=1)[:, None]

    # only do partial to save time
    sl_kwargs = dict(radius=2, center_ids=[3, 50])
    clf_mapped = MappedClassifier(sample_clf, ZScoreFeaturesMapper())
    cv = CrossValidation(clf_mapped, NFoldPartitioner())
    sl = sphere_searchlight(cv, **sl_kwargs)
    results_mapped = sl(dataset)

    cv_chained = ChainMapper([ZScoreFeaturesMapper(auto_train=True),
                              CrossValidation(sample_clf, NFoldPartitioner())])
    sl_chained = sphere_searchlight(cv_chained, **sl_kwargs)
    results_chained = sl_chained(dataset)

    assert_array_equal(results_mapped, results_chained)
Author: beausievers, Project: PyMVPA, Lines: 34, Source: test_usecases.py
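The equivalence asserted here rests on the row-wise standardization being the same transform whether it runs inside a MappedClassifier or as the first node of a ChainMapper. The transform itself is easy to sanity-check in isolation (illustrative, plain numpy):

import numpy as np

data = np.random.normal(size=(4, 6))
# standardize each sample (row) across its features
z = (data - np.mean(data, axis=1)[:, None]) / np.std(data, axis=1)[:, None]
print(np.allclose(z.mean(axis=1), 0))  # True: every row has zero mean
print(np.allclose(z.std(axis=1), 1))   # True: ... and unit standard deviation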
Example 9: test_remove_invariant_as_a_mapper
def test_remove_invariant_as_a_mapper():
    from mvpa2.featsel.helpers import RangeElementSelector
    from mvpa2.featsel.base import StaticFeatureSelection, SensitivityBasedFeatureSelection
    from mvpa2.testing.datasets import datasets
    from mvpa2.datasets.miscfx import remove_invariant_features

    mapper = SensitivityBasedFeatureSelection(
        lambda x: np.std(x, axis=0),
        RangeElementSelector(lower=0, inclusive=False),
        train_analyzer=False,
        auto_train=True)

    ds = datasets['uni2large'].copy()
    ds.a['mapper'] = StaticFeatureSelection(np.arange(ds.nfeatures))
    ds.fa['index'] = np.arange(ds.nfeatures)
    ds.samples[:, [1, 8]] = 10

    ds_out = mapper(ds)

    # Validate that we are getting the same results as remove_invariant_features
    ds_rifs = remove_invariant_features(ds)
    assert_array_equal(ds_out.samples, ds_rifs.samples)
    assert_array_equal(ds_out.fa.index, ds_rifs.fa.index)

    assert_equal(ds_out.fa.index[1], 2)
    assert_equal(ds_out.fa.index[8], 10)
Author: beausievers, Project: PyMVPA, Lines: 27, Source: test_usecases.py
Example 10: test_aggregation
def test_aggregation(self):
    data = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)
    ag_data = aggregate_features(data, np.mean)
    ok_(ag_data.nsamples == 4)
    ok_(ag_data.nfeatures == 1)
    assert_array_equal(ag_data.samples[:, 0], [2, 7, 12, 17])
Author: armaneshaghi, Project: PyMVPA, Lines: 8, Source: test_datasetfx.py
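The expected column [2, 7, 12, 17] is just the per-row mean of np.arange(20).reshape(4, 5), which can be verified directly without PyMVPA:

import numpy as np
from numpy.testing import assert_array_equal

samples = np.arange(20).reshape(4, 5)
# row 0 is [0..4] with mean 2, row 1 is [5..9] with mean 7, and so on
assert_array_equal(samples.mean(axis=1), [2, 7, 12, 17])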
Example 11: _assert_ds_mat_attributes_equal
def _assert_ds_mat_attributes_equal(ds, m, attr_keys=('a', 'sa', 'fa')):
    # ds is a Dataset object, m a matlab-like dictionary
    for attr_k in attr_keys:
        attr_v = getattr(ds, attr_k)
        for k in attr_v.keys():
            v = attr_v[k].value
            assert_array_equal(m[attr_k][k][0, 0].ravel(), v)
Author: StevenLOL, Project: PyMVPA, Lines: 8, Source: test_cosmo.py
Example 12: test_basic
def test_basic(self):
    skip_if_no_external('scipy')  # needed by GPR code
    dataset = data_generators.linear1d_gaussian_noise()
    k = GeneralizedLinearKernel()
    clf = GPR(k)
    clf.train(dataset)
    y = clf.predict(dataset.samples)
    assert_array_equal(y.shape, dataset.targets.shape)
Author: Arthurkorn, Project: PyMVPA, Lines: 8, Source: test_gpr.py
Example 13: test_partitionmapper
def test_partitionmapper():
    ds = give_data()
    oep = OddEvenPartitioner()
    parts = list(oep.generate(ds))
    assert_equal(len(parts), 2)
    for i, p in enumerate(parts):
        assert_array_equal(p.sa['partitions'].unique, [1, 2])
        assert_equal(p.a.partitions_set, i)
        assert_equal(len(p), len(ds))
Author: Soletmons, Project: PyMVPA, Lines: 9, Source: test_generators.py
Example 14: test_sphere
def test_sphere():
    # test sphere initialization
    s = ne.Sphere(1)
    center0 = (0, 0, 0)
    center1 = (1, 1, 1)
    assert_equal(len(s(center0)), 7)
    target = array([array([-1, 0, 0]),
                    array([0, -1, 0]),
                    array([0, 0, -1]),
                    array([0, 0, 0]),
                    array([0, 0, 1]),
                    array([0, 1, 0]),
                    array([1, 0, 0])])
    # test of internals -- no recomputation of increments should be done
    prev_increments = s._increments
    assert_array_equal(s(center0), target)
    ok_(prev_increments is s._increments)
    # query lower dimensionality
    _ = s((0, 0))
    ok_(prev_increments is not s._increments)

    # test Sphere call
    target = [array([0, 1, 1]),
              array([1, 0, 1]),
              array([1, 1, 0]),
              array([1, 1, 1]),
              array([1, 1, 2]),
              array([1, 2, 1]),
              array([2, 1, 1])]
    res = s(center1)
    assert_array_equal(array(res), target)
    # They all should be tuples
    ok_(np.all([isinstance(x, tuple) for x in res]))

    # test for larger diameter
    s = ne.Sphere(4)
    assert_equal(len(s(center1)), 257)

    # test extent keyword
    #s = ne.Sphere(4, extent=(1, 1, 1))
    #assert_array_equal(array(s((0, 0, 0))), array([[0, 0, 0]]))

    # test errors during initialisation and call
    #assert_raises(ValueError, ne.Sphere, 2)
    #assert_raises(ValueError, ne.Sphere, 1.0)

    # extent is no longer available
    assert_raises(TypeError, ne.Sphere, 1, extent=(1))
    assert_raises(TypeError, ne.Sphere, 1, extent=(1.0, 1.0, 1.0))

    s = ne.Sphere(1)
    #assert_raises(ValueError, s, (1))
    if __debug__:
        # No float coordinates allowed for now...
        # XXX might like to change that ;)
        assert_raises(ValueError, s, (1.0, 1.0, 1.0))
Author: Anhmike, Project: PyMVPA, Lines: 57, Source: test_neighborhood.py
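The magic numbers 7 and 257 are simply the counts of integer offsets within Euclidean distance 1 and 4 of a center, and can be recomputed independently by brute force over a bounding cube (illustrative, plain numpy):

import numpy as np

def n_lattice_points(radius):
    # all integer offsets in the bounding cube, tested against the radius
    r = int(np.ceil(radius))
    grid = np.mgrid[-r:r + 1, -r:r + 1, -r:r + 1]
    return int(np.sum(np.sum(grid ** 2, axis=0) <= radius ** 2))

print(n_lattice_points(1))  # 7: the center plus its six face neighbors
print(n_lattice_points(4))  # 257, matching len(s(center1)) above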
Example 15: _assert_ds_less_or_equal
def _assert_ds_less_or_equal(x, y):
    # x and y are Datasets; x should contain a subset of the
    # elements in .sa, .fa, .a and have the same samples as y.
    # Note: no support for fancy objects such as mappers
    assert_array_equal(x.samples, y.samples)
    for label in ('a', 'fa', 'sa'):
        vx = getattr(x, label)
        vy = getattr(y, label)
        _assert_array_collectable_less_or_equal(vx, vy)
Author: StevenLOL, Project: PyMVPA, Lines: 9, Source: test_cosmo.py
Example 16: test_size_random_prototypes
def test_size_random_prototypes(self):
    self.build_vector_based_pm()
    fraction = 0.5
    prototype_number = max(int(len(self.samples) * fraction), 1)
    ## debug("MAP", "Generating " + str(prototype_number) + " random prototypes.")
    self.prototypes2 = np.array(random.sample(list(self.samples), prototype_number))
    self.pm2 = PrototypeMapper(similarities=self.similarities,
                               prototypes=self.prototypes2)
    self.pm2.train(self.samples)
    assert_array_equal(self.pm2.proj.shape,
                       (self.samples.shape[0],
                        self.pm2.prototypes.shape[0] * len(self.similarities)))
Author: Anhmike, Project: PyMVPA, Lines: 9, Source: test_prototypemapper.py
Example 17: test_nonfinite_features_removal
def test_nonfinite_features_removal(self):
    r = np.random.normal(size=(4, 5))
    ds = dataset_wizard(r, targets=1, chunks=1)
    ds.samples[2, 0] = np.nan
    ds.samples[3, 3] = np.inf

    dsc = remove_nonfinite_features(ds)

    self.assertTrue(dsc.nfeatures == 3)
    assert_array_equal(ds[:, [1, 2, 4]].samples, dsc.samples)
Author: Anhmike, Project: PyMVPA, Lines: 10, Source: test_datasetfx.py
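remove_nonfinite_features drops a feature (column) as soon as any of its values is NaN or infinite. The surviving column indices asserted above can be derived with a plain numpy mask (illustrative):

import numpy as np
from numpy.testing import assert_array_equal

samples = np.random.normal(size=(4, 5))
samples[2, 0] = np.nan
samples[3, 3] = np.inf
keep = np.all(np.isfinite(samples), axis=0)  # True only for all-finite columns
assert_array_equal(np.flatnonzero(keep), [1, 2, 4])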
Example 18: test_balancer
def test_balancer():
    ds = give_data()
    # only mark the selection in an attribute
    bal = Balancer()
    res = bal(ds)
    # we get a new dataset, with shared samples
    assert_false(ds is res)
    assert_true(ds.samples is res.samples.base)
    # should kick out 2 samples in each chunk of 10
    assert_almost_equal(np.mean(res.sa.balanced_set), 0.8)

    # same as above, but actually apply the selection
    bal = Balancer(apply_selection=True, count=5)
    # just run it once
    res = bal(ds)
    # we get a new dataset, with shared samples
    assert_false(ds is res)
    # should kick out 2 samples in each chunk of 10
    assert_equal(len(res), int(0.8 * len(ds)))
    # now use it as a generator
    dses = list(bal.generate(ds))
    assert_equal(len(dses), 5)

    # with limit
    bal = Balancer(limit={'chunks': 3}, apply_selection=True)
    res = bal(ds)
    assert_equal(res.sa['chunks'].unique, (3,))
    assert_equal(get_nelements_per_value(res.sa.targets).values(),
                 [2] * 4)
    # same but include all offlimit samples
    bal = Balancer(limit={'chunks': 3}, include_offlimit=True,
                   apply_selection=True)
    res = bal(ds)
    assert_array_equal(res.sa['chunks'].unique, range(10))
    # chunk three is still balanced, but the rest is not, i.e. all samples included
    assert_equal(get_nelements_per_value(res[res.sa.chunks == 3].sa.targets).values(),
                 [2] * 4)
    assert_equal(get_nelements_per_value(res.sa.chunks).values(),
                 [10, 10, 10, 8, 10, 10, 10, 10, 10, 10])

    # fixed amount
    bal = Balancer(amount=1, limit={'chunks': 3}, apply_selection=True)
    res = bal(ds)
    assert_equal(get_nelements_per_value(res.sa.targets).values(),
                 [1] * 4)

    # fraction
    bal = Balancer(amount=0.499, limit=None, apply_selection=True)
    res = bal(ds)
    assert_array_equal(
        np.round(np.array(get_nelements_per_value(ds.sa.targets).values()) * 0.5),
        np.array(get_nelements_per_value(res.sa.targets).values()))

    # check on feature attribute
    ds.fa['one'] = np.tile([1, 2], 5)
    ds.fa['chk'] = np.repeat([1, 2], 5)
    bal = Balancer(attr='one', amount=2, limit='chk', apply_selection=True)
    res = bal(ds)
    assert_equal(get_nelements_per_value(res.fa.one).values(),
                 [4] * 2)
Author: Soletmons, Project: PyMVPA, Lines: 55, Source: test_generators.py
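get_nelements_per_value, used throughout this test, tallies how many samples carry each distinct attribute value. The same tally is available in plain numpy, which may clarify what the [2] * 4 style expectations assert (illustrative):

import numpy as np

targets = np.repeat(['a', 'b', 'c', 'd'], 2)  # four values, two samples each
values, counts = np.unique(targets, return_counts=True)
print(dict(zip(values.tolist(), counts.tolist())))  # {'a': 2, 'b': 2, 'c': 2, 'd': 2}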
Example 19: test_attrpermute
def test_attrpermute():
    ds = give_data()
    ds.sa['ids'] = range(len(ds))
    pristine_data = ds.samples.copy()
    permutation = AttributePermutator(['targets', 'ids'], assure=True)
    pds = permutation(ds)
    # should not touch the data
    assert_array_equal(pristine_data, pds.samples)
    # even keep the very same array
    assert_true(pds.samples.base is ds.samples)
    # there is no way that it can be the same attribute
    assert_false(np.all(pds.sa.ids == ds.sa.ids))
    # ids should reflect permutation setup
    assert_array_equal(pds.sa.targets, ds.sa.targets[pds.sa.ids])
    # other attribute should remain intact
    assert_array_equal(pds.sa.chunks, ds.sa.chunks)

    # now chunk-wise permutation
    permutation = AttributePermutator('ids', limit='chunks')
    pds = permutation(ds)
    # first ten should remain first ten
    assert_false(np.any(pds.sa.ids[:10] > 9))

    # same thing, but only permute a single chunk
    permutation = AttributePermutator('ids', limit={'chunks': 3})
    pds = permutation(ds)
    # one chunk should change
    assert_false(np.any(pds.sa.ids[30:40] > 39))
    assert_false(np.any(pds.sa.ids[30:40] < 30))
    # the rest should not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # or a list of chunks
    permutation = AttributePermutator('ids', limit={'chunks': [3, 4]})
    pds = permutation(ds)
    # two chunks should change
    assert_false(np.any(pds.sa.ids[30:50] > 49))
    assert_false(np.any(pds.sa.ids[30:50] < 30))
    # the rest should not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # and now try generating more permutations
    nruns = 2
    permutation = AttributePermutator(['targets', 'ids'], assure=True, count=nruns)
    pds = list(permutation.generate(ds))
    assert_equal(len(pds), nruns)
    for p in pds:
        assert_false(np.all(p.sa.ids == ds.sa.ids))

    # permute feature attrs
    ds.fa['ids'] = range(ds.shape[1])
    permutation = AttributePermutator('fa.ids', assure=True)
    pds = permutation(ds)
    assert_false(np.all(pds.fa.ids == ds.fa.ids))
Author: arnaudsj, Project: PyMVPA, Lines: 54, Source: test_generators.py
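The central invariant above -- permuted targets equal the original targets indexed by the permuted ids -- holds for any consistent permutation of both attributes, as a plain-numpy sketch shows (illustrative):

import numpy as np
from numpy.testing import assert_array_equal

targets = np.array(['a', 'b', 'c', 'd'])
ids = np.arange(len(targets))                # identity ids, as in the test above
perm = np.random.permutation(len(targets))
p_targets, p_ids = targets[perm], ids[perm]  # permute both with the same perm
assert_array_equal(p_targets, targets[p_ids])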
Example 20: test_streamline_equal_mapper
def test_streamline_equal_mapper(self):
    self.build_streamline_things()

    self.prototypes_equal = self.dataset.samples
    self.pm = PrototypeMapper(similarities=self.similarities,
                              prototypes=self.prototypes_equal,
                              demean=False)
    self.pm.train(self.dataset.samples)
    ## debug("MAP", "projected data: " + str(self.pm.proj))
    # check size:
    assert_array_equal(self.pm.proj.shape,
                       (len(self.dataset.samples),
                        len(self.prototypes_equal) * len(self.similarities)))
    # test symmetry
    assert_array_almost_equal(self.pm.proj, self.pm.proj.T)
Author: Anhmike, Project: PyMVPA, Lines: 13, Source: test_prototypemapper.py
Note: the mvpa2.testing.tools.assert_array_equal examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms; the snippets are drawn from open-source projects contributed by their respective developers. Copyright remains with the original authors; consult each project's license before using or redistributing the code. Do not reproduce without permission.