This article collects typical usage examples of the assert_array_equal function from Python's mvpa.testing.tools module. If you have been asking yourself how exactly the Python assert_array_equal function works, how to call it, or what real uses of it look like, the hand-picked code examples below may help.
Below, 20 code examples of the assert_array_equal function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
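Before turning to the collected examples, a minimal self-contained sketch may clarify what assert_array_equal actually checks: it raises an AssertionError unless two array-like objects have the same shape and element-wise identical contents. The sketch imports the helper from numpy.testing, on the assumption that mvpa.testing.tools simply re-exports NumPy's testing helpers under the same name; with PyMVPA installed, importing assert_array_equal from mvpa.testing.tools should behave the same way.

import numpy as np
# assumption: equivalent to the PyMVPA re-export in mvpa.testing.tools
from numpy.testing import assert_array_equal

a = np.arange(6).reshape(2, 3)
b = np.array([[0, 1, 2], [3, 4, 5]])

assert_array_equal(a, b)             # passes: same shape, identical elements
assert_array_equal(a.shape, (2, 3))  # array-likes such as tuples are accepted too

try:
    assert_array_equal(a, b + 1)     # raises: every element differs
except AssertionError as err:
    print(err)                       # the error message reports the mismatch

Note that, unlike the plain == operator, assert_array_equal produces a readable report of where and how the arrays differ, which is why test suites such as PyMVPA's use it throughout.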
Example 1: test_basic

def test_basic(self):
    dataset = data_generators.linear1d_gaussian_noise()
    k = GeneralizedLinearKernel()
    clf = GPR(k)
    clf.train(dataset)
    y = clf.predict(dataset.samples)
    assert_array_equal(y.shape, dataset.targets.shape)

Developer: B-Rich, Project: PyMVPA, Lines of code: 7, Source: test_gpr.py
Example 2: test_simple_n_minus_one_cv

def test_simple_n_minus_one_cv(self):
    data = get_mv_pattern(3)
    data.init_origids('samples')

    self.failUnless(data.nsamples == 120)
    self.failUnless(data.nfeatures == 2)
    self.failUnless(
        (data.sa.targets == \
         [0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0] * 6).all())
    self.failUnless(
        (data.sa.chunks == \
         [k for k in range(1, 7) for i in range(20)]).all())
    assert_equal(len(np.unique(data.sa.origids)), data.nsamples)

    transerror = TransferError(sample_clf_nl)
    cv = CrossValidatedTransferError(
        transerror,
        NFoldSplitter(cvtype=1),
        enable_ca=['confusion', 'training_confusion',
                   'samples_error'])

    results = cv(data)
    self.failUnless((results.samples < 0.2).all() and (results.samples >= 0.0).all())

    # TODO: test accessibility of {training_,}confusion{,s} of
    #       CrossValidatedTransferError

    self.failUnless(isinstance(cv.ca.samples_error, dict))
    self.failUnless(len(cv.ca.samples_error) == data.nsamples)
    # one value for each origid
    assert_array_equal(sorted(cv.ca.samples_error.keys()),
                       sorted(data.sa.origids))
    for k, v in cv.ca.samples_error.iteritems():
        self.failUnless(len(v) == 1)

Developer: geeragh, Project: PyMVPA, Lines of code: 34, Source: test_clfcrossval.py
Example 3: test_sphere_scaled

def test_sphere_scaled():
    s1 = ne.Sphere(3)
    s = ne.Sphere(3, element_sizes=(1, 1))

    # Should give exactly the same results since element_sizes are 1s
    for p in ((0, 0), (-23, 1)):
        assert_array_equal(s1(p), s(p))
        ok_(len(s(p)) == len(set(s(p))))

    # Raise exception if query dimensionality does not match element_sizes
    assert_raises(ValueError, s, (1,))

    s = ne.Sphere(3, element_sizes=(1.5, 2))
    assert_array_equal(s((0, 0)),
                       [(-2, 0), (-1, -1), (-1, 0), (-1, 1),
                        (0, -1), (0, 0), (0, 1),
                        (1, -1), (1, 0), (1, 1), (2, 0)])

    s = ne.Sphere(1.5, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= 1.5 for x in res]))
    ok_(len(res) == 7)

    # all neighbors so no more than 1 voxel away -- just a cube, for
    # some "sphere" effect radius had to be 3.0 ;)
    td = np.sqrt(3*1.5**2)
    s = ne.Sphere(td, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= td for x in res]))
    ok_(np.all([np.sum(np.abs(x) > 1) == 0 for x in res]))
    ok_(len(res) == 27)

Developer: B-Rich, Project: PyMVPA, Lines of code: 31, Source: test_neighborhood.py
Example 4: test_aggregation

def test_aggregation(self):
    data = dataset_wizard(np.arange(20).reshape((4, 5)), targets=1, chunks=1)

    ag_data = aggregate_features(data, np.mean)

    ok_(ag_data.nsamples == 4)
    ok_(ag_data.nfeatures == 1)
    assert_array_equal(ag_data.samples[:, 0], [2, 7, 12, 17])

Developer: B-Rich, Project: PyMVPA, Lines of code: 8, Source: test_datasetfx.py
Example 5: test_size_random_prototypes

def test_size_random_prototypes(self):
    self.build_vector_based_pm()
    fraction = 0.5
    prototype_number = max(int(len(self.samples)*fraction), 1)
    ## debug("MAP","Generating "+str(prototype_number)+" random prototypes.")
    self.prototypes2 = np.array(random.sample(self.samples, prototype_number))
    self.pm2 = PrototypeMapper(similarities=self.similarities, prototypes=self.prototypes2)
    self.pm2.train(self.samples)
    assert_array_equal(self.pm2.proj.shape, (self.samples.shape[0], self.pm2.prototypes.shape[0]*len(self.similarities)))

Developer: esc, Project: PyMVPA, Lines of code: 9, Source: test_prototypemapper.py
Example 6: test_sphere

def test_sphere():
    # test sphere initialization
    s = ne.Sphere(1)
    center0 = (0, 0, 0)
    center1 = (1, 1, 1)
    assert_equal(len(s(center0)), 7)
    target = array([array([-1, 0, 0]),
                    array([ 0, -1, 0]),
                    array([ 0, 0, -1]),
                    array([ 0, 0, 0]),
                    array([ 0, 0, 1]),
                    array([ 0, 1, 0]),
                    array([ 1, 0, 0])])
    # test of internals -- no recomputation of increments should be done
    prev_increments = s._increments
    assert_array_equal(s(center0), target)
    ok_(prev_increments is s._increments)
    # query lower dimensionality
    _ = s((0, 0))
    ok_(not prev_increments is s._increments)

    # test Sphere call
    target = [array([0, 1, 1]),
              array([1, 0, 1]),
              array([1, 1, 0]),
              array([1, 1, 1]),
              array([1, 1, 2]),
              array([1, 2, 1]),
              array([2, 1, 1])]
    res = s(center1)
    assert_array_equal(array(res), target)
    # They all should be tuples
    ok_(np.all([isinstance(x, tuple) for x in res]))

    # test for larger diameter
    s = ne.Sphere(4)
    assert_equal(len(s(center1)), 257)

    # test extent keyword
    #s = ne.Sphere(4,extent=(1,1,1))
    #assert_array_equal(array(s((0,0,0))), array([[0,0,0]]))

    # test Errors during initialisation and call
    #assert_raises(ValueError, ne.Sphere, 2)
    #assert_raises(ValueError, ne.Sphere, 1.0)
    # no longer extent available
    assert_raises(TypeError, ne.Sphere, 1, extent=(1))
    assert_raises(TypeError, ne.Sphere, 1, extent=(1.0, 1.0, 1.0))
    s = ne.Sphere(1)
    #assert_raises(ValueError, s, (1))
    if __debug__:
        # No float coordinates allowed for now...
        # XXX might like to change that ;)
        assert_raises(ValueError, s, (1.0, 1.0, 1.0))

Developer: B-Rich, Project: PyMVPA, Lines of code: 57, Source: test_neighborhood.py
Example 7: test_partitionmapper

def test_partitionmapper():
    ds = give_data()
    oep = OddEvenPartitioner()
    parts = list(oep.generate(ds))
    assert_equal(len(parts), 2)
    for i, p in enumerate(parts):
        assert_array_equal(p.sa['partitions'].unique, [1, 2])
        assert_equal(p.a.partitions_set, i)
        assert_equal(len(p), len(ds))

Developer: esc, Project: PyMVPA, Lines of code: 9, Source: test_generators.py
Example 8: test_chainmapper

def test_chainmapper():
    # the chain needs at least one mapper
    assert_raises(ValueError, ChainMapper, [])
    # a typical first mapper is to flatten
    cm = ChainMapper([FlattenMapper()])

    # few container checks
    assert_equal(len(cm), 1)
    assert_true(isinstance(cm[0], FlattenMapper))

    # now training
    # come up with data
    samples_shape = (2, 2, 4)
    data_shape = (4,) + samples_shape
    data = np.arange(np.prod(data_shape)).reshape(data_shape)
    pristinedata = data.copy()
    target = [
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
        [16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31],
        [32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47],
        [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63],
    ]
    target = np.array(target)

    # if it is not trained it knows nothing
    cm.train(data)

    # a new mapper should appear when doing feature selection
    cm.append(FeatureSliceMapper(range(1, 16)))
    assert_equal(cm.forward1(data[0]).shape, (15,))
    assert_equal(len(cm), 2)
    # multiple slicing
    cm.append(FeatureSliceMapper([9, 14]))
    assert_equal(cm.forward1(data[0]).shape, (2,))
    assert_equal(len(cm), 3)

    # check reproduction
    cm_clone = eval(repr(cm))
    assert_equal(repr(cm_clone), repr(cm))

    # what happens if we retrain the whole beast on the same data as before
    cm.train(data)
    assert_equal(cm.forward1(data[0]).shape, (2,))
    assert_equal(len(cm), 3)

    # let's map something
    mdata = cm.forward(data)
    assert_array_equal(mdata, target[:, [10, 15]])
    # and back
    rdata = cm.reverse(mdata)
    # original shape
    assert_equal(rdata.shape, data.shape)
    # content as far as it could be restored
    assert_array_equal(rdata[rdata > 0], data[rdata > 0])
    assert_equal(np.sum(rdata > 0), 8)

Developer: arokem, Project: PyMVPA, Lines of code: 55, Source: test_mapper.py
Example 9: test_attrpermute

def test_attrpermute():
    ds = give_data()
    ds.sa['ids'] = range(len(ds))
    pristine_data = ds.samples.copy()
    permutation = AttributePermutator(['targets', 'ids'], assure=True)
    pds = permutation(ds)
    # should not touch the data
    assert_array_equal(pristine_data, pds.samples)
    # even keep the very same array
    assert_true(pds.samples.base is ds.samples)
    # there is no way that it can be the same attribute
    assert_false(np.all(pds.sa.ids == ds.sa.ids))
    # ids should reflect permutation setup
    assert_array_equal(pds.sa.targets, ds.sa.targets[pds.sa.ids])
    # other attribute should remain intact
    assert_array_equal(pds.sa.chunks, ds.sa.chunks)

    # now chunk-wise permutation
    permutation = AttributePermutator('ids', limit='chunks')
    pds = permutation(ds)
    # first ten should remain first ten
    assert_false(np.any(pds.sa.ids[:10] > 9))

    # same thing, but only permute single chunk
    permutation = AttributePermutator('ids', limit={'chunks': 3})
    pds = permutation(ds)
    # one chunk should change
    assert_false(np.any(pds.sa.ids[30:40] > 39))
    assert_false(np.any(pds.sa.ids[30:40] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # or a list of chunks
    permutation = AttributePermutator('ids', limit={'chunks': [3, 4]})
    pds = permutation(ds)
    # two chunks should change
    assert_false(np.any(pds.sa.ids[30:50] > 49))
    assert_false(np.any(pds.sa.ids[30:50] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))

    # and now try generating more permutations
    nruns = 2
    permutation = AttributePermutator(['targets', 'ids'], assure=True, count=nruns)
    pds = list(permutation.generate(ds))
    assert_equal(len(pds), nruns)
    for p in pds:
        assert_false(np.all(p.sa.ids == ds.sa.ids))

    # permute feature attrs
    ds.fa['ids'] = range(ds.shape[1])
    permutation = AttributePermutator('fa.ids', assure=True)
    pds = permutation(ds)
    assert_false(np.all(pds.fa.ids == ds.fa.ids))

Developer: esc, Project: PyMVPA, Lines of code: 54, Source: test_generators.py
Example 10: test_streamline_equal_mapper

def test_streamline_equal_mapper(self):
    self.build_streamline_things()

    self.prototypes_equal = self.dataset.samples
    self.pm = PrototypeMapper(similarities=self.similarities,
                              prototypes=self.prototypes_equal,
                              demean=False)
    self.pm.train(self.dataset.samples)
    ## debug("MAP","projected data: "+str(self.pm.proj))
    # check size:
    assert_array_equal(self.pm.proj.shape, (len(self.dataset.samples), len(self.prototypes_equal)*len(self.similarities)))
    # test symmetry
    assert_array_almost_equal(self.pm.proj, self.pm.proj.T)

Developer: esc, Project: PyMVPA, Lines of code: 13, Source: test_prototypemapper.py
Example 11: test_forward_dense_array_mapper

def test_forward_dense_array_mapper():
    mask = np.ones((3, 2), dtype="bool")
    map_ = mask_mapper(mask)

    # test shape reports
    assert_equal(map_.forward1(mask).shape, (6,))

    # test 1sample mapping
    assert_array_equal(map_.forward1(np.arange(6).reshape(3, 2)),
                       [0, 1, 2, 3, 4, 5])

    # test 4sample mapping
    foursample = map_.forward(np.arange(24).reshape(4, 3, 2))
    assert_array_equal(foursample,
                       [[0, 1, 2, 3, 4, 5],
                        [6, 7, 8, 9, 10, 11],
                        [12, 13, 14, 15, 16, 17],
                        [18, 19, 20, 21, 22, 23]])

    # check incomplete masks
    mask[1, 1] = 0
    map_ = mask_mapper(mask)
    assert_equal(map_.forward1(mask).shape, (5,))
    assert_array_equal(map_.forward1(np.arange(6).reshape(3, 2)),
                       [0, 1, 2, 4, 5])

    # check that it doesn't accept wrong dataspace
    assert_raises(ValueError, map_.forward, np.arange(4).reshape(2, 2))

    # check fail if neither mask nor shape
    assert_raises(ValueError, mask_mapper)

    # check that a full mask is automatically created when providing shape
    m = mask_mapper(shape=(2, 3, 4))
    mp = m.forward1(np.arange(24).reshape(2, 3, 4))
    assert_array_equal(mp, np.arange(24))

Developer: B-Rich, Project: PyMVPA, Lines of code: 32, Source: test_arraymapper.py
Example 12: test_glmnet_state

def test_glmnet_state():
    #data = datasets['dumb2']
    # for some reason the R code fails with the dumb data
    data = datasets['chirp_linear']

    clf = GLMNET_R()

    clf.train(data)

    clf.ca.enable('predictions')

    p = clf.predict(data.samples)

    assert_array_equal(p, clf.ca.predictions)

Developer: esc, Project: PyMVPA, Lines of code: 14, Source: test_glmnet.py
Example 13: test_glmnet_c

def test_glmnet_c():
    # define binary prob
    data = datasets['dumb2']

    # use GLMNET on binary problem
    clf = GLMNET_C()
    clf.ca.enable('estimates')

    clf.train(data)

    # test predictions
    pre = clf.predict(data.samples)

    assert_array_equal(pre, data.targets)

Developer: esc, Project: PyMVPA, Lines of code: 14, Source: test_glmnet.py
Example 14: test_custom_split

def test_custom_split(self):
    # simulate half splitter
    hs = CustomPartitioner([(None, [0, 1, 2, 3, 4]), (None, [5, 6, 7, 8, 9])])
    spl = Splitter(attr='partitions')
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    self.failUnless(len(splits) == 2)

    for i, p in enumerate(splits):
        self.failUnless(len(p) == 2)
        self.failUnless(p[0].nsamples == 50)
        self.failUnless(p[1].nsamples == 50)

    assert_array_equal(splits[0][1].sa['chunks'].unique, [0, 1, 2, 3, 4])
    assert_array_equal(splits[0][0].sa['chunks'].unique, [5, 6, 7, 8, 9])
    assert_array_equal(splits[1][1].sa['chunks'].unique, [5, 6, 7, 8, 9])
    assert_array_equal(splits[1][0].sa['chunks'].unique, [0, 1, 2, 3, 4])

    # check fully customized split with working and validation set specified
    cs = CustomPartitioner([([0, 3, 4], [5, 9])])
    # we want to discard the unselected partition of the data, hence attr_values
    spl = Splitter(attr='partitions', attr_values=[1, 2])
    splits = [list(spl.generate(p)) for p in cs.generate(self.data)]
    self.failUnless(len(splits) == 1)

    for i, p in enumerate(splits):
        self.failUnless(len(p) == 2)
        self.failUnless(p[0].nsamples == 30)
        self.failUnless(p[1].nsamples == 20)

    self.failUnless((splits[0][1].sa['chunks'].unique == [5, 9]).all())
    self.failUnless((splits[0][0].sa['chunks'].unique == [0, 3, 4]).all())

Developer: B-Rich, Project: PyMVPA, Lines of code: 32, Source: test_splitter.py
Example 15: test_streamline_random_mapper

def test_streamline_random_mapper(self):
    self.build_streamline_things()
    # Adding one more similarity to test multiple similarities in the streamline case:
    self.similarities.append(StreamlineSimilarity(distance=corouge))

    fraction = 0.5
    prototype_number = max(int(len(self.dataset.samples)*fraction), 1)
    ## debug("MAP","Generating "+str(prototype_number)+" random prototypes.")
    self.prototypes_random = self.dataset.samples[np.random.permutation(self.dataset.samples.size)][:prototype_number]
    ## debug("MAP","prototypes: "+str(self.prototypes_random))

    self.pm = PrototypeMapper(similarities=self.similarities, prototypes=self.prototypes_random, demean=False)
    self.pm.train(self.dataset.samples)  # , fraction=1.0)
    # test size:
    assert_array_equal(self.pm.proj.shape, (len(self.dataset.samples), len(self.prototypes_random)*len(self.similarities)))

Developer: esc, Project: PyMVPA, Lines of code: 16, Source: test_prototypemapper.py
Example 16: test_label_splitter

def test_label_splitter(self):
    oes = OddEvenSplitter(attr='targets')

    splits = [(first, second) for (first, second) in oes(self.data)]

    assert_array_equal(splits[0][0].sa['targets'].unique, [0, 2])
    assert_array_equal(splits[0][1].sa['targets'].unique, [1, 3])
    assert_array_equal(splits[1][0].sa['targets'].unique, [1, 3])
    assert_array_equal(splits[1][1].sa['targets'].unique, [0, 2])

Developer: arokem, Project: PyMVPA, Lines of code: 9, Source: test_splitter.py
Example 17: test_splitter

def test_splitter():
    ds = give_data()
    # split with defaults
    spl1 = Splitter('chunks')
    assert_raises(NotImplementedError, spl1, ds)

    splits = list(spl1.generate(ds))
    assert_equal(len(splits), len(ds.sa['chunks'].unique))

    for split in splits:
        # it should have performed basic slicing!
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.sa['chunks'].unique), 1)
        assert_true('lastsplit' in split.a)
    assert_true(splits[-1].a.lastsplit)

    # now again, more customized
    spl2 = Splitter('targets', attr_values=[0, 1, 1, 2, 3, 3, 3], count=4,
                    noslicing=True)
    splits = list(spl2.generate(ds))
    assert_equal(len(splits), 4)
    for split in splits:
        # it should NOT have performed basic slicing!
        assert_false(split.samples.base is ds.samples)
        assert_equal(len(split.sa['targets'].unique), 1)
        assert_equal(len(split.sa['chunks'].unique), 10)
    assert_true(splits[-1].a.lastsplit)

    # two should be identical
    assert_array_equal(splits[1].samples, splits[2].samples)

    # now go wild and split by feature attribute
    ds.fa['roi'] = np.repeat([0, 1], 5)
    # splitter should auto-detect that this is a feature attribute
    spl3 = Splitter('roi')
    splits = list(spl3.generate(ds))
    assert_equal(len(splits), 2)
    for split in splits:
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.fa['roi'].unique), 1)
        assert_equal(split.shape, (100, 5))

    # and finally test chained splitters
    cspl = ChainNode([spl2, spl3, spl1])
    splits = list(cspl.generate(ds))
    # 4 target splits and 2 roi splits each and 10 chunks each
    assert_equal(len(splits), 80)

Developer: esc, Project: PyMVPA, Lines of code: 47, Source: test_generators.py
Example 18: test_collections

def test_collections():
    sa = SampleAttributesCollection()
    assert_equal(len(sa), 0)

    assert_raises(ValueError, sa.__setitem__, 'test', 0)
    l = range(5)
    sa['test'] = l
    # auto-wrapped
    assert_true(isinstance(sa['test'], ArrayCollectable))
    assert_equal(len(sa), 1)

    # names which are already present in dict interface
    assert_raises(ValueError, sa.__setitem__, 'values', range(5))

    sa_c = copy.deepcopy(sa)
    assert_equal(len(sa), len(sa_c))
    assert_array_equal(sa.test, sa_c.test)

Developer: esc, Project: PyMVPA, Lines of code: 17, Source: test_collections.py
Example 19: test_sifter

def test_sifter():
    # somewhat duplicating the doctest
    ds = Dataset(samples=np.arange(8).reshape((4, 2)),
                 sa={'chunks': [0, 1, 2, 3],
                     'targets': ['c', 'c', 'p', 'p']})
    par = ChainNode([NFoldPartitioner(cvtype=2, attr='chunks'),
                     Sifter([('partitions', 2),
                             ('targets', ['c', 'p'])])
                     ])
    dss = list(par.generate(ds))
    assert_equal(len(dss), 4)
    for ds_ in dss:
        testing = ds[ds_.sa.partitions == 2]
        assert_array_equal(np.unique(testing.sa.targets), ['c', 'p'])
        # and we still have both targets present in training
        training = ds[ds_.sa.partitions == 1]
        assert_array_equal(np.unique(training.sa.targets), ['c', 'p'])

Developer: esc, Project: PyMVPA, Lines of code: 17, Source: test_generators.py
Example 20: test_label_splitter

def test_label_splitter(self):
    oes = OddEvenPartitioner(attr='targets')
    spl = Splitter(attr='partitions')

    splits = [list(spl.generate(p)) for p in oes.generate(self.data)]

    assert_array_equal(splits[0][0].sa['targets'].unique, [0, 2])
    assert_array_equal(splits[0][1].sa['targets'].unique, [1, 3])
    assert_array_equal(splits[1][0].sa['targets'].unique, [1, 3])
    assert_array_equal(splits[1][1].sa['targets'].unique, [0, 2])

Developer: B-Rich, Project: PyMVPA, Lines of code: 10, Source: test_splitter.py
Note: The mvpa.testing.tools.assert_array_equal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before redistributing or reusing the code. Do not republish without permission.