This article collects typical usage examples of the assert_raises function from the Python module mvpa2.testing.tools. If you have been wondering what exactly assert_raises does, how to call it, and where to find usage examples, the curated code samples below should help.
Twenty code examples of assert_raises are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
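Before the examples, here is a minimal sketch of the call pattern they all share. assert_raises takes the expected exception type (or a tuple of types, as in Example 9), then the callable under test, then any positional and keyword arguments, which it forwards when invoking the callable. The divide helper below is a made-up stand-in for illustration only, not part of PyMVPA:

    from mvpa2.testing.tools import assert_raises

    def divide(a, b):
        # hypothetical helper, used only to demonstrate the call pattern
        return a / b

    # passes: calling divide(1, 0) raises the expected ZeroDivisionError
    assert_raises(ZeroDivisionError, divide, 1, 0)

    # a tuple of exception types is accepted as well (cf. Example 9)
    assert_raises((ZeroDivisionError, ValueError), divide, 1, 0)

    # keyword arguments are forwarded too (cf. Example 15)
    assert_raises(ZeroDivisionError, divide, a=1, b=0)

    # if the call raises nothing, or a different exception,
    # assert_raises itself raises AssertionError

Note that the callable and its arguments are passed separately instead of calling the function yourself; this lets assert_raises perform the call inside a try/except block and catch the exception.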
Example 1: test_assert_objectarray_equal
def test_assert_objectarray_equal():
    if versions['numpy'] < '1.4':
        raise SkipTest("Skipping because of known segfaults with numpy < 1.4")
    # explicit dtype so we could test with numpy < 1.6
    a = np.array([np.array([0, 1]), np.array(1)], dtype=object)
    b = np.array([np.array([0, 1]), np.array(1)], dtype=object)
    # they should be ok for both types of comparison
    for strict in True, False:
        # good with self
        assert_objectarray_equal(a, a, strict=strict)
        # good with a copy
        assert_objectarray_equal(a, a.copy(), strict=strict)
        # good while operating with an identical one
        # see http://projects.scipy.org/numpy/ticket/2117
        assert_objectarray_equal(a, b, strict=strict)
    # now check if we still fail for a good reason
    for value_equal, b in (
            (False, np.array(1)),
            (False, np.array([1])),
            (False, np.array([np.array([0, 1]), np.array((1, 2))], dtype=object)),
            (False, np.array([np.array([0, 1]), np.array(1.1)], dtype=object)),
            (True, np.array([np.array([0, 1]), np.array(1.0)], dtype=object)),
            (True, np.array([np.array([0, 1]), np.array(1, dtype=object)], dtype=object)),
            ):
        assert_raises(AssertionError, assert_objectarray_equal, a, b)
        if value_equal:
            # but should not raise for non-default strict=False
            assert_objectarray_equal(a, b, strict=False)
        else:
            assert_raises(AssertionError, assert_objectarray_equal, a, b, strict=False)
Author ID: Anhmike, Project: PyMVPA, Code lines: 32, Source file: test_testing.py
Example 2: test_mapper_vs_zscore
def test_mapper_vs_zscore():
    """Test by comparing to results of elderly z-score function
    """
    # data: 40 sample feature line in 20d space (40x20; samples x features)
    dss = [
        dataset_wizard(np.concatenate(
            [np.arange(40) for i in range(20)]).reshape(20, -1).T,
            targets=1, chunks=1),
        ] + datasets.values()

    for ds in dss:
        ds1 = deepcopy(ds)
        ds2 = deepcopy(ds)

        zsm = ZScoreMapper(chunks_attr=None)
        assert_raises(RuntimeError, zsm.forward, ds1.samples)
        idhashes = (idhash(ds1), idhash(ds1.samples))
        zsm.train(ds1)
        idhashes_train = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_train)

        # forward dataset
        ds1z_ds = zsm.forward(ds1)
        idhashes_forwardds = (idhash(ds1), idhash(ds1.samples))
        # must not modify samples in place!
        assert_equal(idhashes, idhashes_forwardds)

        # forward samples explicitly
        ds1z = zsm.forward(ds1.samples)
        idhashes_forward = (idhash(ds1), idhash(ds1.samples))
        assert_equal(idhashes, idhashes_forward)

        zscore(ds2, chunks_attr=None)
        assert_array_almost_equal(ds1z, ds2.samples)
        assert_array_equal(ds1.samples, ds.samples)
Author ID: Anhmike, Project: PyMVPA, Code lines: 35, Source file: test_zscoremapper.py
Example 3: test_sphere_scaled
def test_sphere_scaled():
    s1 = ne.Sphere(3)
    s = ne.Sphere(3, element_sizes=(1, 1))

    # Should give exactly the same results since element_sizes are 1s
    for p in ((0, 0), (-23, 1)):
        assert_array_equal(s1(p), s(p))
        ok_(len(s(p)) == len(set(s(p))))

    # Raise exception if query dimensionality does not match element_sizes
    assert_raises(ValueError, s, (1,))

    s = ne.Sphere(3, element_sizes=(1.5, 2))
    assert_array_equal(s((0, 0)),
                       [(-2, 0), (-1, -1), (-1, 0), (-1, 1),
                        (0, -1), (0, 0), (0, 1),
                        (1, -1), (1, 0), (1, 1), (2, 0)])

    s = ne.Sphere(1.5, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= 1.5 for x in res]))
    ok_(len(res) == 7)

    # all neighbors so no more than 1 voxel away -- just a cube, for
    # some "sphere" effect radius had to be 3.0 ;)
    td = np.sqrt(3 * 1.5**2)
    s = ne.Sphere(td, element_sizes=(1.5, 1.5, 1.5))
    res = s((0, 0, 0))
    ok_(np.all([np.sqrt(np.sum(np.array(x)**2)) <= td for x in res]))
    ok_(np.all([np.sum(np.abs(x) > 1) == 0 for x in res]))
    ok_(len(res) == 27)
Author ID: Anhmike, Project: PyMVPA, Code lines: 31, Source file: test_neighborhood.py
Example 4: test_basic_collectable
def test_basic_collectable():
    c = Collectable()

    # empty by default
    assert_equal(c.name, None)
    assert_equal(c.value, None)
    assert_equal(c.__doc__, None)

    # late assignment
    c.name = 'somename'
    c.value = 12345
    assert_equal(c.name, 'somename')
    assert_equal(c.value, 12345)

    # immediate content
    c = Collectable('value', 'myname', "This is a test")
    assert_equal(c.name, 'myname')
    assert_equal(c.value, 'value')
    assert_equal(c.__doc__, "This is a test")
    assert_equal(str(c), 'myname')

    # repr
    e = eval(repr(c))
    assert_equal(e.name, 'myname')
    assert_equal(e.value, 'value')
    assert_equal(e.__doc__, "This is a test")

    # shallow copy does not create a view of value array
    c.value = np.arange(5)
    d = copy.copy(c)
    assert_false(d.value.base is c.value)

    # names starting with _ are not allowed
    assert_raises(ValueError, c._set_name, "_underscore")
Author ID: andreirusu, Project: PyMVPA, Code lines: 34, Source file: test_collections.py
Example 5: test_corrstability_smoketest
def test_corrstability_smoketest(ds):
    if 'chunks' not in ds.sa:
        return
    if len(ds.sa['targets'].unique) > 30:
        # was regression dataset
        return
    # very basic (smoke) testing
    cs = CorrStability()
    #ds = datasets['uni2small']
    out = cs(ds)

    assert_equal(out.shape, (ds.nfeatures,))
    ok_(np.all(out >= -1.001))  # it should be a correlation after all
    ok_(np.all(out <= 1.001))

    # and theoretically those nonbogus features should have higher values
    if 'nonbogus_targets' in ds.fa:
        bogus_features = np.array([x is None for x in ds.fa.nonbogus_targets])
        assert_array_less(np.mean(out[bogus_features]),
                          np.mean(out[~bogus_features]))

    # and if we move targets to an alternative location
    ds = ds.copy(deep=True)
    ds.sa['alt'] = ds.T
    ds.sa.pop('targets')
    assert_raises(KeyError, cs, ds)
    cs = CorrStability('alt')
    out_ = cs(ds)
    assert_array_equal(out, out_)
Author ID: Anhmike, Project: PyMVPA, Code lines: 26, Source file: test_corrstability.py
Example 6: test_sifter_with_balancing
def test_sifter_with_balancing():
    # extended previous test which was already
    # "... somewhat duplicating the doctest"
    ds = Dataset(samples=np.arange(12).reshape((-1, 2)),
                 sa={'chunks':  [0, 1, 2, 3, 4, 5],
                     'targets': ['c', 'c', 'c', 'p', 'p', 'p']})

    # Without sifter -- just to assure that we do get all of them,
    # i.e. 6*5*4*3/(4!) = 15
    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks')])
    assert_equal(len(list(par.generate(ds))), 15)

    # so we will take 4 chunks out of the available 6, but would care only
    # about those partitions where we have a balanced number of 'c' and 'p'
    # entries
    assert_raises(ValueError,
                  lambda x: list(Sifter([('targets', dict(wrong=1))]).generate(x)),
                  ds)

    par = ChainNode([NFoldPartitioner(cvtype=4, attr='chunks'),
                     Sifter([('partitions', 2),
                             ('targets',
                              dict(uvalues=['c', 'p'],
                                   balanced=True))])
                     ])
    dss = list(par.generate(ds))
    # print [x[x.sa.partitions == 2].sa.targets for x in dss]
    assert_equal(len(dss), 9)

    for ds_ in dss:
        testing = ds[ds_.sa.partitions == 2]
        assert_array_equal(np.unique(testing.sa.targets), ['c', 'p'])
        # and we still have both targets present in training
        training = ds[ds_.sa.partitions == 1]
        assert_array_equal(np.unique(training.sa.targets), ['c', 'p'])
Author ID: Soletmons, Project: PyMVPA, Code lines: 34, Source file: test_generators.py
Example 7: test_permute_chunks
def test_permute_chunks():

    def is_sorted(x):
        return np.array_equal(np.sort(x), x)

    ds = give_data()
    # change target labels; target labels are not permuted within chunks,
    # so assure=True would otherwise be an error
    ds.sa['targets'] = range(len(ds.sa.targets))
    permutation = AttributePermutator(attr='targets',
                                      chunk_attr='chunks',
                                      strategy='chunks',
                                      assure=True)

    pds = permutation(ds)

    assert_false(is_sorted(pds.sa.targets))
    assert_true(np.array_equal(pds.samples, ds.samples))
    for chunk_id in np.unique(pds.sa.chunks):
        chunk_ds = pds[pds.sa.chunks == chunk_id]
        assert_true(is_sorted(chunk_ds.sa.targets))

    permutation = AttributePermutator(attr='targets',
                                      strategy='chunks')
    assert_raises(ValueError, permutation, ds)
Author ID: beausievers, Project: PyMVPA, Code lines: 26, Source file: test_generators.py
Example 8: test_product_flatten
def test_product_flatten():
    nsamples = 17
    product_name_values = [('chan', ['C1', 'C2']),
                           ('freq', np.arange(4, 20, 6)),
                           ('time', np.arange(-200, 800, 200))]

    shape = (nsamples,) + tuple(len(v) for _, v in product_name_values)
    sample_names = ['samp%d' % i for i in xrange(nsamples)]

    # generate random data in four dimensions
    data = np.random.normal(size=shape)
    ds = Dataset(data, sa=dict(sample_names=sample_names))

    # apply flattening to ds
    flattener = ProductFlattenMapper(product_name_values)

    # test I/O (only if h5py is available)
    if externals.exists('h5py'):
        from mvpa2.base.hdf5 import h5save, h5load
        import tempfile
        import os

        _, testfn = tempfile.mkstemp('mapper.h5py', 'test_product')
        h5save(testfn, flattener)
        flattener = h5load(testfn)
        os.unlink(testfn)

    mds = flattener(ds)

    prod = lambda x: reduce(operator.mul, x)

    # ensure the size is ok
    assert_equal(mds.shape, (nsamples,) + (prod(shape[1:]),))

    ndim = len(product_name_values)

    idxs = [range(len(v)) for _, v in product_name_values]
    for si in xrange(nsamples):
        for fi, p in enumerate(itertools.product(*idxs)):
            data_tup = (si,) + p

            x = mds[si, fi]

            # value should match
            assert_equal(data[data_tup], x.samples[0, 0])

            # indices should match as well
            all_idxs = tuple(x.fa['chan_freq_time_indices'].value.ravel())
            assert_equal(p, all_idxs)

            # values and indices in each dimension should match
            for i, (name, value) in enumerate(product_name_values):
                assert_equal(x.fa[name].value, value[p[i]])
                assert_equal(x.fa[name + '_indices'].value, p[i])

    product_name_values += [('foo', [1, 2, 3])]
    flattener = ProductFlattenMapper(product_name_values)
    assert_raises(ValueError, flattener, ds)
Author ID: pckillerbrici, Project: PyMVPA, Code lines: 59, Source file: test_mapper.py
Example 9: test_vector_alignment_find_rotation_illegal_inputs
def test_vector_alignment_find_rotation_illegal_inputs(self):
    arr = np.asarray
    illegal_args = [
        [arr([1, 2]), arr([1, 3])],
        [arr([1, 2, 3]), arr([1, 3])],
        [arr([1, 2, 3]), np.random.normal(size=(3, 3))]
    ]
    for illegal_arg in illegal_args:
        assert_raises((ValueError, IndexError),
                      vector_alignment_find_rotation, *illegal_arg)
Author ID: swaroopgj, Project: PyMVPA, Code lines: 11, Source file: test_surfing_surface.py
Example 10: test_attrmap_conflicts
def test_attrmap_conflicts():
    am_n = AttributeMap({'a': 1, 'b': 2, 'c': 1})
    am_t = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='tuple')
    am_l = AttributeMap({'a': 1, 'b': 2, 'c': 1}, collisions_resolution='lucky')
    q_f = ['a', 'b', 'a', 'c']
    # should have no effect on forward mapping
    ok_(np.all(am_n.to_numeric(q_f) == am_t.to_numeric(q_f)))
    ok_(np.all(am_t.to_numeric(q_f) == am_l.to_numeric(q_f)))

    assert_raises(ValueError, am_n.to_literal, [2])
    r_t = am_t.to_literal([2, 1])
    r_l = am_l.to_literal([2, 1])
Author ID: Anhmike, Project: PyMVPA, Code lines: 12, Source file: test_attrmap.py
Example 11: test_mean_tpr
def test_mean_tpr():
    # Let's test now on some imbalanced sets
    assert_raises(ValueError, mean_tpr, [1], [])
    assert_raises(ValueError, mean_tpr, [], [1])
    assert_raises(ValueError, mean_tpr, [], [])

    # now an interesting one: a label present in the predictions
    # never occurs among the targets
    assert_raises(ValueError, mean_tpr, [1], [0])
    assert_raises(ValueError, mean_tpr, [0, 1], [0, 0])

    # but it should be ok to have some targets not present in prediction
    assert_equal(mean_tpr([0, 0], [0, 1]), .5)
    # the same regardless how many samples are in the 0-class,
    # if all are misclassified (winner by # of samples takes all)
    assert_equal(mean_tpr([0, 0, 0], [0, 0, 1]), .5)
    # whereas mean accuracy would be different
    assert_almost_equal(mean_match_accuracy([0, 0, 0], [0, 0, 1]), 2 / 3.)
Author ID: PyMVPA, Project: PyMVPA, Code lines: 16, Source file: test_errorfx.py
Example 12: test_splitter
def test_splitter():
    ds = give_data()
    # split with defaults
    spl1 = Splitter('chunks')
    assert_raises(NotImplementedError, spl1, ds)

    splits = list(spl1.generate(ds))
    assert_equal(len(splits), len(ds.sa['chunks'].unique))

    for split in splits:
        # it should have performed basic slicing!
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.sa['chunks'].unique), 1)
        assert_true('lastsplit' in split.a)
    assert_true(splits[-1].a.lastsplit)

    # now again, more customized
    spl2 = Splitter('targets', attr_values=[0, 1, 1, 2, 3, 3, 3], count=4,
                    noslicing=True)
    splits = list(spl2.generate(ds))
    assert_equal(len(splits), 4)
    for split in splits:
        # it should NOT have performed basic slicing!
        assert_false(split.samples.base is ds.samples)
        assert_equal(len(split.sa['targets'].unique), 1)
        assert_equal(len(split.sa['chunks'].unique), 10)
    assert_true(splits[-1].a.lastsplit)

    # two should be identical
    assert_array_equal(splits[1].samples, splits[2].samples)

    # now go wild and split by feature attribute
    ds.fa['roi'] = np.repeat([0, 1], 5)
    # splitter should auto-detect that this is a feature attribute
    spl3 = Splitter('roi')
    splits = list(spl3.generate(ds))
    assert_equal(len(splits), 2)
    for split in splits:
        assert_true(split.samples.base is ds.samples)
        assert_equal(len(split.fa['roi'].unique), 1)
        assert_equal(split.shape, (100, 5))

    # and finally test chained splitters
    cspl = ChainNode([spl2, spl3, spl1])
    splits = list(cspl.generate(ds))
    # 4 target splits, 2 roi splits each, and 10 chunks each
    assert_equal(len(splits), 80)
Author ID: Soletmons, Project: PyMVPA, Code lines: 47, Source file: test_generators.py
Example 13: test_collections
def test_collections():
    sa = SampleAttributesCollection()
    assert_equal(len(sa), 0)

    assert_raises(ValueError, sa.__setitem__, 'test', 0)
    l = range(5)
    sa['test'] = l
    # auto-wrapped
    assert_true(isinstance(sa['test'], ArrayCollectable))
    assert_equal(len(sa), 1)

    # names which are already present in dict interface
    assert_raises(ValueError, sa.__setitem__, 'values', range(5))

    sa_c = copy.deepcopy(sa)
    assert_equal(len(sa), len(sa_c))
    assert_array_equal(sa.test, sa_c.test)
Author ID: andreirusu, Project: PyMVPA, Code lines: 17, Source file: test_collections.py
Example 14: test_cached_query_engine
def test_cached_query_engine():
    """Test cached query engine
    """
    sphere = ne.Sphere(1)
    # dataset with just one "space"
    ds = datasets['3dlarge']
    qe0 = ne.IndexQueryEngine(myspace=sphere)
    qec = ne.CachedQueryEngine(qe0)

    # and ground truth one
    qe = ne.IndexQueryEngine(myspace=sphere)

    results_ind = []
    results_kw = []

    def cmp_res(res1, res2):
        comp = [x == y for x, y in zip(res1, res2)]
        ok_(np.all(comp))

    for iq, q in enumerate((qe, qec)):
        q.train(ds)
        # sequential train on the same should be ok in both cases
        q.train(ds)
        res_ind = [q[fid] for fid in xrange(ds.nfeatures)]
        res_kw = [q(myspace=x) for x in ds.fa.myspace]
        # test if results match
        cmp_res(res_ind, res_kw)

        results_ind.append(res_ind)
        results_kw.append(res_kw)

    # now check if results of cached were the same as of regular run
    cmp_res(results_ind[0], results_ind[1])

    # Now do sanity checks
    assert_raises(ValueError, qec.train, ds[:, :-1])
    assert_raises(ValueError, qec.train, ds.copy())
    ds2 = ds.copy()
    qec.untrain()
    qec.train(ds2)
    # should be the same results on the copy
    cmp_res(results_ind[0], [qec[fid] for fid in xrange(ds.nfeatures)])
    cmp_res(results_kw[0], [qec(myspace=x) for x in ds.fa.myspace])
    ok_(qec.train(ds2) is None)
Author ID: Anhmike, Project: PyMVPA, Code lines: 43, Source file: test_neighborhood.py
Example 15: test_query_engine
def test_query_engine():
    data = np.arange(54)
    # indices in 3D
    ind = np.transpose((np.ones((3, 3, 3)).nonzero()))
    # sphere generator for 3 elements diameter
    sphere = ne.Sphere(1)
    # dataset with just one "space"
    ds = Dataset([data, data], fa={'s_ind': np.concatenate((ind, ind))})
    # and the query engine attaching the generator to the "index-space"
    qe = ne.IndexQueryEngine(s_ind=sphere)
    # cannot train since the engine does not know about the second space
    assert_raises(ValueError, qe.train, ds)
    # now do it again with a full spec
    ds = Dataset([data, data], fa={'s_ind': np.concatenate((ind, ind)),
                                   't_ind': np.repeat([0, 1], 27)})
    qe = ne.IndexQueryEngine(s_ind=sphere, t_ind=None)
    qe.train(ds)
    # internal representation check
    # YOH: invalid for new implementation with lookup tables (dictionaries)
    #assert_array_equal(qe._searcharray,
    #                   np.arange(54).reshape(qe._searcharray.shape) + 1)
    # should give us one corner, collapsing the 't_ind'
    assert_array_equal(qe(s_ind=(0, 0, 0)),
                       [0, 1, 3, 9, 27, 28, 30, 36])
    # directly specifying an index for 't_ind' without having an ROI
    # generator, should give the same corner, but just once
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=0), [0, 1, 3, 9])
    # just out of the mask -- no match
    assert_array_equal(qe(s_ind=(3, 3, 3)), [])
    # also out of the mask -- but single match
    assert_array_equal(qe(s_ind=(2, 2, 3), t_ind=1), [53])
    # query by id
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=0), qe[0])
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=[0, 1]),
                       qe(s_ind=(0, 0, 0)))
    # should not fail if t_ind is outside
    assert_array_equal(qe(s_ind=(0, 0, 0), t_ind=[0, 1, 10]),
                       qe(s_ind=(0, 0, 0)))

    # should fail if asked about some unknown thing
    assert_raises(ValueError, qe.__call__, s_ind=(0, 0, 0), buga=0)

    # Test by using some literal feature attribute
    ds.fa['lit'] = ['roi1', 'ro2', 'r3'] * 18
    # should work as well as before
    assert_array_equal(qe(s_ind=(0, 0, 0)), [0, 1, 3, 9, 27, 28, 30, 36])
    # should fail if asked about some unknown (yet) thing
    assert_raises(ValueError, qe.__call__, s_ind=(0, 0, 0), lit='roi1')

    # Create qe which can query literals as well
    qe_lit = ne.IndexQueryEngine(s_ind=sphere, t_ind=None, lit=None)
    qe_lit.train(ds)
    # should work as well as before
    assert_array_equal(qe_lit(s_ind=(0, 0, 0)), [0, 1, 3, 9, 27, 28, 30, 36])
    # and subselect nicely -- only /3 ones
    assert_array_equal(qe_lit(s_ind=(0, 0, 0), lit='roi1'),
                       [0, 3, 9, 27, 30, 36])
    assert_array_equal(qe_lit(s_ind=(0, 0, 0), lit=['roi1', 'ro2']),
                       [0, 1, 3, 9, 27, 28, 30, 36])
Author ID: Anhmike, Project: PyMVPA, Code lines: 59, Source file: test_neighborhood.py
Example 16: test_sphere
def test_sphere():
    # test sphere initialization
    s = ne.Sphere(1)
    center0 = (0, 0, 0)
    center1 = (1, 1, 1)
    assert_equal(len(s(center0)), 7)
    target = array([array([-1, 0, 0]),
                    array([0, -1, 0]),
                    array([0, 0, -1]),
                    array([0, 0, 0]),
                    array([0, 0, 1]),
                    array([0, 1, 0]),
                    array([1, 0, 0])])
    # test of internals -- no recomputation of increments should be done
    prev_increments = s._increments
    assert_array_equal(s(center0), target)
    ok_(prev_increments is s._increments)
    # query lower dimensionality
    _ = s((0, 0))
    ok_(prev_increments is not s._increments)

    # test Sphere call
    target = [array([0, 1, 1]),
              array([1, 0, 1]),
              array([1, 1, 0]),
              array([1, 1, 1]),
              array([1, 1, 2]),
              array([1, 2, 1]),
              array([2, 1, 1])]
    res = s(center1)
    assert_array_equal(array(res), target)
    # They all should be tuples
    ok_(np.all([isinstance(x, tuple) for x in res]))

    # test for larger diameter
    s = ne.Sphere(4)
    assert_equal(len(s(center1)), 257)

    # test extent keyword
    #s = ne.Sphere(4, extent=(1, 1, 1))
    #assert_array_equal(array(s((0, 0, 0))), array([[0, 0, 0]]))

    # test Errors during initialisation and call
    #assert_raises(ValueError, ne.Sphere, 2)
    #assert_raises(ValueError, ne.Sphere, 1.0)

    # no longer extent available
    assert_raises(TypeError, ne.Sphere, 1, extent=(1))
    assert_raises(TypeError, ne.Sphere, 1, extent=(1.0, 1.0, 1.0))

    s = ne.Sphere(1)
    #assert_raises(ValueError, s, (1))
    if __debug__:
        # No float coordinates allowed for now...
        # XXX might like to change that ;)
        assert_raises(ValueError, s, (1.0, 1.0, 1.0))
Author ID: Anhmike, Project: PyMVPA, Code lines: 57, Source file: test_neighborhood.py
Example 17: test_gifti_dataset
def test_gifti_dataset(fn, format_, include_nodes):
    expected_ds = _get_test_dataset(include_nodes)

    expected_ds_sa = expected_ds.copy(deep=True)
    expected_ds_sa.sa['chunks'] = [4, 3, 2, 1, 3, 2]
    expected_ds_sa.sa['targets'] = ['t%d' % i for i in xrange(6)]

    # build GIFTI file from scratch
    gifti_string = _build_gifti_string(format_, include_nodes)
    with open(fn, 'w') as f:
        f.write(gifti_string)

    # reading GIFTI file
    ds = gifti_dataset(fn)
    assert_datasets_almost_equal(ds, expected_ds)

    # test GiftiImage input
    img = nb_giftiio.read(fn)
    ds2 = gifti_dataset(img)
    assert_datasets_almost_equal(ds2, expected_ds)

    # test using Nibabel's output from write
    nb_giftiio.write(img, fn)
    ds3 = gifti_dataset(fn)
    assert_datasets_almost_equal(ds3, expected_ds)

    # test targets and chunks arguments
    ds3_sa = gifti_dataset(fn, targets=expected_ds_sa.targets,
                           chunks=expected_ds_sa.chunks)
    assert_datasets_almost_equal(ds3_sa, expected_ds_sa)

    # test map2gifti
    img2 = map2gifti(ds)
    ds4 = gifti_dataset(img2)
    assert_datasets_almost_equal(ds4, expected_ds)

    map2gifti(ds, fn, encoding=format_)
    ds5 = gifti_dataset(fn)
    assert_datasets_almost_equal(ds5, expected_ds)

    # test map2gifti with array input; nodes are not stored
    map2gifti(ds.samples, fn)
    ds6 = gifti_dataset(fn)
    if include_nodes:
        assert_raises(AssertionError, assert_datasets_almost_equal,
                      ds6, expected_ds)
    else:
        assert_datasets_almost_equal(ds6, expected_ds)

    assert_raises(TypeError, gifti_dataset, ds3_sa)
    assert_raises(TypeError, map2gifti, img, fn)
Author ID: Soletmons, Project: PyMVPA, Code lines: 52, Source file: test_giftidataset.py
Example 18: test_array_collectable
def test_array_collectable():
    c = ArrayCollectable()

    # empty by default
    assert_equal(c.name, None)
    assert_equal(c.value, None)

    # late assignment
    c.name = 'somename'
    assert_raises(ValueError, c._set, 12345)
    assert_equal(c.value, None)
    c.value = np.arange(5)
    assert_equal(c.name, 'somename')
    assert_array_equal(c.value, np.arange(5))

    # immediate content
    data = np.random.random(size=(3, 10))
    c = ArrayCollectable(data.copy(), 'myname',
                         "This is a test", length=3)
    assert_equal(c.name, 'myname')
    assert_array_equal(c.value, data)
    assert_equal(c.__doc__, "This is a test")
    assert_equal(str(c), 'myname')

    # repr
    from numpy import array
    e = eval(repr(c))
    assert_equal(e.name, 'myname')
    assert_array_almost_equal(e.value, data)
    assert_equal(e.__doc__, "This is a test")

    # cannot assign array of wrong length
    assert_raises(ValueError, c._set, np.arange(5))
    assert_equal(len(c), 3)

    # shallow copy DOES create a view of value array
    c.value = np.arange(3)
    d = copy.copy(c)
    assert_true(d.value.base is c.value)

    # names starting with _ are not allowed
    assert_raises(ValueError, c._set_name, "_underscore")
Author ID: andreirusu, Project: PyMVPA, Code lines: 42, Source file: test_collections.py
Example 19: test_rfe_sensmap
def test_rfe_sensmap():
    # http://lists.alioth.debian.org/pipermail/pkg-exppsy-pymvpa/2013q3/002538.html
    # just a smoke test. fails with
    from mvpa2.clfs.svm import LinearCSVMC
    from mvpa2.clfs.meta import FeatureSelectionClassifier
    from mvpa2.measures.base import CrossValidation, RepeatedMeasure
    from mvpa2.generators.splitters import Splitter
    from mvpa2.generators.partition import NFoldPartitioner
    from mvpa2.misc.errorfx import mean_mismatch_error
    from mvpa2.mappers.fx import mean_sample
    from mvpa2.mappers.fx import maxofabs_sample
    from mvpa2.generators.base import Repeater
    from mvpa2.featsel.rfe import RFE
    from mvpa2.featsel.helpers import FractionTailSelector, BestDetector
    from mvpa2.featsel.helpers import NBackHistoryStopCrit
    from mvpa2.datasets import vstack

    from mvpa2.misc.data_generators import normal_feature_dataset

    # Let's simulate the beast -- 6 categories total, grouped into 3
    # super-ordinate, and actually without any 'superordinate' effect
    # since the subordinate categories are independent
    fds = normal_feature_dataset(nlabels=3,
                                 snr=1,  # 100,  # pure signal! ;)
                                 perlabel=9,
                                 nfeatures=6,
                                 nonbogus_features=range(3),
                                 nchunks=3)
    clfsvm = LinearCSVMC()

    rfesvm = RFE(clfsvm.get_sensitivity_analyzer(postproc=maxofabs_sample()),
                 CrossValidation(
                     clfsvm,
                     NFoldPartitioner(),
                     errorfx=mean_mismatch_error, postproc=mean_sample()),
                 Repeater(2),
                 fselector=FractionTailSelector(0.70, mode='select', tail='upper'),
                 stopping_criterion=NBackHistoryStopCrit(BestDetector(), 10),
                 update_sensitivity=True)

    fclfsvm = FeatureSelectionClassifier(clfsvm, rfesvm)

    sensanasvm = fclfsvm.get_sensitivity_analyzer(postproc=maxofabs_sample())

    # manually repeating/splitting so we do both RFE sensitivity and classification
    senses, errors = [], []
    for i, pset in enumerate(NFoldPartitioner().generate(fds)):
        # split partitioned dataset
        split = [d for d in Splitter('partitions').generate(pset)]
        # it also should train the classifier, so we could ask it about error
        senses.append(sensanasvm(split[0]))
        errors.append(mean_mismatch_error(fclfsvm.predict(split[1]),
                                          split[1].targets))

    senses = vstack(senses)
    errors = vstack(errors)

    # Let's compare against rerunning the beast simply for classification with CV
    errors_cv = CrossValidation(fclfsvm, NFoldPartitioner(),
                                errorfx=mean_mismatch_error)(fds)
    # and they should match
    assert_array_equal(errors, errors_cv)

    # buggy!
    cv_sensana_svm = RepeatedMeasure(sensanasvm, NFoldPartitioner())
    senses_rm = cv_sensana_svm(fds)

    #print senses.samples, senses_rm.samples
    #print errors, errors_cv.samples

    assert_raises(AssertionError,
                  assert_array_almost_equal,
                  senses.samples, senses_rm.samples)
    raise SkipTest("Known failure for repeated measures: https://github.com/PyMVPA/PyMVPA/issues/117")
Author ID: beausievers, Project: PyMVPA, Code lines: 71, Source file: test_usecases.py
Example 20: test_gnbsearchlight_permutations
def test_gnbsearchlight_permutations():
    import mvpa2
    from mvpa2.base.node import ChainNode
    from mvpa2.clfs.gnb import GNB
    from mvpa2.generators.base import Repeater
    from mvpa2.generators.partition import NFoldPartitioner, OddEvenPartitioner
    #import mvpa2.generators.permutation
    #reload(mvpa2.generators.permutation)
    from mvpa2.generators.permutation import AttributePermutator
    from mvpa2.testing.datasets import datasets
    from mvpa2.measures.base import CrossValidation
    from mvpa2.measures.gnbsearchlight import sphere_gnbsearchlight
    from mvpa2.measures.searchlight import sphere_searchlight
    from mvpa2.mappers.fx import mean_sample
    from mvpa2.misc.errorfx import mean_mismatch_error
    from mvpa2.clfs.stats import MCNullDist
    from mvpa2.testing.tools import assert_raises, ok_, assert_array_less

    # mvpa2.debug.active = ['APERM', 'SLC'] #, 'REPM']
    # mvpa2.debug.metrics += ['pid']
    count = 10
    nproc = 1 + int(mvpa2.externals.exists('pprocess'))
    ds = datasets['3dsmall'].copy()
    ds.fa['voxel_indices'] = ds.fa.myspace

    slkwargs = dict(radius=3, space='voxel_indices', enable_ca=['roi_sizes'],
                    center_ids=[1, 10, 70, 100])

    mvpa2.seed(mvpa2._random_seed)
    clf = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')

    repeater = Repeater(count=count)
    permutator = AttributePermutator('targets', limit={'partitions': 1},
                                     count=1)

    null_sl = sphere_gnbsearchlight(clf,
                                    ChainNode([splt, permutator],
                                              space=splt.get_space()),
                                    postproc=mean_sample(),
                                    errorfx=mean_mismatch_error,
                                    **slkwargs)

    distr_est = MCNullDist(repeater, tail='left', measure=null_sl,
                           enable_ca=['dist_samples'])
    sl = sphere_gnbsearchlight(clf, splt,
                               reuse_neighbors=True,
                               null_dist=distr_est, postproc=mean_sample(),
                               errorfx=mean_mismatch_error,
                               **slkwargs)
    if __debug__:  # assert is done only without -O mode
        assert_raises(NotImplementedError, sl, ds)

    # "ad-hoc searchlights can't handle yet varying targets across partitions"
    if False:
        # after above limitation is removed -- enable
        sl_map = sl(ds)
        sl_null_prob = sl.ca.null_prob.samples.copy()

    mvpa2.seed(mvpa2._random_seed)
    ### 'normal' Searchlight
    clf = GNB()
    splt = NFoldPartitioner(cvtype=2, attr='chunks')
    repeater = Repeater(count=count)
    permutator = AttributePermutator('targets', limit={'partitions': 1},
                                     count=1)
    # rng=np.random.RandomState(0))  # to trigger failure since the same np.random
    #                                # state would be reused across all pprocesses
    null_cv = CrossValidation(clf,
                              ChainNode([splt, permutator],
                                        space=splt.get_space()),
                              postproc=mean_sample())
    null_sl_normal = sphere_searchlight(null_cv, nproc=nproc, **slkwargs)
    distr_est_normal = MCNullDist(repeater, tail='left', measure=null_sl_normal,
                                  enable_ca=['dist_samples'])

    cv = CrossValidation(clf, splt, errorfx=mean_mismatch_error,
                         enable_ca=['stats'], postproc=mean_sample())
    sl = sphere_searchlight(cv, nproc=nproc, null_dist=distr_est_normal,
                            **slkwargs)
    sl_map_normal = sl(ds)
    sl_null_prob_normal = sl.ca.null_prob.samples.copy()

    # For every feature we should get some variance in the estimates.
    # In case of failure they are all really close to each other (up to
    # numerical precision), so the variance will be close to 0
    assert_array_less(-np.var(distr_est_normal.ca.dist_samples.samples[0],
                              axis=1), -1e-5)
    for s in distr_est_normal.ca.dist_samples.samples[0]:
        ok_(len(np.unique(s)) > 1)
Author ID: beausievers, Project: PyMVPA, Code lines: 82, Source file: test_usecases.py
Note: The mvpa2.testing.tools.assert_raises examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. Please consult each project's License before redistributing or using the code; do not reproduce this article without permission.