This article collects typical usage examples of the Python function mvpa.testing.tools.assert_equal. If you have been wondering what assert_equal does, how to call it, or what real-world uses look like, the curated code samples below should help.
The following section presents 20 code examples of the assert_equal function, sorted by popularity by default.
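Before diving into the examples, here is a minimal, self-contained sketch of how assert_equal and its companion helpers are typically used inside a test function. This is an illustrative sketch, not code taken from PyMVPA itself: it assumes an environment where mvpa.testing.tools re-exports the standard nose-style assertions (assert_equal, assert_raises) alongside NumPy's assert_array_equal, and the test function name test_assert_equal_sketch is made up for illustration.

import numpy as np
from mvpa.testing.tools import assert_equal, assert_array_equal, assert_raises

def test_assert_equal_sketch():
    # assert_equal raises AssertionError when its two arguments differ
    assert_equal(len([1, 2, 3]), 3)
    assert_equal(np.arange(6).reshape(3, 2).shape, (3, 2))
    # assert_array_equal compares array-likes element-wise
    assert_array_equal(np.arange(3), [0, 1, 2])
    # assert_raises checks that the callable raises the expected exception
    assert_raises(ValueError, int, 'not a number')

Compared with a bare assert statement, these helpers produce informative failure messages on mismatch, which is why the examples below use them consistently.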
Example 1: test_forward_dense_array_mapper
def test_forward_dense_array_mapper():
    mask = np.ones((3, 2), dtype="bool")
    map_ = mask_mapper(mask)
    # test shape reports
    assert_equal(map_.forward1(mask).shape, (6,))
    # test 1sample mapping
    assert_array_equal(map_.forward1(np.arange(6).reshape(3, 2)), [0, 1, 2, 3, 4, 5])
    # test 4sample mapping
    foursample = map_.forward(np.arange(24).reshape(4, 3, 2))
    assert_array_equal(
        foursample, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]]
    )
    # check incomplete masks
    mask[1, 1] = 0
    map_ = mask_mapper(mask)
    assert_equal(map_.forward1(mask).shape, (5,))
    assert_array_equal(map_.forward1(np.arange(6).reshape(3, 2)), [0, 1, 2, 4, 5])
    # check that it doesn't accept wrong dataspace
    assert_raises(ValueError, map_.forward, np.arange(4).reshape(2, 2))
    # check fail if neither mask nor shape
    assert_raises(ValueError, mask_mapper)
    # check that a full mask is automatically created when providing shape
    m = mask_mapper(shape=(2, 3, 4))
    mp = m.forward1(np.arange(24).reshape(2, 3, 4))
    assert_array_equal(mp, np.arange(24))
Developer: B-Rich | Project: PyMVPA | Lines: 32 | Source: test_arraymapper.py
Example 2: test_repr
def test_repr():
    # this time give mask only by its target length
    sm = StaticFeatureSelection(slice(None), space='myspace')
    # check reproduction
    sm_clone = eval(repr(sm))
    assert_equal(repr(sm_clone), repr(sm))
Developer: B-Rich | Project: PyMVPA | Lines: 7 | Source: test_mapper.py
Example 3: test_repeater
def test_repeater():
    reps = 4
    r = Repeater(reps, space='OMG')
    dsl = [ds for ds in r.generate(Dataset([0, 1]))]
    assert_equal(len(dsl), reps)
    for i, ds in enumerate(dsl):
        assert_equal(ds.a.OMG, i)
Developer: esc | Project: PyMVPA | Lines: 7 | Source: test_generators.py
Example 4: test_slicing
def test_slicing(self):
    spl = HalfSplitter()
    splits = [(train, test) for (train, test) in spl(self.data)]
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base is self.data.samples)
        assert_true(s[1].samples.base is self.data.samples)
    spl = HalfSplitter(noslicing=True)
    splits = [(train, test) for (train, test) in spl(self.data)]
    for s in splits:
        # no slicing at all
        assert_false(s[0].samples.base is self.data.samples)
        assert_false(s[1].samples.base is self.data.samples)
    spl = NFoldSplitter()
    splits = [(train, test) for (train, test) in spl(self.data)]
    for i, s in enumerate(splits):
        # training sliced only for first and last split
        if i == 0 or i == len(splits) - 1:
            assert_true(s[0].samples.base is self.data.samples)
        else:
            assert_false(s[0].samples.base is self.data.samples)
        # we get slicing all the time
        assert_true(s[1].samples.base is self.data.samples)
    step_ds = Dataset(np.random.randn(20, 2),
                      sa={'chunks': np.tile([0, 1], 10)})
    spl = OddEvenSplitter()
    splits = [(train, test) for (train, test) in spl(step_ds)]
    assert_equal(len(splits), 2)
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base is step_ds.samples)
        assert_true(s[1].samples.base is step_ds.samples)
Developer: arokem | Project: PyMVPA | Lines: 32 | Source: test_splitter.py
Example 5: test_simple_n_minus_one_cv
def test_simple_n_minus_one_cv(self):
    data = get_mv_pattern(3)
    data.init_origids('samples')
    self.failUnless(data.nsamples == 120)
    self.failUnless(data.nfeatures == 2)
    self.failUnless(
        (data.sa.targets ==
         [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] * 6).all())
    self.failUnless(
        (data.sa.chunks ==
         [k for k in range(1, 7) for i in range(20)]).all())
    assert_equal(len(np.unique(data.sa.origids)), data.nsamples)
    transerror = TransferError(sample_clf_nl)
    cv = CrossValidatedTransferError(
        transerror,
        NFoldSplitter(cvtype=1),
        enable_ca=['confusion', 'training_confusion',
                   'samples_error'])
    results = cv(data)
    self.failUnless((results.samples < 0.2).all() and (results.samples >= 0.0).all())
    # TODO: test accessibility of {training_,}confusion{,s} of
    # CrossValidatedTransferError
    self.failUnless(isinstance(cv.ca.samples_error, dict))
    self.failUnless(len(cv.ca.samples_error) == data.nsamples)
    # one value for each origid
    assert_array_equal(sorted(cv.ca.samples_error.keys()),
                       sorted(data.sa.origids))
    for k, v in cv.ca.samples_error.iteritems():
        self.failUnless(len(v) == 1)
Developer: geeragh | Project: PyMVPA | Lines: 34 | Source: test_clfcrossval.py
Example 6: test_sampleslicemapper
def test_sampleslicemapper():
    # this does nothing but Dataset.__getitem__ which is tested elsewhere -- but
    # at least we run it
    ds = datasets['uni2small']
    ssm = SampleSliceMapper(slice(3, 8, 2))
    sds = ssm(ds)
    assert_equal(len(sds), 3)
Developer: B-Rich | Project: PyMVPA | Lines: 7 | Source: test_mapper.py
Example 7: test_repr
def test_repr():
    # this time give mask only by its target length
    sm = FeatureSliceMapper(slice(None), inspace="myspace")
    # check reproduction
    sm_clone = eval(repr(sm))
    assert_equal(repr(sm_clone), repr(sm))
Developer: arokem | Project: PyMVPA | Lines: 7 | Source: test_mapper.py
Example 8: test_strip_boundary
def test_strip_boundary():
    ds = datasets['hollow']
    ds.sa['btest'] = np.repeat([0, 1], 20)
    sn = StripBoundariesSamples('btest', 1, 2)
    sds = sn(ds)
    assert_equal(len(sds), len(ds) - 3)
    for i in [19, 20, 21]:
        assert_false(i in sds.samples.sid)
Developer: B-Rich | Project: PyMVPA | Lines: 8 | Source: test_mapper.py
Example 9: test_eep_bin
def test_eep_bin():
    eb = EEPBin(os.path.join(pymvpa_dataroot, 'eep.bin'))
    assert_equal(eb.nchannels, 32)
    assert_equal(eb.nsamples, 2)
    assert_equal(eb.ntimepoints, 4)
    assert_true(eb.t0 - eb.dt < 0.00000001)
    assert_equal(len(eb.channels), 32)
    assert_equal(eb.data.shape, (2, 32, 4))
Developer: B-Rich | Project: PyMVPA | Lines: 9 | Source: test_eepdataset.py
Example 10: test_sphere
def test_sphere():
    # test sphere initialization
    s = ne.Sphere(1)
    center0 = (0, 0, 0)
    center1 = (1, 1, 1)
    assert_equal(len(s(center0)), 7)
    target = array([array([-1, 0, 0]),
                    array([ 0, -1, 0]),
                    array([ 0, 0, -1]),
                    array([ 0, 0, 0]),
                    array([ 0, 0, 1]),
                    array([ 0, 1, 0]),
                    array([ 1, 0, 0])])
    # test of internals -- no recomputation of increments should be done
    prev_increments = s._increments
    assert_array_equal(s(center0), target)
    ok_(prev_increments is s._increments)
    # query lower dimensionality
    _ = s((0, 0))
    ok_(not prev_increments is s._increments)
    # test Sphere call
    target = [array([0, 1, 1]),
              array([1, 0, 1]),
              array([1, 1, 0]),
              array([1, 1, 1]),
              array([1, 1, 2]),
              array([1, 2, 1]),
              array([2, 1, 1])]
    res = s(center1)
    assert_array_equal(array(res), target)
    # they all should be tuples
    ok_(np.all([isinstance(x, tuple) for x in res]))
    # test for larger diameter
    s = ne.Sphere(4)
    assert_equal(len(s(center1)), 257)
    # test extent keyword
    #s = ne.Sphere(4, extent=(1, 1, 1))
    #assert_array_equal(array(s((0, 0, 0))), array([[0, 0, 0]]))
    # test errors during initialisation and call
    #assert_raises(ValueError, ne.Sphere, 2)
    #assert_raises(ValueError, ne.Sphere, 1.0)
    # no longer extent available
    assert_raises(TypeError, ne.Sphere, 1, extent=(1))
    assert_raises(TypeError, ne.Sphere, 1, extent=(1.0, 1.0, 1.0))
    s = ne.Sphere(1)
    #assert_raises(ValueError, s, (1))
    if __debug__:
        # No float coordinates allowed for now...
        # XXX might like to change that ;)
        assert_raises(ValueError, s, (1.0, 1.0, 1.0))
Developer: B-Rich | Project: PyMVPA | Lines: 57 | Source: test_neighborhood.py
Example 11: test_attrpermute
def test_attrpermute():
    ds = give_data()
    ds.sa['ids'] = range(len(ds))
    pristine_data = ds.samples.copy()
    permutation = AttributePermutator(['targets', 'ids'], assure=True)
    pds = permutation(ds)
    # should not touch the data
    assert_array_equal(pristine_data, pds.samples)
    # even keep the very same array
    assert_true(pds.samples.base is ds.samples)
    # there is no way that it can be the same attribute
    assert_false(np.all(pds.sa.ids == ds.sa.ids))
    # ids should reflect permutation setup
    assert_array_equal(pds.sa.targets, ds.sa.targets[pds.sa.ids])
    # other attribute should remain intact
    assert_array_equal(pds.sa.chunks, ds.sa.chunks)
    # now chunk-wise permutation
    permutation = AttributePermutator('ids', limit='chunks')
    pds = permutation(ds)
    # first ten should remain first ten
    assert_false(np.any(pds.sa.ids[:10] > 9))
    # same thing, but only permute single chunk
    permutation = AttributePermutator('ids', limit={'chunks': 3})
    pds = permutation(ds)
    # one chunk should change
    assert_false(np.any(pds.sa.ids[30:40] > 39))
    assert_false(np.any(pds.sa.ids[30:40] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))
    # or a list of chunks
    permutation = AttributePermutator('ids', limit={'chunks': [3, 4]})
    pds = permutation(ds)
    # two chunks should change
    assert_false(np.any(pds.sa.ids[30:50] > 49))
    assert_false(np.any(pds.sa.ids[30:50] < 30))
    # the rest not
    assert_array_equal(pds.sa.ids[:30], range(30))
    # and now try generating more permutations
    nruns = 2
    permutation = AttributePermutator(['targets', 'ids'], assure=True, count=nruns)
    pds = list(permutation.generate(ds))
    assert_equal(len(pds), nruns)
    for p in pds:
        assert_false(np.all(p.sa.ids == ds.sa.ids))
    # permute feature attrs
    ds.fa['ids'] = range(ds.shape[1])
    permutation = AttributePermutator('fa.ids', assure=True)
    pds = permutation(ds)
    assert_false(np.all(pds.fa.ids == ds.fa.ids))
Developer: esc | Project: PyMVPA | Lines: 54 | Source: test_generators.py
Example 12: test_glmnet_r_sensitivities
def test_glmnet_r_sensitivities():
    data = datasets['chirp_linear']
    clf = GLMNET_R()
    clf.train(data)
    # now ask for the sensitivities WITHOUT having to pass the dataset
    # again
    sens = clf.get_sensitivity_analyzer(force_train=False)(None)
    assert_equal(sens.shape, (1, data.nfeatures))
Developer: esc | Project: PyMVPA | Lines: 12 | Source: test_glmnet.py
Example 13: test_glmnet_c_sensitivities
def test_glmnet_c_sensitivities():
    data = normal_feature_dataset(perlabel=10, nlabels=2, nfeatures=4)
    # use GLMNET on binary problem
    clf = GLMNET_C()
    clf.train(data)
    # now ask for the sensitivities WITHOUT having to pass the dataset
    # again
    sens = clf.get_sensitivity_analyzer(force_train=False)(None)
    #failUnless(sens.shape == (data.nfeatures,))
    assert_equal(sens.shape, (len(data.UT), data.nfeatures))
Developer: esc | Project: PyMVPA | Lines: 13 | Source: test_glmnet.py
Example 14: test_harvesting
def test_harvesting(self):
    # get a dataset with a very high SNR
    data = get_mv_pattern(10)
    # do crossval with default errorfx and 'mean' combiner
    transerror = TransferError(clfswh['linear'][0])
    cv = CrossValidatedTransferError(
        transerror,
        NFoldSplitter(cvtype=1),
        harvest_attribs=['transerror.clf.ca.training_time'])
    result = cv(data)
    ok_(cv.ca.harvested.has_key('transerror.clf.ca.training_time'))
    assert_equal(len(cv.ca.harvested['transerror.clf.ca.training_time']),
                 len(data.UC))
Developer: geeragh | Project: PyMVPA | Lines: 13 | Source: test_clfcrossval.py
Example 15: test_attrmap_repr
def test_attrmap_repr():
    assert_equal(repr(AttributeMap()), "AttributeMap()")
    assert_equal(repr(AttributeMap(dict(a=2, b=1))),
                 "AttributeMap({'a': 2, 'b': 1})")
    assert_equal(repr(AttributeMap(dict(a=2, b=1), mapnumeric=True)),
                 "AttributeMap({'a': 2, 'b': 1}, mapnumeric=True)")
    assert_equal(repr(AttributeMap(dict(a=2, b=1), mapnumeric=True, collisions_resolution='tuple')),
                 "AttributeMap({'a': 2, 'b': 1}, mapnumeric=True, collisions_resolution='tuple')")
Developer: B-Rich | Project: PyMVPA | Lines: 8 | Source: test_attrmap.py
Example 16: test_sifter
def test_sifter():
    # somewhat duplicating the doctest
    ds = Dataset(samples=np.arange(8).reshape((4, 2)),
                 sa={'chunks': [0, 1, 2, 3],
                     'targets': ['c', 'c', 'p', 'p']})
    par = ChainNode([NFoldPartitioner(cvtype=2, attr='chunks'),
                     Sifter([('partitions', 2),
                             ('targets', ['c', 'p'])])
                     ])
    dss = list(par.generate(ds))
    assert_equal(len(dss), 4)
    for ds_ in dss:
        testing = ds[ds_.sa.partitions == 2]
        assert_array_equal(np.unique(testing.sa.targets), ['c', 'p'])
        # and we still have both targets present in training
        training = ds[ds_.sa.partitions == 1]
        assert_array_equal(np.unique(training.sa.targets), ['c', 'p'])
Developer: esc | Project: PyMVPA | Lines: 17 | Source: test_generators.py
Example 17: test_featuregroup_mapper
def test_featuregroup_mapper():
    ds = Dataset(np.arange(24).reshape(3, 8))
    ds.fa['roi'] = [0, 1] * 4
    # just to check
    ds.sa['chunks'] = np.arange(3)
    # correct results
    csamples = [[3, 4], [11, 12], [19, 20]]
    croi = [0, 1]
    cchunks = np.arange(3)
    m = mean_group_feature(['roi'])
    mds = m.forward(ds)
    assert_equal(mds.shape, (3, 2))
    assert_array_equal(mds.samples, csamples)
    assert_array_equal(mds.fa.roi, np.unique([0, 1] * 4))
    # SAs should simply remain the same
    assert_array_equal(mds.sa.chunks, np.arange(3))
Developer: geeragh | Project: PyMVPA | Lines: 18 | Source: test_fxmapper.py
Example 18: test_discarded_boundaries
def test_discarded_boundaries(self):
    ds = datasets['hollow']
    # four runs
    ds.sa['chunks'] = np.repeat(np.arange(4), 10)
    # do odd-even splitting for lots of boundaries in few splits
    part = ChainNode([OddEvenPartitioner(),
                      StripBoundariesSamples('chunks', 1, 2)])
    parts = [d.samples.sid for d in part.generate(ds)]
    # both datasets should have the same samples, because the boundaries are
    # identical and the same samples should be stripped
    assert_array_equal(parts[0], parts[1])
    # we strip 3 samples per boundary
    assert_equal(len(parts[0]), len(ds) - (3 * 3))
    for i in [9, 10, 11, 19, 20, 21, 29, 30, 31]:
        assert_false(i in parts[0])
Developer: B-Rich | Project: PyMVPA | Lines: 19 | Source: test_splitter.py
Example 19: test_slicing
def test_slicing(self):
    hs = HalfPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(hs.generate(self.data))
    for s in splits:
        # the partitioned dataset shares the data
        assert_true(s.samples.base is self.data.samples)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base.base is self.data.samples)
        assert_true(s[1].samples.base.base is self.data.samples)
    spl = Splitter(attr='partitions', noslicing=True)
    splits = [list(spl.generate(p)) for p in hs.generate(self.data)]
    for s in splits:
        # no slicing at all
        assert_false(s[0].samples.base is self.data.samples)
        assert_false(s[1].samples.base is self.data.samples)
    nfs = NFoldPartitioner()
    spl = Splitter(attr='partitions')
    splits = [list(spl.generate(p)) for p in nfs.generate(self.data)]
    for i, s in enumerate(splits):
        # training sliced only for first and last split
        if i == 0 or i == len(splits) - 1:
            assert_true(s[0].samples.base.base is self.data.samples)
        else:
            assert_true(s[0].samples.base is None)
        # we get slicing all the time
        assert_true(s[1].samples.base.base is self.data.samples)
    step_ds = Dataset(np.random.randn(20, 2),
                      sa={'chunks': np.tile([0, 1], 10)})
    oes = OddEvenPartitioner()
    spl = Splitter(attr='partitions')
    splits = list(oes.generate(step_ds))
    for s in splits:
        # the partitioned dataset shares the data
        assert_true(s.samples.base is step_ds.samples)
    splits = [list(spl.generate(p)) for p in oes.generate(step_ds)]
    assert_equal(len(splits), 2)
    for s in splits:
        # we get slicing all the time
        assert_true(s[0].samples.base.base is step_ds.samples)
        assert_true(s[1].samples.base.base is step_ds.samples)
Developer: B-Rich | Project: PyMVPA | Lines: 43 | Source: test_splitter.py
Example 20: test_simple_n_minus_one_cv
def test_simple_n_minus_one_cv(self):
    data = get_mv_pattern(3)
    data.init_origids('samples')
    self.failUnless(data.nsamples == 120)
    self.failUnless(data.nfeatures == 2)
    self.failUnless(
        (data.sa.targets ==
         [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] * 6).all())
    self.failUnless(
        (data.sa.chunks ==
         [k for k in range(1, 7) for i in range(20)]).all())
    assert_equal(len(np.unique(data.sa.origids)), data.nsamples)
    cv = CrossValidation(sample_clf_nl, NFoldPartitioner(),
                         enable_ca=['stats', 'training_stats'])
                         # 'samples_error'])
    results = cv(data)
    self.failUnless((results.samples < 0.2).all() and (results.samples >= 0.0).all())
Developer: B-Rich | Project: PyMVPA | Lines: 20 | Source: test_clfcrossval.py
Note: the mvpa.testing.tools.assert_equal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.