This page collects typical usage examples of the Python nengo.spa.Vocabulary class. If you are wondering what the Vocabulary class does, how to use it, or what real code that uses it looks like, the curated examples below may help.
The sections below present 20 code examples of the Vocabulary class, sorted by popularity by default.
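Before diving into the examples, here is a minimal usage sketch (written for this summary, not taken from any of the projects below) showing how a Vocabulary is typically created and queried; it only uses calls that also appear in the examples that follow:

import numpy as np
from nengo.spa import Vocabulary

# Create a 64-dimensional vocabulary with a fixed random state
vocab = Vocabulary(64, rng=np.random.RandomState(0))

# parse() creates (and caches) semantic pointers from an expression
ab = vocab.parse('A+B')            # creates 'A' and 'B', returns their sum
print(vocab.keys)                  # ['A', 'B']

# Indexing returns the named SemanticPointer; .v is its numpy vector
print(vocab['A'].v.shape)          # (64,)
print(vocab['A'].compare(ab))      # similarity of 'A' to the parsed sum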
Example 1: test_extend
def test_extend(rng):
    v = Vocabulary(16, rng=rng)
    v.parse('A+B')
    assert v.keys == ['A', 'B']
    assert not v.unitary

    # Test extending the vocabulary
    v.extend(['C', 'D'])
    assert v.keys == ['A', 'B', 'C', 'D']

    # Test extending the vocabulary with various unitary options
    v.extend(['E', 'F'], unitary=['E'])
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F']
    assert v.unitary == ['E']

    # Check if 'E' is unitary
    fft_val = np.fft.fft(v['E'].v)
    fft_imag = fft_val.imag
    fft_real = fft_val.real
    fft_norms = np.sqrt(fft_imag ** 2 + fft_real ** 2)
    assert np.allclose(fft_norms, np.ones(16))

    v.extend(['G', 'H'], unitary=True)
    assert v.keys == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']
    assert v.unitary == ['E', 'G', 'H']
Developer: 4n6strider, Project: nengo, Lines: 25, Source: test_vocabulary.py
Example 2: test_transform
def test_transform():
    v1 = Vocabulary(32, rng=np.random.RandomState(7))
    v2 = Vocabulary(64, rng=np.random.RandomState(8))
    A = v1.parse('A')
    B = v1.parse('B')
    C = v1.parse('C')

    # Transform from v1 to v2 using all keys
    t = v1.transform_to(v2)
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('C+B').compare(np.dot(t, C.v + B.v)) > 0.95

    # Transform using only a subset of the keys
    t = v1.transform_to(v2, keys=['A', 'B'])
    assert v2.parse('A').compare(np.dot(t, A.v)) > 0.95
    assert v2.parse('B').compare(np.dot(t, B.v)) > 0.95
Developer: Ocode, Project: nengo, Lines: 15, Source: test_vocabulary.py
Example 3: test_prob_cleanup
def test_prob_cleanup(rng):
    v = Vocabulary(64, rng=rng)
    assert 1.0 > v.prob_cleanup(0.7, 10000) > 0.9999
    assert 0.9999 > v.prob_cleanup(0.6, 10000) > 0.999
    assert 0.99 > v.prob_cleanup(0.5, 1000) > 0.9

    v = Vocabulary(128, rng=rng)
    assert 0.999 > v.prob_cleanup(0.4, 1000) > 0.997
    assert 0.99 > v.prob_cleanup(0.4, 10000) > 0.97
    assert 0.9 > v.prob_cleanup(0.4, 100000) > 0.8
Developer: CamZHU, Project: nengo, Lines: 10, Source: test_vocabulary.py
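As a follow-up to Example 3, here is a short sketch (an illustration written for this page, not part of the nengo test suite) of how prob_cleanup might be used in practice: it estimates the probability that a pointer whose similarity to its original has dropped to the given value can still be cleaned up correctly against a vocabulary of the given size, so it can guide the choice of dimensionality.

for dim in (64, 128, 256, 512):
    v = Vocabulary(dim)
    # probability of cleaning up correctly at similarity 0.4 among 1000 items
    if v.prob_cleanup(0.4, 1000) > 0.99:
        print('%d dimensions should be enough' % dim)
        break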
Example 4: test_am_wta
def test_am_wta(Simulator, plt, seed, rng):
    """Test the winner-take-all ability of the associative memory."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    def input_func(t):
        if t < 0.2:
            return vocab.parse('A+0.8*B').v
        elif t < 0.3:
            return np.zeros(D)
        else:
            return vocab.parse('0.8*A+B').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, wta_output=True)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.5)
    t = sim.trange()
    more_a = (t > 0.15) & (t < 0.2)
    more_b = t > 0.45

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)
    plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) < 0.2
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) < 0.2
Developer: LittileBee, Project: nengo, Lines: 45, Source: test_assoc_mem.py
Example 5: test_am_assoc_mem_threshold
def test_am_assoc_mem_threshold(Simulator):
    """Standard associative memory (differing input and output vocabularies).

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return vocab.parse('0.49*A').v
        else:
            return vocab.parse('0.79*A').v

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        out_node = nengo.Node(size_in=D2, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][490:500], vocab.parse("0.49*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[in_p][-10:], vocab.parse("0.79*A").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[out_p][490:500], vocab2.parse("0").v,
                       atol=.15, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab2.parse("A").v,
                       atol=.15, rtol=.01)
Developer: goaaron, Project: blouw-etal-2015, Lines: 44, Source: test_assoc_mem.py
Example 6: test_transform
def test_transform(rng):
    v1 = Vocabulary(32, rng=rng)
    v2 = Vocabulary(64, rng=rng)
    A = v1.parse("A")
    B = v1.parse("B")
    C = v1.parse("C")

    t = v1.transform_to(v2)
    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("C+B").compare(np.dot(t, C.v + B.v)) > 0.9

    t = v1.transform_to(v2, keys=["A", "B"])
    assert v2.parse("A").compare(np.dot(t, A.v)) > 0.95
    assert v2.parse("B").compare(np.dot(t, B.v)) > 0.95
Developer: qqming113, Project: nengo, Lines: 15, Source: test_vocabulary.py
Example 7: test_include_pairs
def test_include_pairs():
    v = Vocabulary(10)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs is None

    v.include_pairs = True
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v["D"]
    assert v.key_pairs == ["A*B", "A*C", "B*C", "A*D", "B*D", "C*D"]

    v = Vocabulary(12, include_pairs=True)
    v["A"]
    v["B"]
    v["C"]
    assert v.key_pairs == ["A*B", "A*C", "B*C"]
Developer: qqming113, Project: nengo, Lines: 19, Source: test_vocabulary.py
Example 8: test_include_pairs
def test_include_pairs(rng):
    v = Vocabulary(10, rng=rng)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs is None

    v.include_pairs = True
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
    v.include_pairs = False
    assert v.key_pairs is None
    v.include_pairs = True
    v['D']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C', 'A*D', 'B*D', 'C*D']

    v = Vocabulary(12, include_pairs=True)
    v['A']
    v['B']
    v['C']
    assert v.key_pairs == ['A*B', 'A*C', 'B*C']
Developer: CamZHU, Project: nengo, Lines: 19, Source: test_vocabulary.py
Example 9: initialize_vis_vocab
def initialize_vis_vocab(self, vis_dim, vis_sps):
    if vis_sps.shape[0] != len(self.vis_sp_strs):
        raise RuntimeError('Vocabulatory.initialize_vis_vocab: ' +
                           'Mismatch in shape of raw vision SPs and ' +
                           'number of vision SP labels.')

    self.vis_dim = vis_dim
    self.vis = Vocabulary(self.vis_dim)
    for i, sp_str in enumerate(self.vis_sp_strs):
        self.vis.add(sp_str, vis_sps[i, :])
Developer: xchoo, Project: spaun2.0, Lines: 11, Source: vocabulator.py
Example 10: test_create_pointer_warning
def test_create_pointer_warning(rng):
    v = Vocabulary(2, rng=rng)

    # five pointers shouldn't fit
    with warns(UserWarning):
        v.parse('A')
        v.parse('B')
        v.parse('C')
        v.parse('D')
        v.parse('E')
Developer: CamZHU, Project: nengo, Lines: 10, Source: test_vocabulary.py
Example 11: test_am_spa_interaction
def test_am_spa_interaction(Simulator, seed, rng):
    """Make sure associative memory interacts with other SPA modules."""
    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return '0.49*A' if t < 0.5 else '0.79*A'

    with nengo.spa.SPA(seed=seed) as m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        m.am = AssociativeMemory(vocab, vocab2,
                                 input_keys=['A', 'B', 'C'],
                                 output_keys=['B', 'C', 'D'],
                                 default_output_key='A',
                                 threshold=0.5,
                                 inhibitable=True,
                                 wta_output=True,
                                 threshold_output=True)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Developer: falconlulu, Project: nengo, Lines: 31, Source: test_assoc_mem.py
Example 12: test_subset
def test_subset(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C+D+E+F+G')

    # Test creating a vocabulary subset
    v2 = v1.create_subset(['A', 'C', 'E'])
    assert v2.keys == ['A', 'C', 'E']
    assert v2['A'] == v1['A']
    assert v2['C'] == v1['C']
    assert v2['E'] == v1['E']
    assert v2.parent is v1

    # Test creating a subset from a subset (it should create off the parent)
    v3 = v2.create_subset(['C', 'E'])
    assert v3.parent is v2.parent and v2.parent is v1

    v3.include_pairs = True
    assert v3.key_pairs == ['C*E']
    assert not v1.include_pairs
    assert not v2.include_pairs

    # Test transform_to between subsets (should be identity transform)
    t = v1.transform_to(v2)
    assert v2.parse('A').compare(np.dot(t, v1.parse('A').v)) >= 0.99999999
Developer: 4n6strider, Project: nengo, Lines: 25, Source: test_vocabulary.py
Example 13: test_am_spa_interaction
def test_am_spa_interaction(Simulator):
    """Standard associative memory interacting with other SPA modules.

    Options: threshold = 0.5, non-inhibitable, non-wta, does not output
    utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 16
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        if t < 0.5:
            return '0.49*A'
        else:
            return '0.79*A'

    m = nengo.spa.SPA('model', seed=123)
    with m:
        m.buf = nengo.spa.Buffer(D)
        m.input = nengo.spa.Input(buf=input_func)
        m.am = AssociativeMemory(vocab, vocab2, threshold=0.5)

        cortical_actions = nengo.spa.Actions('am = buf')
        m.c_act = nengo.spa.Cortical(cortical_actions)

    # Check to see if model builds properly. No functionality test needed
    Simulator(m)
Developer: goaaron, Project: blouw-etal-2015, Lines: 34, Source: test_assoc_mem.py
Example 14: test_am_defaults
def test_am_defaults(Simulator):
    """Default assoc memory.

    Options: auto-associative, threshold = 0.3, non-inhibitable, non-wta,
    does not output utilities or thresholded utilities.
    """
    rng = np.random.RandomState(1)

    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    m = nengo.Network('model', seed=123)
    with m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v,
                             label='input')
        out_node = nengo.Node(size_in=D, label='output')
        nengo.Connection(in_node, am.input)
        nengo.Connection(am.output, out_node, synapse=0.03)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(out_node)

    sim = Simulator(m)
    sim.run(1.0)

    assert np.allclose(sim.data[in_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
    assert np.allclose(sim.data[out_p][-10:], vocab.parse("A").v,
                       atol=.1, rtol=.01)
Developer: goaaron, Project: blouw-etal-2015, Lines: 32, Source: test_assoc_mem.py
Example 15: test_am_basic
def test_am_basic(Simulator, plt, seed, rng):
    """Basic associative memory test."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab)
        in_node = nengo.Node(output=vocab.parse("A").v, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.2)
    t = sim.trange()

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.ylim(top=1.1)
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))
    plt.plot(t[t > 0.15], np.ones(t.shape)[t > 0.15] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][t > 0.15], vocab.parse("A").v) > 0.99
    assert similarity(sim.data[out_p][t > 0.15], vocab.parse("A").v) > 0.8
Developer: LittileBee, Project: nengo, Lines: 32, Source: test_assoc_mem.py
Example 16: test_readonly
def test_readonly(rng):
    v1 = Vocabulary(32, rng=rng)
    v1.parse('A+B+C')

    v1.readonly = True

    with pytest.raises(ValueError):
        v1.parse('D')
Developer: 4n6strider, Project: nengo, Lines: 8, Source: test_vocabulary.py
Example 17: test_am_threshold
def test_am_threshold(Simulator, plt, seed, rng):
    """Associative memory thresholding with differing input/output vocabs."""
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D')

    D2 = int(D / 2)
    vocab2 = Vocabulary(D2, rng=rng)
    vocab2.parse('A+B+C+D')

    def input_func(t):
        return vocab.parse('0.49*A').v if t < 0.1 else vocab.parse('0.79*A').v

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab, vocab2, threshold=0.5)
        in_node = nengo.Node(output=input_func, label='input')
        nengo.Connection(in_node, am.input)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)

    sim = Simulator(m)
    sim.run(0.3)
    t = sim.trange()
    below_th = t < 0.1
    above_th = t > 0.25

    plt.subplot(2, 1, 1)
    plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))
    plt.ylabel("Input")
    plt.legend(vocab.keys, loc='best')
    plt.subplot(2, 1, 2)
    plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab2))
    plt.plot(t[above_th], np.ones(t.shape)[above_th] * 0.8, c='g', lw=2)
    plt.ylabel("Output")
    plt.legend(vocab.keys, loc='best')

    assert similarity(sim.data[in_p][below_th], vocab.parse("A").v) > 0.48
    assert similarity(sim.data[in_p][above_th], vocab.parse("A").v) > 0.78
    assert similarity(sim.data[out_p][below_th], vocab2.parse("0").v) < 0.01
    assert similarity(sim.data[out_p][above_th], vocab2.parse("A").v) > 0.8
Developer: LittileBee, Project: nengo, Lines: 41, Source: test_assoc_mem.py
Example 18: Position
vis_sp_strs.extend(ps_task_vis_sp_strs)

# --- Position (enumerated) semantic pointers ---
pos_sp_strs = ['POS%i' % (i + 1) for i in range(cfg.max_enum_list_pos)]

# --- Operations semantic pointers
ops_sp_strs = ['ADD', 'INC']

# --- Unitary semantic pointers
unitary_sp_strs = [num_sp_strs[0], pos_sp_strs[0]]
unitary_sp_strs.extend(ops_sp_strs)

# ####################### Vocabulary definitions ##############################
# --- Primary vocabulary ---
vocab = Vocabulary(cfg.sp_dim, unitary=unitary_sp_strs, rng=cfg.rng)

# --- Add numerical sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[0], num_sp_strs[0]))
add_sp = vocab[ops_sp_strs[0]]
num_sp = vocab[num_sp_strs[0]].copy()
for i in range(len(num_sp_strs) - 1):
    num_sp = num_sp.copy() * add_sp
    vocab.add(num_sp_strs[i + 1], num_sp)

# --- Add positional sp's ---
vocab.parse('%s+%s' % (ops_sp_strs[1], pos_sp_strs[0]))
inc_sp = vocab[ops_sp_strs[1]]
pos_sp = vocab[pos_sp_strs[0]].copy()
for i in range(len(pos_sp_strs) - 1):
    pos_sp = pos_sp.copy() * inc_sp
Developer: Stanford-BIS, Project: spaun2.0, Lines: 31, Source: vocabs.py
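A brief follow-up sketch for Example 18 (an illustration with assumed names, not part of the spaun2.0 source): because ADD and INC are declared unitary, each numerical or positional pointer above is the previous one circularly convolved with the operator, so binding with the approximate inverse (~) should step back to the previous item. This assumes vocab, ops_sp_strs, and num_sp_strs (with at least three entries) from the snippet are in scope.

add_sp = vocab[ops_sp_strs[0]]          # the unitary 'ADD' pointer
third = vocab[num_sp_strs[2]]           # built above as num_sp_strs[1] * ADD
second_estimate = third * ~add_sp       # unbind ADD
print(second_estimate.compare(vocab[num_sp_strs[1]]))  # close to 1.0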
Example 19: test_am_complex
def test_am_complex(Simulator, plt, seed, rng):
    """Complex auto-associative memory test.

    Has a default output vector, outputs utilities, and becomes inhibited.
    """
    D = 64
    vocab = Vocabulary(D, rng=rng)
    vocab.parse('A+B+C+D+E+F')
    vocab2 = vocab.create_subset(["A", "B", "C", "D"])

    def input_func(t):
        if t < 0.25:
            return vocab.parse('A+0.8*B').v
        elif t < 0.5:
            return vocab.parse('0.8*A+B').v
        else:
            return vocab.parse('E').v

    def inhib_func(t):
        return int(t > 0.75)

    with nengo.Network('model', seed=seed) as m:
        am = AssociativeMemory(vocab2,
                               default_output_vector=vocab.parse("F").v,
                               inhibitable=True,
                               output_utilities=True,
                               output_thresholded_utilities=True)
        in_node = nengo.Node(output=input_func, label='input')
        inhib_node = nengo.Node(output=inhib_func, label='inhib')
        nengo.Connection(in_node, am.input)
        nengo.Connection(inhib_node, am.inhibit)

        in_p = nengo.Probe(in_node)
        out_p = nengo.Probe(am.output, synapse=0.03)
        utils_p = nengo.Probe(am.utilities, synapse=0.05)
        utils_th_p = nengo.Probe(am.thresholded_utilities, synapse=0.05)

    sim = Simulator(m)
    sim.run(1.0)
    t = sim.trange()

    # Input: A+0.8*B
    more_a = (t >= 0.2) & (t < 0.25)
    # Input: 0.8*A+B
    more_b = (t >= 0.45) & (t < 0.5)
    # Input: E (but E isn't in the memory vocabulary, so should output F)
    all_e = (t >= 0.7) & (t < 0.75)
    # Input: E (but inhibited, so should output nothing)
    inhib = (t >= 0.95)

    def plot(i, y, ylabel):
        plt.subplot(4, 1, i)
        plt.plot(t, y)
        plt.axvline(0.25, c='k')
        plt.axvline(0.5, c='k')
        plt.axvline(0.75, c='k')
        plt.ylabel(ylabel)
        plt.legend(vocab.keys[:y.shape[1]], loc='best', fontsize='xx-small')

    plot(1, nengo.spa.similarity(sim.data[in_p], vocab), "Input")
    plot(2, sim.data[utils_p], "Utilities")
    plot(3, sim.data[utils_th_p], "Thresholded utilities")
    plot(4, nengo.spa.similarity(sim.data[out_p], vocab), "Output")

    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[:2] > [0.8, 0.5])
    assert all(np.mean(sim.data[utils_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[:2] > [0.5, 0.8])
    assert all(np.mean(sim.data[utils_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_p][inhib], np.ones((1, 4))) < 0.05
    assert all(np.mean(sim.data[utils_th_p][more_a], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_a], axis=0)[2:] < [0.01, 0.01])
    assert all(np.mean(sim.data[utils_th_p][more_b], axis=0)[:2] > [0.8, 0.8])
    assert all(
        np.mean(sim.data[utils_th_p][more_b], axis=0)[2:] < [0.01, 0.01])
    assert similarity(sim.data[utils_th_p][all_e], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[utils_th_p][inhib], np.ones((1, 4))) < 0.05
    assert similarity(sim.data[out_p][more_a], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_a], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("A").v) > 0.8
    assert similarity(sim.data[out_p][more_b], vocab.parse("B").v) > 0.8
    assert similarity(sim.data[out_p][all_e], vocab.parse("F").v) > 0.8
    assert similarity(sim.data[out_p][inhib], np.ones((1, D))) < 0.05
Developer: LittileBee, Project: nengo, Lines: 83, Source: test_assoc_mem.py
Example 20: test_capital
def test_capital(rng):
    v = Vocabulary(16, rng=rng)
    with pytest.raises(KeyError):
        v.parse('a')
    with pytest.raises(KeyError):
        v.parse('A+B+C+a')
Developer: CamZHU, Project: nengo, Lines: 6, Source: test_vocabulary.py
Note: the nengo.spa.Vocabulary examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.