This article collects typical usage examples of the Python function theano.tensor.vector. If you have been wondering how exactly to use the vector function in Python, what it does, or where to find examples of it, the curated code samples below may help.
The following presents 20 code examples of the vector function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
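Before the examples, here is a minimal, self-contained sketch of the pattern they all share: declare a symbolic 1-D array with T.vector, build an expression over it, and compile the expression into a callable with theano.function. The variable names below are illustrative only and do not come from any of the projects listed:

import theano
import theano.tensor as T

x = T.vector('x')  # symbolic 1-D array; dtype defaults to theano.config.floatX
y = T.vector('y')
expr = T.dot(x, y)                 # build a symbolic expression
f = theano.function([x, y], expr)  # compile it into a callable
print(f([1.0, 2.0], [3.0, 4.0]))   # 11.0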
Example 1: calculate
def calculate(w1, w2, data, display):
    x = T.vector('x')
    w = T.vector('w')
    s = 1 / (1 + T.exp(-T.dot(x, w)))
    logistic = theano.function([x, w], s)
    if display:
        print("With: w1 = %f and w2 = %f" % (w1, w2))
    sum_error = 0
    sum_error_square = 0
    if isinstance(data, str) or not len(data):
        # `data` may be a file name (or empty, in which case we fall back to
        # 'Data.txt'); load the samples from disk into a fresh list.
        filename = data if isinstance(data, str) and len(data) else 'Data.txt'
        data = []
        with open('dataFiles/' + filename) as fp:
            reader = csv.reader(fp, delimiter=',')
            for line in reader:
                data.append([int(line[0]), float(line[1]), float(line[2])])
    if display:
        print('y\t\tf(x)\t\tE\t\tE^2')
    for i in range(0, len(data)):
        x1 = data[i][1]
        x2 = data[i][2]
        f = logistic([x1, x2], [w1, w2])
        e = data[i][0] - f
        e2 = e ** 2
        sum_error += e
        sum_error_square += e2
        if display:
            print('%f\t%f\t%f\t%f' % (data[i][0], f, e, e2))
    if display:
        print("\nSum:\t\t\t\t%f\t%f" % (sum_error, sum_error_square))
    return sum_error_square
Developer: sookool99, Project: Summer2015, Lines: 34, Source: error.py
Example 2: test_softmax_optimizations_w_bias2
def test_softmax_optimizations_w_bias2(self):
    x = tensor.matrix('x')
    b = tensor.vector('b')
    c = tensor.vector('c')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    env = gof.Env(
        [x, b, c, one_of_n],
        [op(softmax(T.add(x, b, c)), one_of_n)])

    assert env.outputs[0].owner.op == op

    print('BEFORE')
    for node in env.toposort():
        print(node.op)
    print('----')

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(env)

    print('AFTER')
    for node in env.toposort():
        print(node.op)
    print('====')

    assert len(env.toposort()) == 3
    assert str(env.outputs[0].owner.op) == 'OutputGuard'
    assert env.outputs[0].owner.inputs[0].owner.op == crossentropy_softmax_argmax_1hot_with_bias
Developer: lberrada, Project: Theano, Lines: 28, Source: test_nnet.py
Example 3: test_grad_lazy_if
def test_grad_lazy_if(self):
    # Tests that we can compute the gradients through lazy if
    x = tensor.vector('x', dtype=self.dtype)
    y = tensor.vector('y', dtype=self.dtype)
    c = tensor.iscalar('c')
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])

    f = theano.function([c, x, y], [self.cast_output(gx),
                                    self.cast_output(gy)],
                        mode=self.mode)
    # Only 2 of the 3 ifelse ops are moved to the GPU;
    # the one that stays on the CPU is for the shape.
    self.assertFunctionContains(f, self.get_ifelse(1), min=2, max=3)

    rng = numpy.random.RandomState(utt.fetch_seed())
    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx = numpy.asarray(rng.uniform(size=(xlen,)), self.dtype)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), self.dtype)
    gx0, gy0 = f(1, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(numpy.asarray(gx0) == 1.)
    assert numpy.all(numpy.asarray(gy0) == 0.)

    gx0, gy0 = f(0, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(numpy.asarray(gx0) == 0.)
    assert numpy.all(numpy.asarray(gy0) == 1.)
Developer: aboSamoor, Project: Theano, Lines: 32, Source: test_ifelse.py
Example 4: test_wrong_rcond_dimension
def test_wrong_rcond_dimension(self):
    x = tensor.vector()
    y = tensor.vector()
    z = tensor.vector()
    b = theano.tensor.nlinalg.lstsq()(x, y, z)
    f = function([x, y, z], b)
    self.assertRaises(np.linalg.LinAlgError, f, [2, 1], [2, 1], [2, 1])
Developer: EugenePY, Project: Theano, Lines: 7, Source: test_nlinalg.py
Example 5: _compile_func
def _compile_func():
    beta = T.vector('beta')
    b = T.scalar('b')
    X = T.matrix('X')
    y = T.vector('y')
    C = T.scalar('C')
    params = [beta, b, X, y, C]
    cost = 0.5 * (T.dot(beta, beta) + b * b) + C * T.sum(
        T.nnet.softplus(
            -T.dot(T.diag(y), T.dot(X, beta) + b)
        )
    )
    # Function computing in one go the cost, its gradient
    # with regard to beta and with regard to the bias.
    cost_grad = theano.function(params, [
        cost,
        T.grad(cost, beta),
        T.grad(cost, b)
    ])

    # Function for computing element-wise sigmoid, used for
    # prediction.
    log_predict = theano.function(
        [beta, b, X],
        T.nnet.sigmoid(b + T.dot(X, beta)),
        on_unused_input='warn'
    )

    return (cost_grad, log_predict)
Developer: alexisVallet, Project: dpm-identification, Lines: 29, Source: lr.py
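Below is a hypothetical way to call the two compiled functions this helper returns; the shapes (4 samples, 3 features), the labels in {-1, +1}, and the regularization weight C = 1.0 are illustrative assumptions, not values from the original project:

import numpy as np

cost_grad, log_predict = _compile_func()
X0 = np.random.rand(4, 3)              # assumed: 4 samples, 3 features
y0 = np.array([1., -1., 1., -1.])      # assumed labels in {-1, +1}
beta0 = np.zeros(3)                    # initial weights
cost, g_beta, g_b = cost_grad(beta0, 0.0, X0, y0, 1.0)  # C = 1.0
scores = log_predict(beta0, 0.0, X0)   # element-wise sigmoid predictions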
Example 6: test_infer_shape
def test_infer_shape(self):
    for ndim in [1, 3]:
        x = T.TensorType(config.floatX, [False] * ndim)()
        shp = (np.arange(ndim) + 1) * 3
        a = np.random.random(shp).astype(config.floatX)

        for axis in self._possible_axis(ndim):
            for dtype in ["int8", "uint8", "uint64"]:
                r_var = T.scalar(dtype=dtype)
                r = np.asarray(3, dtype=dtype)
                if dtype in self.numpy_unsupported_dtypes:
                    r_var = T.vector(dtype=dtype)
                    self.assertRaises(TypeError, repeat, x, r_var)
                else:
                    self._compile_and_check([x, r_var],
                                            [RepeatOp(axis=axis)(x, r_var)],
                                            [a, r],
                                            self.op_class)

                    r_var = T.vector(dtype=dtype)
                    if axis is None:
                        r = np.random.randint(
                            1, 6, size=a.size).astype(dtype)
                    elif a.size > 0:
                        r = np.random.randint(
                            1, 6, size=a.shape[axis]).astype(dtype)
                    else:
                        r = np.random.randint(
                            1, 6, size=(10,)).astype(dtype)

                    self._compile_and_check(
                        [x, r_var],
                        [RepeatOp(axis=axis)(x, r_var)],
                        [a, r],
                        self.op_class)
Developer: Thrandis, Project: Theano, Lines: 35, Source: test_extra_ops.py
Example 7: test_tagging
def test_tagging():
    brick = TestBrick(0)
    x = tensor.vector('x')
    y = tensor.vector('y')
    z = tensor.vector('z')

    def check_output_variable(o):
        assert get_application_call(o).application.brick is brick
        assert (get_application_call(o.owner.inputs[0]).application.brick
                is brick)

    # Case 1: both positional arguments are provided.
    u, v = brick.apply(x, y)
    for o in [u, v]:
        check_output_variable(o)

    # Case 2: `y` is given as a keyword argument.
    u, v = brick.apply(x, y=y)
    for o in [u, v]:
        check_output_variable(o)

    # Case 3: two positional and one keyword argument.
    u, v, w = brick.apply(x, y, z=z)
    for o in [u, v, w]:
        check_output_variable(o)

    # Case 4: one positional argument.
    u, v = brick.apply(x)
    check_output_variable(u)
    assert v == 1

    # Case 5: variable was wrapped in a list. We cannot handle that.
    u, v = brick.apply([x])
    assert_raises(AttributeError, check_output_variable, u)
Developer: CVML, Project: blocks, Lines: 34, Source: test_bricks.py
Example 8: test_grad_lazy_if
def test_grad_lazy_if(self):
    # Tests that we can compute the gradients through lazy if
    x = tensor.vector('x')
    y = tensor.vector('y')
    c = tensor.iscalar('c')
    z = ifelse(c, x, y)
    gx, gy = tensor.grad(z.sum(), [x, y])
    f = theano.function([c, x, y], [gx, gy])

    rng = numpy.random.RandomState(utt.fetch_seed())
    xlen = rng.randint(200)
    ylen = rng.randint(200)

    vx = numpy.asarray(rng.uniform(size=(xlen,)), theano.config.floatX)
    vy = numpy.asarray(rng.uniform(size=(ylen,)), theano.config.floatX)
    gx0, gy0 = f(1, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(gx0 == 1.)
    assert numpy.all(gy0 == 0.)

    gx0, gy0 = f(0, vx, vy)
    assert numpy.allclose(gx0.shape, vx.shape)
    assert numpy.allclose(gy0.shape, vy.shape)
    assert numpy.all(gx0 == 0.)
    assert numpy.all(gy0 == 1.)
Developer: glorotxa, Project: Theano, Lines: 27, Source: test_ifelse.py
Example 9: test_lop_override
def test_lop_override(self, cls_ofg):
    x = T.vector()
    y = 1. / (1. + T.exp(-x))

    def lop_ov(inps, outs, grads):
        y_, = outs
        dedy_, = grads
        return [2. * y_ * (1. - y_) * dedy_]

    y_, dedy = T.vector(), T.vector()
    op_lop_ov = cls_ofg([x, y_, dedy], [2. * y_ * (1. - y_) * dedy])

    xx = T.vector()
    yy1 = T.sum(T.nnet.sigmoid(xx))
    gyy1 = 2. * T.grad(yy1, xx)

    for ov in [lop_ov, op_lop_ov]:
        op = cls_ofg([x], [y], lop_overrides=ov)
        yy2 = T.sum(op(xx))
        gyy2 = T.grad(yy2, xx)
        fn = function([xx], [gyy1, gyy2])

        xval = np.random.rand(32).astype(config.floatX)
        y1val, y2val = fn(xval)
        assert np.allclose(y1val, y2val)
Developer: Theano, Project: Theano, Lines: 25, Source: test_builders.py
Example 10: __init__
def __init__(self, dic_size, window, unit_id, tag_num, net_size, weight_decay, word_dim=50, learning_rate=0.1):
    def f_softplus(x): return T.log(T.exp(x) + 1)  # - np.log(2)
    def f_rectlin(x): return x * (x > 0)
    def f_rectlin2(x): return x * (x > 0) + 0.01 * x
    nonlinear = {'tanh': T.tanh, 'sigmoid': T.nnet.sigmoid, 'softplus': f_softplus, 'rectlin': f_rectlin, 'rectlin2': f_rectlin2}
    self.non_unit = nonlinear[unit_id]
    self.weight_decay = weight_decay
    self.tag_num = tag_num
    self.window_size = window
    self.learning_rate = learning_rate
    self.worddim = word_dim
    self.w, self.b, self.A = self.init_w(net_size, tag_num)
    self.w2vtable = self.init_wtable(word_dim, dic_size)  # table of word vectors

    x = T.vector('x')
    w = []
    b = []
    for i in range(len(self.w)):
        w.append(T.matrix())
        b.append(T.vector())
    output = self.network(x, w, b)
    og = []
    for j in range(self.tag_num):
        og.extend(T.grad(output[j], w + b + [x]))
    self.outfunction = theano.function([x] + w + b, output)
    self.goutfunction = theano.function([x] + w + b, [output] + og)
Developer: mswellhao, Project: active_NER, Lines: 27, Source: baseline.py
Example 11: test_logpy
def test_logpy():
    x = tensor.vector()
    y = tensor.vector()
    z = tensor.inc_subtensor(x[1:3], y)
    node = z.owner

    # otherwise theano chokes on var attributes when nose tries to print a
    # traceback
    # XXX this should be un-monkey-patched after the test runs by e.g. a
    # context manager decorator
    theano.gof.Apply.__repr__ = object.__repr__
    theano.gof.Apply.__str__ = object.__str__

    w = dict((name, var(name)) for name in [
        'start', 'stop', 'step', 'set_instead_of_inc', 'inputs', 'outputs',
        'inplace', 'whole_op', 'dta',
    ])
    pattern = raw_init(theano.Apply,
                       op=raw_init(theano.tensor.IncSubtensor,
                                   idx_list=[slice(w['start'], w['stop'], w['step'])],
                                   inplace=w['inplace'],
                                   set_instead_of_inc=w['set_instead_of_inc'],
                                   destroyhandler_tolerate_aliased=w['dta']),
                       inputs=w['inputs'],
                       outputs=w['outputs'])

    match, = run(0, w, (eq, node, pattern))

    assert match['stop'] == 3
    assert match['inputs'] == [x, y]
Developer: jaberg, Project: theano_workspace, Lines: 30, Source: test_logpy_opt.py
Example 12: __init__
def __init__(self, C, D):
    self.W = theano.shared(np.ones((C, D), dtype='float32'))

    t_M = T.matrix('M', dtype='float32')
    t_vM = T.vector('M', dtype='float32')
    t_Y = T.vector('Y', dtype='float32')
    t_I = T.vector('I', dtype='float32')
    t_s = T.vector('s', dtype='float32')
    t_eps = T.scalar('epsilon', dtype='float32')

    self.input_integration = theano.function(
        [t_Y],
        T.dot(T.log(self.W), t_Y),
        allow_input_downcast=True
    )
    self.M_summation = theano.function(
        [t_M],
        T.sum(t_M, axis=0),
        allow_input_downcast=True
    )
    self.recurrent_softmax = theano.function(
        [t_I, t_vM],
        t_vM * T.exp(t_I) / T.sum(t_vM * T.exp(t_I)),
        allow_input_downcast=True
    )
    self.weight_update = theano.function(
        [t_Y, t_s, t_eps],
        self.W,
        updates={
            self.W:
                self.W + t_eps * (T.outer(t_s, t_Y) - t_s[:, np.newaxis] * self.W)
        },
        allow_input_downcast=True
    )

    self.epsilon = None
    self._Y = None
    self._s = None
Developer: dennisforster, Project: NeSi, Lines: 35, Source: poisson_theano.py
Example 13: test_rmsprop_0
def test_rmsprop_0():
    # input
    x = TT.vector(name='x')
    B = theano.shared(floatX(np.ones((3, 5))), name='B')
    c = theano.shared(floatX(np.ones(3)), name='c')
    params = [B, c]
    # output
    y_pred = TT.nnet.softmax(TT.dot(B, x.T).T + c)
    y_gold = TT.vector(name="y_gold")
    # cost and grads
    cost = TT.sum((y_pred - y_gold) ** 2)
    grads = TT.grad(cost, wrt=params)
    # funcs
    cost_func, update_func, rms_params = rmsprop(params, grads,
                                                 [x], y_gold, cost)
    # check return values
    assert len(rms_params) == 4
    assert isinstance(rms_params[0][0], TT.sharedvar.TensorSharedVariable)
    assert not np.any(rms_params[0][0].get_value())
    # check convergence
    X = [floatX(np.random.rand(5)) for _ in xrange(N)]
    Y = [floatX(np.random.rand(3)) for _ in xrange(N)]
    icost = init_cost = end_cost = 0.
    for i in xrange(MAX_I):
        icost = 0.
        for x, y in zip(X, Y):
            icost += cost_func(x, y)
        update_func()
        if i == 0:
            init_cost = icost
        elif i == MAX_I - 1:
            end_cost = icost
    assert end_cost < init_cost
Developer: WladimirSidorenko, Project: DiscourseSenser, Lines: 33, Source: test_theano_utils.py
Example 14: setup_decoder_step
def setup_decoder_step(self):
    """Advance the decoder by one step. Used at test time."""
    y_t = T.lscalar('y_t_for_dec')
    c_prev = T.vector('c_prev_for_dec')
    h_prev = T.vector('h_prev_for_dec')
    h_t = self.spec.f_dec(y_t, c_prev, h_prev)
    self._decoder_step = theano.function(inputs=[y_t, c_prev, h_prev], outputs=h_t)
Developer: arunchaganty, Project: nn-semparse, Lines: 7, Source: attention.py
Example 15: test_softmax_optimizations_w_bias_vector
def test_softmax_optimizations_w_bias_vector(self):
    x = tensor.vector('x')
    b = tensor.vector('b')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot
    fgraph = gof.FunctionGraph(
        [x, b, one_of_n],
        [op(softmax(x + b), one_of_n)])
    assert fgraph.outputs[0].owner.op == op
    # print 'BEFORE'
    # for node in fgraph.toposort():
    #     print node.op
    # print printing.pprint(node.outputs[0])
    # print '----'

    theano.compile.mode.optdb.query(
        theano.compile.mode.OPT_FAST_RUN).optimize(fgraph)

    # print 'AFTER'
    # for node in fgraph.toposort():
    #     print node.op
    # print '===='
    assert len(fgraph.toposort()) == 3
    assert str(fgraph.outputs[0].owner.op) == 'OutputGuard'
    assert (fgraph.outputs[0].owner.inputs[0].owner.op ==
            crossentropy_softmax_argmax_1hot_with_bias)
Developer: repos-python, Project: Theano, Lines: 25, Source: test_nnet.py
Example 16: build_model
def build_model(reg_constant=0.1, var1_name='var1', var2_name='var2'):
    """
    Build MF model in theano
    :param reg_constant: Regularization constant
    :param var1_name: Name of first variable (e.g. users)
    :param var2_name: Name of second variable (e.g. items)
    :return: theano function implementing MF model
    """
    ratings = T.vector('ratings')
    var1_vector = T.vector('{}_vector'.format(var1_name))
    var2_matrix = T.matrix('{}_matrix'.format(var2_name))

    # Predict ratings; slot 0 of the vector and column 0 of the matrix
    # hold the bias terms.
    predictions = T.dot(var2_matrix[:, 1:], var1_vector[1:]) + var2_matrix[:, 0] + var1_vector[0]

    prediction_error = ((ratings - predictions) ** 2).sum()
    l2_penalty = (var1_vector ** 2).sum() + (var2_matrix ** 2).sum().sum()
    cost = prediction_error + reg_constant * l2_penalty

    var1_grad, var2_grad = T.grad(cost, [var1_vector, var2_matrix])
    var1_grad /= var2_matrix.shape[0]

    f = theano.function(inputs=[ratings, var1_vector, var2_matrix], outputs=[cost, var1_grad, var2_grad])
    return f
Developer: jalexvig, Project: collaborative_filtering, Lines: 26, Source: latent_factors.py
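A hypothetical invocation of the compiled function, assuming 4 observed ratings and 3 latent factors; all shapes and values below are illustrative, not from the original project:

import numpy as np

f = build_model(reg_constant=0.1)
ratings = np.array([4.0, 3.0, 5.0, 1.0])  # 4 observed ratings
var1_vec = np.random.rand(4)              # bias + 3 latent factors
var2_mat = np.random.rand(4, 4)           # one row per rating; column 0 is the bias
cost, var1_grad, var2_grad = f(ratings, var1_vec, var2_mat)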
Example 17: main
def main(save_to, num_batches, continue_=False):
    mlp = MLP([Tanh(), Identity()], [1, 10, 1],
              weights_init=IsotropicGaussian(0.01),
              biases_init=Constant(0), seed=1)
    mlp.initialize()
    x = tensor.vector('numbers')
    y = tensor.vector('roots')
    cost = SquaredError().apply(y[:, None], mlp.apply(x[:, None]))
    cost.name = "cost"

    main_loop = MainLoop(
        GradientDescent(
            cost=cost, params=ComputationGraph(cost).parameters,
            step_rule=Scale(learning_rate=0.001)),
        get_data_stream(range(100)),
        model=Model(cost),
        extensions=([LoadFromDump(save_to)] if continue_ else []) +
                   [Timing(),
                    FinishAfter(after_n_batches=num_batches),
                    DataStreamMonitoring(
                        [cost], get_data_stream(range(100, 200)),
                        prefix="test"),
                    TrainingDataMonitoring([cost], after_epoch=True),
                    Dump(save_to),
                    Printing()])
    main_loop.run()
    return main_loop
Developer: basaundi, Project: blocks, Lines: 27, Source: sqrt.py
Example 18: test_uniform_vector
def test_uniform_vector(self):
    rng_R = random_state_type()
    low = tensor.vector()
    high = tensor.vector()
    post_r, out = uniform(rng_R, low=low, high=high)
    assert out.ndim == 1
    f = compile.function([rng_R, low, high], [post_r, out], accept_inplace=True)

    def as_floatX(thing):
        return numpy.asarray(thing, dtype=theano.config.floatX)

    low_val = as_floatX([0.1, 0.2, 0.3])
    high_val = as_floatX([1.1, 2.2, 3.3])
    rng = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, low_val, high_val)
    numpy_val0 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val))
    assert numpy.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, low_val[:-1], high_val[:-1])
    numpy_val1 = as_floatX(numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1]))
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, low, high],
                         uniform(rng_R, low=low, high=high, size=(3,)),
                         accept_inplace=True)
    rng2, val2 = g(rng1, low_val, high_val)
    numpy_val2 = as_floatX(numpy_rng.uniform(low=low_val, high=high_val, size=(3,)))
    assert numpy.all(val2 == numpy_val2)
    self.assertRaises(ValueError, g, rng2, low_val[:-1], high_val[:-1])
Developer: gokul-uf, Project: Theano, Lines: 32, Source: test_raw_random.py
Example 19: _compile_bp
def _compile_bp(self):
    '''
    compile backpropagation for each of the dqns.
    '''
    self.bprop_by_goal = {}
    for (goal, dqn) in self.dqn_by_goal.items():
        states = dqn.states
        action_values = dqn.action_values
        params = dqn.params
        targets = T.vector('target')
        shared_values = T.vector('shared_values')
        last_actions = T.lvector('action')

        # loss function.
        mse = layers.MSE(action_values[T.arange(action_values.shape[0]),
                                       last_actions], targets) \
            + T.mean(abs(action_values[T.arange(action_values.shape[0]),
                                       last_actions] - shared_values))
        # l2 penalty.
        l2_penalty = 0.
        for param in params:
            l2_penalty += (param ** 2).sum()

        cost = mse + self.l2_reg * l2_penalty

        # back propagation.
        updates = optimizers.Adam(cost, params, alpha=self.lr)

        td_errors = T.sqrt(mse)
        self.bprop_by_goal[goal] = theano.function(inputs=[states, last_actions, targets, shared_values],
                                                   outputs=td_errors, updates=updates)
Developer: amoliu, Project: curriculum-deep-RL, Lines: 31, Source: uvfa.py
Example 20: test_normal_vector
def test_normal_vector(self):
    rng_R = random_state_type()
    avg = tensor.vector()
    std = tensor.vector()
    post_r, out = normal(rng_R, avg=avg, std=std)
    assert out.ndim == 1
    f = compile.function([rng_R, avg, std], [post_r, out], accept_inplace=True)

    def as_floatX(thing):
        return numpy.asarray(thing, dtype=theano.config.floatX)

    avg_val = [1, 2, 3]
    std_val = as_floatX([0.1, 0.2, 0.3])
    rng = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(utt.fetch_seed())

    # Arguments of size (3,)
    rng0, val0 = f(rng, avg_val, std_val)
    numpy_val0 = as_floatX(numpy_rng.normal(loc=as_floatX(avg_val), scale=as_floatX(std_val)))
    assert numpy.all(val0 == numpy_val0)

    # Arguments of size (2,)
    rng1, val1 = f(rng0, avg_val[:-1], std_val[:-1])
    numpy_val1 = numpy.asarray(numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1]),
                               dtype=theano.config.floatX)
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = compile.function([rng_R, avg, std],
                         normal(rng_R, avg=avg, std=std, size=(3,)),
                         accept_inplace=True)
    rng2, val2 = g(rng1, avg_val, std_val)
    numpy_val2 = numpy.asarray(numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,)),
                               dtype=theano.config.floatX)
    assert numpy.all(val2 == numpy_val2)
    self.assertRaises(ValueError, g, rng2, avg_val[:-1], std_val[:-1])
Developer: gokul-uf, Project: Theano, Lines: 32, Source: test_raw_random.py
Note: The theano.tensor.vector examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.