This article collects typical usage examples of the theano.tensor.dvector function in Python. If you have been wondering exactly how to use dvector, or what it is good for, the hand-picked code examples below may help.
The article presents 20 code examples of the dvector function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
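Before the examples, a minimal sketch of what T.dvector itself does may help: it declares a symbolic, one-dimensional float64 vector that can be combined into expressions and compiled with theano.function.

    import theano
    import theano.tensor as T

    x = T.dvector('x')            # symbolic 1-D vector of dtype float64
    y = x ** 2                    # build a symbolic expression from it
    f = theano.function([x], y)   # compile the graph into a callable
    print(f([1.0, 2.0, 3.0]))     # => [1. 4. 9.]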
Example 1: test_profiling
def test_profiling():
    old1 = theano.config.profile
    old2 = theano.config.profile_memory
    try:
        theano.config.profile = True
        theano.config.profile_memory = True
        x = T.dvector("x")
        y = T.dvector("y")
        z = x + y
        p = theano.ProfileStats(False)
        if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
            m = "FAST_RUN"
        else:
            m = None
        f = theano.function([x, y], z, profile=p, name="test_profiling",
                            mode=m)
        output = f([1, 2, 3, 4], [1, 1, 1, 1])
        buf = StringIO.StringIO()
        f.profile.summary(buf)
    finally:
        theano.config.profile = old1
        theano.config.profile_memory = old2
Author: Jerryzcn | Project: Theano | Lines: 25 | Source: test_profiling.py
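As a follow-up, a minimal sketch (assuming f was compiled with profile=p as in the test above) for printing the collected statistics to stdout instead of a StringIO buffer:

    import sys

    # ProfileStats.summary writes the per-op timing/memory report to a file object
    f.profile.summary(sys.stdout)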
Example 2: theano_setup
def theano_setup(self):
    W = T.dmatrix('W')
    b = T.dvector('b')
    c = T.dvector('c')
    x = T.dmatrix('x')

    s = T.dot(x, W) + c
    # h = 1 / (1 + T.exp(-s))
    # h = T.nnet.sigmoid(s)
    h = T.tanh(s)
    # r = T.dot(h,W.T) + b
    # r = theano.printing.Print("r=")(2*T.tanh(T.dot(h,W.T) + b))
    ract = T.dot(h, W.T) + b
    r = self.output_scaling_factor * T.tanh(ract)

    #g = function([W,b,c,x], h)
    #f = function([W,b,c,h], r)
    #fg = function([W,b,c,x], r)

    # Another variable to be able to call a function
    # with a noisy x and compare it to a reference x.
    y = T.dmatrix('y')
    all_losses = ((r - y)**2)
    loss = T.sum(all_losses)
    #loss = ((r - y)**2).sum()

    self.theano_encode_decode = function([W, b, c, x], r)
    self.theano_all_losses = function([W, b, c, x, y], [all_losses, T.abs_(s), T.abs_(ract)])
    self.theano_gradients = function([W, b, c, x, y], [T.grad(loss, W), T.grad(loss, b), T.grad(loss, c)])
Author: gyom | Project: cae.py | Lines: 31 | Source: dae_theano.py
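A hedged call sketch for the compiled functions above; the instance name dae and all shapes are hypothetical, and theano_setup() must have been called first:

    import numpy as np

    # hypothetical sizes: 5 visible units, 3 hidden units, batch of 2
    W = np.random.randn(5, 3)
    b = np.zeros(5)
    c = np.zeros(3)
    x = np.random.randn(2, 5)

    r = dae.theano_encode_decode(W, b, c, x)          # (2, 5) reconstruction of x
    gW, gb, gc = dae.theano_gradients(W, b, c, x, x)  # gradients of the summed loss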
Example 3: dtw
def dtw(array1, array2):
    """
    Accepts: two one dimensional arrays
    Returns: (float) DTW distance between them.
    """
    s = np.zeros((array1.size+1, array2.size+1))
    s[:,0] = 1e6
    s[0,:] = 1e6
    s[0,0] = 0.0

    # Set up symbolic variables
    square = T.dmatrix('square')
    vec1 = T.dvector('vec1')
    vec2 = T.dvector('vec2')
    vec1_length = T.dscalar('vec1_length')
    vec2_length = T.dscalar('vec2_length')
    outer_loop = T.arange(vec1_length, dtype='int64')
    inner_loop = T.arange(vec2_length, dtype='int64')

    # Run the outer loop
    path, _ = scan(fn=outer,
                   outputs_info=[dict(initial=square, taps=[-1])],
                   non_sequences=[inner_loop, vec1, vec2],
                   sequences=outer_loop)

    # Compile the function
    theano_square = function([vec1, vec2, square, vec1_length, vec2_length], path, on_unused_input='warn')

    # Call the compiled function and return the actual distance
    return theano_square(array1, array2, s, array1.size, array2.size)[-1][array1.size, array2.size]
Author: astanway | Project: theano-dtw | Lines: 31 | Source: dtw.py
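A hypothetical call sketch; note the snippet assumes a step function outer and theano's scan are already imported in the surrounding module, so this only runs in that context:

    import numpy as np

    a = np.array([0.0, 1.0, 2.0, 3.0])
    b = np.array([0.0, 1.0, 2.5])
    print(dtw(a, b))   # scalar DTW distance between the two sequences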
Example 4: make_minimizer
def make_minimizer(Model):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'), T.dvector('eta')

    model = Model(L, y, mu, R, eta, eps)
    return theano.function([L, y, mu, R, eta, eps], model.minimize())
Author: pminervini | Project: knowledge-propagation | Lines: 7 | Source: momentum.py
Example 5: __init__
def __init__(self, N, Nsub, NRGC, prior=1):
    self.N = N
    self.Nsub = Nsub
    self.NRGC = NRGC
    U = Th.dmatrix()        # SYMBOLIC variables
    V1 = Th.dvector()
    V2 = Th.dvector()
    STA = Th.dvector()
    STC = Th.dmatrix()
    theta = Th.dot( U.T , V1 )
    UV1U = Th.dot( U , theta )
    UV1V2U = Th.dot( V1 * U.T , (V2 * U.T).T )
    posterior = -0.5 * Th.sum( V1 * V2 * U.T*U.T ) \
                -0.25* Th.sum( UV1V2U.T * UV1V2U ) \
                -0.5 * Th.sum( UV1U * UV1U * UV1U * V2 * V2 * V1 ) \
                -0.5 * Th.sum( UV1U * UV1U * V2 * V1 ) \
                -0.5 * Th.sum( theta * theta ) \
                + Th.dot( theta.T , STA ) \
                + Th.sum( Th.dot( V1 * V2 * U.T , U ) \
                * (STC + STA.T * STA) )
    dpost_dU  = Th.grad( cost=posterior, wrt=U  )
    dpost_dV1 = Th.grad( cost=posterior, wrt=V1 )
    dpost_dV2 = Th.grad( cost=posterior, wrt=V2 )
    # self.posterior = function( [U,V2,V1,STA,STC], UV1V2U )
    self.posterior  = function( [U,V2,V1,STA,STC], posterior )
    self.dpost_dU   = function( [U,V2,V1,STA,STC], dpost_dU  )
    self.dpost_dV1  = function( [U,V2,V1,STA,STC], dpost_dV1 )
    self.dpost_dV2  = function( [U,V2,V1,STA,STC], dpost_dV2 )
Author: kolia | Project: subunits | Lines: 31 | Source: LQuadLExP_taylor.py
Example 6: __init__
def __init__(self, first_W):
    self.log_regression = LogisticRegression(first_W)
    st = T.dvector('st')
    ac = T.dvector('ac')
    z = ac * ac
    self.q_ = th.function(inputs=[st, ac],
                          outputs=[self.log_regression.cost(T.concatenate([ac, z, st, ac[:-1] * st[:-1]]))])
Author: Seplanna | Project: interactive-recomendation | Lines: 7 | Source: logistic_regression.py
Example 7: LQLEP_wBarrier
def LQLEP_wBarrier( LQLEP   = Th.dscalar(), ldet = Th.dscalar(), v1 = Th.dvector(),
                    N_spike = Th.dscalar(), ImM  = Th.dmatrix(), U  = Th.dmatrix(),
                    V2      = Th.dvector(), u    = Th.dvector(), C  = Th.dmatrix(),
                    **other):
    '''
    The actual Linear-Quadratic-Exponential-Poisson log-likelihood,
    as a function of theta and M,
    with a barrier on the log-det term and a prior.
    '''
    sq_nonlinearity = V2**2. * Th.sum( Th.dot(U,C)*U, axis=[1])              # Th.sum(U**2,axis=[1])
    nonlinearity = V2 * Th.sqrt( Th.sum( Th.dot(U,C)*U, axis=[1]))           # Th.sum(U**2,axis=[1])
    if other.has_key('uc'):
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 10. * Th.sum( (other['uc'][2:]+other['uc'][:-2]-2*other['uc'][1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
        # + 100. * Th.sum( v1 )
        # + 0.0001 * Th.sum( V2**2 )
    else:
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
        # + 100. * Th.sum( v1 )
        # + 0.0001 * Th.sum( V2**2 )
    eigsImM, barrier = eig( ImM )
    barrier = 1 - (Th.sum(Th.log(eigsImM)) > -250) * \
                  (Th.min(eigsImM) > 0) * (Th.max(4*sq_nonlinearity) < 1)
    other.update(locals())
    return named( **other )
Author: kolia | Project: subunits | Lines: 31 | Source: QuadPoiss.py
Example 8: test_uniform_vector
def test_uniform_vector(self):
    random = RandomStreams(utt.fetch_seed())
    low = tensor.dvector()
    high = tensor.dvector()
    out = random.uniform(low=low, high=high)
    assert out.ndim == 1
    f = function([low, high], out)

    low_val = [.1, .2, .3]
    high_val = [1.1, 2.2, 3.3]
    seed_gen = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

    # Arguments of size (3,)
    val0 = f(low_val, high_val)
    numpy_val0 = numpy_rng.uniform(low=low_val, high=high_val)
    print('THEANO', val0)
    print('NUMPY', numpy_val0)
    assert numpy.all(val0 == numpy_val0)

    # arguments of size (2,)
    val1 = f(low_val[:-1], high_val[:-1])
    numpy_val1 = numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1])
    print('THEANO', val1)
    print('NUMPY', numpy_val1)
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = function([low, high], random.uniform(low=low, high=high, size=(3,)))
    val2 = g(low_val, high_val)
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
    numpy_val2 = numpy_rng.uniform(low=low_val, high=high_val, size=(3,))
    assert numpy.all(val2 == numpy_val2)
    self.assertRaises(ValueError, g, low_val[:-1], high_val[:-1])
Author: ChinaQuants | Project: Theano | Lines: 34 | Source: test_shared_randomstreams.py
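The pattern this test exercises, reduced to a standalone sketch: symbolic dvector bounds feeding RandomStreams.uniform, so each call draws one sample per element:

    import theano.tensor as T
    from theano import function
    from theano.tensor.shared_randomstreams import RandomStreams

    srng = RandomStreams(seed=1234)
    low = T.dvector('low')
    high = T.dvector('high')
    sample = srng.uniform(low=low, high=high)   # elementwise uniform draws
    f = function([low, high], sample)
    print(f([0.0, 0.0], [1.0, 10.0]))           # two draws, one per (low, high) pair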
Example 9: init_propagate_function
def init_propagate_function(self):
    x = T.dvector()
    y = T.dmatrix()
    b = T.dvector()
    z = T.dot(x, y) + b
    f = theano.function([x, y, b], z)
    return f
Author: johannbm | Project: MTDT-Projects | Lines: 7 | Source: hidden_layer.py
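A hedged call sketch; the instance name layer is hypothetical, and the shapes just have to satisfy the dot product:

    import numpy as np

    f = layer.init_propagate_function()
    # x: (3,), y: (3, 2), b: (2,)  ->  z: (2,)
    print(f(np.ones(3), np.ones((3, 2)), np.zeros(2)))   # => [3. 3.]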
Example 10: neural_net
def neural_net(
        x=T.dmatrix(),       # our points, one point per row
        y=T.dmatrix(),       # our targets
        w=T.dmatrix(),       # first layer weights
        b=T.dvector(),       # first layer bias
        v=T.dmatrix(),       # second layer weights
        c=T.dvector(),       # second layer bias
        step=T.dscalar(),    # step size for gradient descent
        l2_coef=T.dscalar()  # l2 regularization amount
        ):
    """Idea A:
    """
    hid = T.tanh(T.dot(x, w) + b)
    pred = T.dot(hid, v) + c
    sse = T.sum((pred - y) * (pred - y))
    w_l2 = T.sum(T.sum(w*w))
    v_l2 = T.sum(T.sum(v*v))
    loss = sse + l2_coef * (w_l2 + v_l2)

    def symbolic_params(cls):
        return [cls.w, cls.b, cls.v, cls.c]

    def update(cls, x, y, **kwargs):
        params = cls.symbolic_params()
        gp = T.grad(cls.loss, params)
        return [], [In(p, update=p - cls.step * g) for p, g in zip(params, gp)]

    def predict(cls, x, **kwargs):
        return cls.pred, []

    return locals()
Author: olivierverdier | Project: Theano | Lines: 31 | Source: symbolic_module.py
Example 11: test_0
def test_0():
    N = 16*1000*10*1
    if 1:
        aval = abs(numpy.random.randn(N).astype('float32'))+.1
        bval = numpy.random.randn(N).astype('float32')
        a = T.fvector()
        b = T.fvector()
    else:
        aval = abs(numpy.random.randn(N))+.1
        bval = numpy.random.randn(N)
        a = T.dvector()
        b = T.dvector()

    f = theano.function([a,b], T.pow(a,b), mode='LAZY')
    theano_opencl.elemwise.swap_impls = False
    g = theano.function([a,b], T.pow(a,b), mode='LAZY')

    print 'ocl time', timeit.Timer(lambda: f(aval, bval)).repeat(3,3)
    print 'gcc time', timeit.Timer(lambda: g(aval, bval)).repeat(3,3)
    print 'numpy time', timeit.Timer(lambda: aval**bval).repeat(3,3)

    assert ((f(aval, bval) - aval**bval)**2).sum() < 1.1
    assert ((g(aval, bval) - aval**bval)**2).sum() < 1.1
Author: jaberg | Project: TheanoWS | Lines: 27 | Source: test_elemwise.py
Example 12: test_loss_updates_one_layer_positive_relu
def test_loss_updates_one_layer_positive_relu(self):
    n_vis = 4
    n_hid = 2
    hidden_layer = HiddenLayer(n_vis=n_vis, n_hid=n_hid, layer_name='h', activation='relu', param_init_range=0, alpha=0)
    # W = theano.shared(value=np.ones((n_vis, n_hid)), name='h_W', borrow=True)
    # hidden_layer.W = W
    mlp = QNetwork([hidden_layer], discount=1, learning_rate=1)
    features = T.dvector('features')
    action = T.lscalar('action')
    reward = T.dscalar('reward')
    next_features = T.dvector('next_features')
    loss, updates = mlp.get_loss_and_updates(features, action, reward, next_features)
    train = theano.function(
                    [features, action, reward, next_features],
                    outputs=loss,
                    updates=updates,
                    mode='FAST_COMPILE')
    features = [1,1,1,1]
    action = 0
    reward = 1
    next_features = [1,1,1,1]
    actual_loss = train(features, action, reward, next_features)
    expected_loss = 0.5
    actual_weights = list(mlp.layers[0].W.eval())
    expected_weights = [[1,0], [1,0], [1,0], [1,0]]
    self.assertEqual(actual_loss, expected_loss)
    self.assertTrue(np.array_equal(actual_weights, expected_weights))
Author: switchfootsid | Project: playing_atari | Lines: 32 | Source: test_nnet.py
Example 13: UV
def UV( U = Th.dmatrix('U'), V1 = Th.dvector('V1'), V2 = Th.dvector('V2'), **result):
    '''
    Reparameterize theta and M as a function of U, V1 and V2.
    '''
    result['theta'] = Th.dot( U.T , V1 )
    result['M'] = Th.dot( V1 * U.T , (V2 * U.T).T )
    return result
Author: kolia | Project: subunits | Lines: 7 | Source: QuadPoiss_old.py
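A hedged compilation sketch; fresh symbolic variables are passed in rather than relying on the defaults bound at definition time (Th is the theano.tensor alias used by the project):

    import theano.tensor as Th
    from theano import function

    U, V1, V2 = Th.dmatrix('U'), Th.dvector('V1'), Th.dvector('V2')
    out = UV(U=U, V1=V1, V2=V2)
    f = function([U, V1, V2], [out['theta'], out['M']])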
Example 14: test_normal_vector
def test_normal_vector(self):
    random = RandomStreams(utt.fetch_seed())
    avg = tensor.dvector()
    std = tensor.dvector()
    out = random.normal(avg=avg, std=std)
    assert out.ndim == 1
    f = function([avg, std], out)

    avg_val = [1, 2, 3]
    std_val = [.1, .2, .3]
    seed_gen = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

    # Arguments of size (3,)
    val0 = f(avg_val, std_val)
    numpy_val0 = numpy_rng.normal(loc=avg_val, scale=std_val)
    assert numpy.allclose(val0, numpy_val0)

    # arguments of size (2,)
    val1 = f(avg_val[:-1], std_val[:-1])
    numpy_val1 = numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1])
    assert numpy.allclose(val1, numpy_val1)

    # Specifying the size explicitly
    g = function([avg, std], random.normal(avg=avg, std=std, size=(3,)))
    val2 = g(avg_val, std_val)
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
    numpy_val2 = numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,))
    assert numpy.allclose(val2, numpy_val2)
    self.assertRaises(ValueError, g, avg_val[:-1], std_val[:-1])
Author: ChinaQuants | Project: Theano | Lines: 30 | Source: test_shared_randomstreams.py
Example 15: __init__
def __init__(self, sizes, input_dim, output_dim):
    self.layers = len(sizes) + 1
    in_dim = [input_dim] + sizes
    out_dim = sizes + [output_dim]
    x = T.dvector('x')
    y = T.dvector('y')
    self.hyp_params = []
    for i, (r, c) in enumerate(zip(in_dim, out_dim)):
        if i == 0:
            obj = HiddenLayer(x, r, c)
        else:
            obj = HiddenLayer(obj.output, r, c)
        self.hyp_params.append(obj.params)
    yhat = obj.output
    prediction = T.argmax(yhat)
    self.predict = theano.function([x], [yhat])
    o_error = T.sum(T.sqr(yhat - y))
    # o_error = T.sum(T.nnet.categorical_crossentropy(yhat, y))
    updates = []
    learning_rate = T.scalar('learning_rate')
    for param in self.hyp_params:
        updates.append((param['W'], param['W'] - learning_rate * T.grad(o_error, param['W'])))
        updates.append((param['b'], param['b'] - learning_rate * T.grad(o_error, param['b'])))
    self.train_step = theano.function([x, y, learning_rate], [o_error],
                                      updates=updates)
Author: ranarag | Project: theano_works | Lines: 31 | Source: mlp_theano.py
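A hedged training sketch; the owning class name MLP and the HiddenLayer implementation are assumptions about the surrounding project:

    import numpy as np

    net = MLP(sizes=[3], input_dim=4, output_dim=2)   # hypothetical 4-3-2 network
    x = np.random.randn(4)
    y = np.array([0.0, 1.0])
    for epoch in range(100):
        err, = net.train_step(x, y, 0.01)   # one gradient step; returns [o_error]
    yhat, = net.predict(x)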
Example 16: test_optimize_xent_vector2
def test_optimize_xent_vector2(self):
    verbose = 0
    mode = theano.compile.mode.get_default_mode()
    if mode == theano.compile.mode.get_mode('FAST_COMPILE'):
        mode = 'FAST_RUN'
    rng = numpy.random.RandomState(utt.fetch_seed())
    x_val = rng.randn(5)
    b_val = rng.randn(5)
    y_val = numpy.asarray([2])

    x = T.dvector('x')
    b = T.dvector('b')
    y = T.lvector('y')

    def print_graph(func):
        for i, node in enumerate(func.maker.fgraph.toposort()):
            print i, node
        # Last node should be the output
        print i, printing.pprint(node.outputs[0])
        print

    ## Test that a biased softmax is optimized correctly
    bias_expressions = [
            T.sum(-T.log(softmax(x + b)[T.arange(y.shape[0]), y])),
            -T.sum(T.log(softmax(b + x)[T.arange(y.shape[0]), y])),
            -T.sum(T.log(softmax(x + b))[T.arange(y.shape[0]), y]),
            T.sum(-T.log(softmax(b + x))[T.arange(y.shape[0]), y])]

    for expr in bias_expressions:
        f = theano.function([x, b, y], expr, mode=mode)
        if verbose:
            print_graph(f)
        try:
            prev, last = f.maker.fgraph.toposort()[-2:]
            assert len(f.maker.fgraph.toposort()) == 3
            # [big_op, sum, dim_shuffle]
            f(x_val, b_val, y_val)
        except Exception:
            theano.printing.debugprint(f)
            raise

        backup = config.warn.sum_div_dimshuffle_bug
        config.warn.sum_div_dimshuffle_bug = False
        try:
            g = theano.function([x, b, y], T.grad(expr, x), mode=mode)
        finally:
            config.warn.sum_div_dimshuffle_bug = backup

        if verbose:
            print_graph(g)
        try:
            ops = [node.op for node in g.maker.fgraph.toposort()]
            assert len(ops) <= 6
            assert crossentropy_softmax_1hot_with_bias_dx in ops
            assert softmax_with_bias in ops
            assert softmax_grad not in ops
            g(x_val, b_val, y_val)
        except Exception:
            theano.printing.debugprint(g)
            raise
Author: srifai | Project: Theano | Lines: 60 | Source: test_nnet.py
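The core expression the test optimizes, as a standalone sketch; as in the test above, softmax of a dvector yields a 1xN row matrix, which is why the [T.arange(y.shape[0]), y] indexing works:

    import numpy
    import theano
    import theano.tensor as T

    x = T.dvector('x')
    b = T.dvector('b')
    y = T.lvector('y')
    # negative log-likelihood of a biased softmax at the target indices
    nll = -T.sum(T.log(T.nnet.softmax(x + b))[T.arange(y.shape[0]), y])
    f = theano.function([x, b, y], nll)
    print(f(numpy.random.randn(5), numpy.random.randn(5), numpy.array([2])))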
Example 17: Pretrain
def Pretrain(sda, data, loops, rate):
    L = 0
    R = 0
    input = T.dvector()
    through = theano.function( inputs=[input], outputs=input )
    for lvl in xrange(sda.n_layers-1):
        train = sda.getTrainingFunc(lvl, lvl+1)
        for loop in xrange(loops*len(data[0])):
            p0 = random.randint(0, len(data[0])-1)
            p1 = random.randint(0, len(data[1])-1)
            patch0 = numpy.log(abs(0.7*data[0][p0] + 0.3*data[1][p1])**2+1)/20.0*0.8+0.1
            patch1 = numpy.log(abs(data[0][p0])**2+1)/20.0*0.8+0.1
            patch1 /= numpy.dot(patch1, patch1)
            # plt.subplot(211)
            # plt.imshow(patch0.reshape((5,128)))
            # plt.subplot(212)
            # plt.imshow(patch1.reshape((5,128)))
            # plt.show()
            l, r = train(through(patch1), through(patch1), rate, 0.05)
            L = L + l
            R = R + r
            if loop % 500 == 499:
                print lvl, loop, ':', 10*numpy.log10(0.75**2/(L/500.0/len(data[0][0]))), R/500.0
                L = 0
                R = 0
        input = T.dvector()
        through = theano.function( inputs=[input], outputs=sda.goThrough(input, 0, lvl+1) )
Author: amoliu | Project: autosub | Lines: 28 | Source: parse.py
Example 18: test_multilayer_sparse
def test_multilayer_sparse(self):
    # fixed parameters
    bsize = 10          # batch size
    imshp = (5,5)
    kshp = ((3,3),(2,2))
    nkerns = (10,20)    # per output pixel
    ssizes = ((1,1),(2,2))
    convmodes = ('full','valid',)

    # symbolic stuff
    kerns = [tensor.dvector(), tensor.dvector()]
    input = tensor.dmatrix()
    rng = numpy.random.RandomState(3423489)

    # build actual input images
    img2d = numpy.arange(bsize*numpy.prod(imshp)).reshape((bsize,)+imshp)
    img1d = img2d.reshape(bsize, -1)

    for mode in ('FAST_COMPILE','FAST_RUN'):
        for conv_mode in convmodes:
            for ss in ssizes:
                l1hid, l1outshp = sp.applySparseFilter(kerns[0], kshp[0],\
                        nkerns[0], input, imshp, ss, mode=conv_mode)
                l2hid, l2outshp = sp.applySparseFilter(kerns[1], kshp[1],\
                        nkerns[1], l1hid, l1outshp, ss, mode=conv_mode)

                l1propup = function([kerns[0], input], l1hid, mode=mode)
                l2propup = function([kerns[1], l1hid], l2hid, mode=mode)

                # actual values
                l1kernvals = numpy.arange(numpy.prod(l1outshp)*numpy.prod(kshp[0]))
                l2kernvals = numpy.arange(numpy.prod(l2outshp)*numpy.prod(kshp[1])*nkerns[0])
                l1hidval = l1propup(l1kernvals, img1d)
                l2hidval = l2propup(l2kernvals, l1hidval)
Author: Dimitris0mg | Project: Theano | Lines: 35 | Source: test_sp.py
Example 19: test_loss_updates_one_layer_positive_features_with_negative_weights_relu
def test_loss_updates_one_layer_positive_features_with_negative_weights_relu(self):
    n_vis = 4
    n_hid = 2
    hidden_layer = HiddenLayer(n_vis=n_vis, n_hid=n_hid, layer_name='h', activation='relu', param_init_range=0, alpha=0)
    hidden_layer.W.set_value(np.ones((n_vis, n_hid)) * -1)
    mlp = QNetwork([hidden_layer], discount=1, learning_rate=1)
    features = T.dvector('features')
    action = T.lscalar('action')
    reward = T.dscalar('reward')
    next_features = T.dvector('next_features')
    loss, updates = mlp.get_loss_and_updates(features, action, reward, next_features)
    train = theano.function(
                    [features, action, reward, next_features],
                    outputs=loss,
                    updates=updates,
                    mode='FAST_COMPILE')
    features = [1,1,1,1]
    action = 0
    reward = 1
    next_features = [1,1,1,1]
    actual_loss = train(features, action, reward, next_features)
    expected_loss = 0.5
    actual_weights = mlp.layers[0].W.eval().tolist()
    expected_weights = [[-1,-1], [-1,-1], [-1,-1], [-1,-1]]
    self.assertEqual(actual_loss, expected_loss)
    self.assertSequenceEqual(actual_weights, expected_weights)
Author: switchfootsid | Project: playing_atari | Lines: 31 | Source: test_nnet.py
Example 20: theano_setup
def theano_setup(self):
    # The matrices Wb and Wc were originally tied.
    # Because of that, I decided to keep Wb and Wc with
    # the same shape (instead of being transposed) to
    # avoid disturbing the code as much as possible.

    Wb = T.dmatrix('Wb')
    Wc = T.dmatrix('Wc')
    b = T.dvector('b')
    c = T.dvector('c')
    s = T.dscalar('s')
    x = T.dmatrix('x')

    h_act = T.dot(x, Wc) + c
    if self.act_func[0] == 'tanh':
        h = T.tanh(h_act)
    elif self.act_func[0] == 'sigmoid':
        h = T.nnet.sigmoid(h_act)
    elif self.act_func[0] == 'id':
        # bad idea
        h = h_act
    else:
        raise ValueError("Invalid act_func[0]")

    r_act = T.dot(h, Wb.T) + b
    if self.act_func[1] == 'tanh':
        r = s * T.tanh(r_act)
    elif self.act_func[1] == 'sigmoid':
        r = s * T.nnet.sigmoid(r_act)
    elif self.act_func[1] == 'id':
        r = s * r_act
    else:
        raise ValueError("Invalid act_func[1]")

    # Another variable to be able to call a function
    # with a noisy x and compare it to a reference x.
    y = T.dmatrix('y')
    loss = ((r - y)**2)
    sum_loss = T.sum(loss)

    # theano_encode_decode : vectorial function in argument X.
    # theano_loss : vectorial function in argument X.
    # theano_gradients : returns a gradient for each parameter, each of
    #                    which involves all the data X summed,
    #                    so it's not a "vectorial" function.
    self.theano_encode_decode = function([Wb, Wc, b, c, s, x], r)
    self.theano_loss = function([Wb, Wc, b, c, s, x, y], loss)
    self.theano_gradients = function([Wb, Wc, b, c, s, x, y],
                                     [T.grad(sum_loss, Wb), T.grad(sum_loss, Wc),
                                      T.grad(sum_loss, b), T.grad(sum_loss, c),
                                      T.grad(sum_loss, s)])

    # other useful theano functions for the experiments that involve
    # adding noise to the hidden states
    self.theano_encode = function([Wc, c, x], h)
    self.theano_decode = function([Wb, b, s, h], r)
Author: gyom | Project: denoising_autoencoder | Lines: 60 | Source: dae_untied_weights.py
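A hedged call sketch for the encode/decode pair compiled above; the instance name dae, the act_func setting, and all shapes are hypothetical:

    import numpy as np

    # hypothetical sizes: 6 visible units, 4 hidden units, batch of 3
    Wb = np.random.randn(6, 4)
    Wc = np.random.randn(6, 4)
    b, c, s = np.zeros(6), np.zeros(4), 1.0
    x = np.random.randn(3, 6)

    h = dae.theano_encode(Wc, c, x)      # (3, 4) hidden representation
    r = dae.theano_decode(Wb, b, s, h)   # (3, 6) reconstruction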
Note: the theano.tensor.dvector examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub/MSDocs and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.