This article collects typical usage examples of the theano.tensor.nnet.sigmoid function in Python. If you have been wondering what Python's sigmoid function does, how to call it, or what real-world usage looks like, the curated examples here should help.
Below are 20 code examples of the sigmoid function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
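Before the project excerpts, here is a minimal, self-contained sketch of the function itself (not taken from any of the projects below; the values in the comment are approximate):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import sigmoid

x = T.dvector('x')                    # symbolic input vector
f = theano.function([x], sigmoid(x))  # compile the elementwise sigmoid
print(f(np.array([-1.0, 0.0, 1.0])))  # ~[0.2689, 0.5, 0.7311]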
Example 1: build_custom_ann
def build_custom_ann(self, layer_list, ann_type="rlu", nb=784):
    '''Build a feed-forward net: `layer_list` gives the hidden layer sizes,
    `ann_type` picks the activation ("rlu", "sigmoid", or "ht" for
    hyperbolic tangent) and `nb` is the number of input units.'''
    layer_list = [nb] + layer_list
    input = T.dvector('input')
    target = T.wvector('target')
    w_list = []
    x_list = []
    w_list.append(theano.shared(np.random.uniform(low=-.1, high=.1, size=(layer_list[0], layer_list[1]))))
    if ann_type == "rlu":
        x_list.append(T.switch(T.dot(input, w_list[0]) > 0, T.dot(input, w_list[0]), 0))
    elif ann_type == "sigmoid":
        x_list.append(Tann.sigmoid(T.dot(input, w_list[0])))
    elif ann_type == "ht":
        x_list.append(T.tanh(T.dot(input, w_list[0])))
    for count in range(0, len(layer_list) - 2):
        w_list.append(theano.shared(np.random.uniform(low=-.1, high=.1, size=(layer_list[count + 1], layer_list[count + 2]))))
        if ann_type == "rlu":
            x_list.append(T.switch(T.dot(x_list[count], w_list[count + 1]) > 0, T.dot(x_list[count], w_list[count + 1]), 0))
        elif ann_type == "sigmoid":
            x_list.append(Tann.sigmoid(T.dot(x_list[count], w_list[count + 1])))
        elif ann_type == "ht":
            x_list.append(T.tanh(T.dot(x_list[count], w_list[count + 1])))
    # Output layer: 10 units with a rectifier activation
    w_list.append(theano.shared(np.random.uniform(low=-.1, high=.1, size=(layer_list[-1], 10))))
    x_list.append(T.switch(T.dot(x_list[-1], w_list[-1]) > 0, T.dot(x_list[-1], w_list[-1]), 0))
    error = T.sum(pow((target - x_list[-1]), 2))
    params = w_list
    gradients = T.grad(error, params)
    backprops = [(p, p - self.lrate * g) for p, g in zip(params, gradients)]
    self.trainer = theano.function(inputs=[input, target], outputs=error, updates=backprops, allow_input_downcast=True)
    self.predictor = theano.function(inputs=[input], outputs=x_list[-1], allow_input_downcast=True)
Author: Bergalerga, Project: AIProg, Lines: 35, Source file: ann.py
Example 2: get_training_model
def get_training_model(Ws_s, bs_s, dropout=False, lambd=10.0, kappa=1.0):
    # Build three copies of the network: one for the real move, one for a
    # fake random move, and one for the position xp.
    # Train on a negative log likelihood of classifying the right move.
    xc_s, xc_p = get_model(Ws_s, bs_s, dropout=dropout)
    xr_s, xr_p = get_model(Ws_s, bs_s, dropout=dropout)
    xp_s, xp_p = get_model(Ws_s, bs_s, dropout=dropout)
    # loss = -T.log(sigmoid(xc_p + xp_p)).mean()    # negative log likelihood
    # loss += -T.log(sigmoid(-xp_p - xr_p)).mean()  # negative log likelihood
    cr_diff = xc_p - xr_p
    loss_a = -T.log(sigmoid(cr_diff)).mean()
    cp_diff = kappa * (xc_p + xp_p)
    loss_b = -T.log(sigmoid(cp_diff)).mean()
    loss_c = -T.log(sigmoid(-cp_diff)).mean()
    # Add regularization terms
    reg = 0
    for x in Ws_s + bs_s:
        reg += lambd * (x ** 2).mean()
    loss = loss_a + loss_b + loss_c
    return xc_s, xr_s, xp_s, loss, reg, loss_a, loss_b, loss_c
Author: DestinyF, Project: deeppink, Lines: 25, Source file: train.py
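The three terms form a ranking objective: loss_a = -log sigmoid(xc_p - xr_p) pushes the score of the real move above that of the random move, while loss_b + loss_c = -log sigmoid(cp_diff) - log sigmoid(-cp_diff) is minimized when cp_diff = kappa * (xc_p + xp_p) is zero, softly constraining the two related positions to carry opposite scores (a negamax-style consistency term).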
Example 3: test_local_sigm_times_exp
def test_local_sigm_times_exp(self):
    """
    Test the `local_sigm_times_exp` optimization.
    exp(x) * sigm(-x) -> sigm(x)
    exp(-x) * sigm(x) -> sigm(-x)
    """
    def match(func, ops):
        # print [node.op.scalar_op for node in func.maker.fgraph.toposort()]
        assert [node.op for node in func.maker.fgraph.toposort()] == ops

    m = self.get_mode(excluding=['local_elemwise_fusion', 'inplace'])
    x, y = tensor.vectors('x', 'y')

    f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)
    match(f, [sigmoid])

    f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)
    match(f, [tensor.neg, sigmoid])

    f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)
    match(f, [tensor.neg, sigmoid, tensor.neg])

    f = theano.function(
        [x, y],
        (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
         tensor.exp(x * y) * tensor.exp(y)),
        mode=m)
    match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,
              tensor.mul])
Author: LEEKYOUNGHUN, Project: Theano, Lines: 28, Source file: test_sigm.py
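The two rewrites in the docstring follow from sigmoid(x) = 1 / (1 + exp(-x)): multiplying out gives exp(x) * sigmoid(-x) = exp(x) / (1 + exp(x)) = sigmoid(x), and substituting -x for x gives exp(-x) * sigmoid(x) = sigmoid(-x). The assertions check, node by node, that the optimizer has collapsed each expression accordingly.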
Example 4: test_local_sigm_times_exp
def test_local_sigm_times_exp(self):
    """
    Test the `local_sigm_times_exp` optimization.
    exp(x) * sigm(-x) -> sigm(x)
    exp(-x) * sigm(x) -> sigm(-x)
    """
    def match(func, ops):
        # print [node.op.scalar_op for node in func.maker.fgraph.toposort()]
        assert [node.op for node in func.maker.fgraph.toposort()] == ops

    m = self.get_mode(excluding=['local_elemwise_fusion', 'inplace'])
    x, y = tensor.vectors('x', 'y')

    f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)
    match(f, [sigmoid])
    assert check_stack_trace(f, ops_to_check=sigmoid)

    f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)
    match(f, [tensor.neg, sigmoid])
    assert check_stack_trace(f, ops_to_check=sigmoid)

    f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)
    match(f, [tensor.neg, sigmoid, tensor.neg])
    # assert check_stack_trace(f, ops_to_check=sigmoid)

    f = theano.function(
        [x, y],
        (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *
         tensor.exp(x * y) * tensor.exp(y)), mode=m)
    topo = f.maker.fgraph.toposort()
    for op, nb in [(sigmoid, 2), (tensor.mul, 2),
                   (tensor.neg, 1), (tensor.exp, 1)]:
        assert sum([n.op == op for n in topo]) == nb
Author: HapeMask, Project: Theano, Lines: 32, Source file: test_sigm.py
Example 5: lstm_output
def lstm_output(self, y_prev, ch_prev):
    """Calculates the info to pass to the next time step.
    ch_prev is a vector of size 2*hdim."""
    c_prev = ch_prev[:self.hdim]  # T.vector('c_prev')
    h_prev = ch_prev[self.hdim:]  # T.vector('h_prev')
    # gates (input, forget, output)
    i_t = sigmoid(T.dot(self.Ui, h_prev))
    f_t = sigmoid(T.dot(self.Uf, h_prev))
    o_t = sigmoid(T.dot(self.Uo, h_prev))
    # new memory cell
    c_new_t = T.tanh(T.dot(self.Uc, h_prev))
    # final memory cell
    c_t = f_t * c_prev + i_t * c_new_t
    # final hidden state
    h_t = o_t * T.tanh(c_t)
    # Input vector for softmax
    theta_t = T.dot(self.U, h_t) + self.b
    # Softmax prob vector; softmax wraps its output in an extra dimension
    # (a 2-d row rather than a 1-d column), hence the transposes.
    y_hat_t = softmax(theta_t.T).T
    # y_hat_t = y_hat_t[0]
    # Output label is the argmax of the softmax probabilities
    out_label = T.argmax(y_hat_t)
    # final joint state
    ch_t = T.concatenate([c_t, h_t])
    return (out_label, ch_t), scan_module.until(T.eq(out_label, self.out_end))
Author: arthur-tsang, Project: EqnMaster, Lines: 32, Source file: lstm_dec.py
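Unlike a full LSTM step, the gates above see only the previous hidden state (there are no W·x_t input terms), because this is a decoder driven entirely by its own recurrent state; the scan_module.until condition then stops the scan as soon as the end-of-output label self.out_end is produced.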
Example 6: test_log1msigm_to_softplus
def test_log1msigm_to_softplus(self):
    x = T.matrix()

    out = T.log(1 - sigmoid(x))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[1].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a flatten
    out = T.log(1 - T.flatten(sigmoid(x)))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert isinstance(topo[0].op, T.Flatten)
    assert isinstance(topo[1].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a reshape
    out = T.log(1 - sigmoid(x).reshape([x.size]))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    # assert len(topo) == 3
    assert any(isinstance(node.op, T.Reshape) for node in topo)
    assert any(isinstance(getattr(node.op, 'scalar_op', None),
                          theano.tensor.nnet.sigm.ScalarSoftplus)
               for node in topo)
    f(numpy.random.rand(54, 11).astype(config.floatX))
Author: LEEKYOUNGHUN, Project: Theano, Lines: 33, Source file: test_sigm.py
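The rewrite exercised here rests on the identity log(1 - sigmoid(x)) = log(sigmoid(-x)) = -log(1 + exp(x)) = -softplus(x), which is why each optimized graph reduces to a ScalarSoftplus node followed by a negation, with at most a Flatten or Reshape in between.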
Example 7: scan_function
def scan_function(input, inter_output, W, U, Wz, Uz, Wr, Ur, buw, bz, br):
    rj = nnet.sigmoid(T.dot(input, Wr) + T.dot(inter_output, Ur) + br)    # reset gate
    zj = nnet.sigmoid(T.dot(input, Wz) + T.dot(inter_output, Uz) + bz)    # update gate
    htilde = T.tanh(T.dot(input, W) + rj * T.dot(inter_output, U) + buw)  # candidate state
    inter_output = zj * inter_output + (1 - zj) * htilde
    return inter_output
Author: linkinwong, Project: dnn-for-disfluency, Lines: 7, Source file: network.py
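A hedged sketch of how such a step function is typically wired into theano.scan; the sizes, parameter initialization, and variable names below are assumptions for illustration, not taken from the original project:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import nnet

n_in, n_hidden = 8, 16  # assumed sizes
rng = np.random.RandomState(0)
def shared(*shape):
    return theano.shared(rng.uniform(-0.1, 0.1, size=shape))
W, Wz, Wr = shared(n_in, n_hidden), shared(n_in, n_hidden), shared(n_in, n_hidden)
U, Uz, Ur = shared(n_hidden, n_hidden), shared(n_hidden, n_hidden), shared(n_hidden, n_hidden)
buw, bz, br = shared(n_hidden), shared(n_hidden), shared(n_hidden)

inputs = T.matrix('inputs')  # (n_steps, n_in), one row per time step
hs, _ = theano.scan(scan_function,
                    sequences=inputs,                   # feeds `input`
                    outputs_info=T.zeros((n_hidden,)),  # initial `inter_output`
                    non_sequences=[W, U, Wz, Uz, Wr, Ur, buw, bz, br])
encode = theano.function([inputs], hs[-1])  # final hidden state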
Example 8: forward
def forward(self, data, h):
    z = NNET.sigmoid(THT.dot(data, self.Wz) + THT.dot(h, self.Uz) + self.bz)  # update gate
    r = NNET.sigmoid(THT.dot(data, self.Wr) + THT.dot(h, self.Ur) + self.br)  # reset gate
    c = THT.tanh(THT.dot(data, self.Wg) + THT.dot(r * h, self.Ug) + self.bg)  # candidate
    out = (1 - z) * h + z * c
    return out
Author: fhdiaze, Project: DeepTracking, Lines: 7, Source file: SingleGru.py
Example 9: sample_gradient
def sample_gradient():
    print("differentiation")
    x, y = T.dscalars("x", "y")
    z = (x + 2 * y) ** 2
    # dz/dx
    gx = T.grad(z, x)
    fgx = theano.function([x, y], gx)
    print(fgx(1.0, 1.0))
    # dz/dy
    gy = T.grad(z, y)
    fgy = theano.function([x, y], gy)
    print(fgy(1.0, 1.0))
    # d{sigmoid(x)}/dx
    x = T.dscalar("x")
    sig = sigmoid(x)
    dsig = T.grad(sig, x)
    f = theano.function([x], dsig)
    print(f(0.0))
    print(f(1.0))
    # d{sigmoid(<x,w>)}/dx
    w = T.dscalar("w")
    sig = sigmoid(T.dot(x, w))
    dsig = T.grad(sig, x)
    f = theano.function([x, w], dsig)
    print(f(1.0, 2.0))
    print(f(3.0, 4.0))
    print()
Author: norikinishida, Project: snippets, Lines: 27, Source file: sample.py
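For reference: d sigmoid(x)/dx = sigmoid(x) * (1 - sigmoid(x)), so f(0.0) above prints 0.25 and f(1.0) prints about 0.1966; in the dot-product case the chain rule multiplies this derivative by w.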
Example 10: __step
def __step(img, prev_bbox, prev_att, state):
    cx = (prev_bbox[:, 2] + prev_bbox[:, 0]) / 2.
    cy = (prev_bbox[:, 3] + prev_bbox[:, 1]) / 2.
    sigma = TT.exp(prev_att[:, 0]) * (max(img_col, img_row) / 2)
    fract = TT.exp(prev_att[:, 1])
    amplifier = TT.exp(prev_att[:, 2])
    eps = 1e-8
    abs_cx = (cx + 1) / 2. * (img_col - 1)
    abs_cy = (cy + 1) / 2. * (img_row - 1)
    abs_stride = (fract * (max(img_col, img_row) - 1)) * ((1. / (NUM_N - 1.)) if NUM_N > 1 else 0)
    FX, FY = __filterbank(abs_cx, abs_cy, abs_stride, sigma)
    unnormalized_mask = (FX.dimshuffle(0, 'x', 1, 'x', 2) * FY.dimshuffle(0, 1, 'x', 2, 'x')).sum(axis=2).sum(axis=1)
    mask = unnormalized_mask  # / (unnormalized_mask.sum(axis=2).sum(axis=1) + eps).dimshuffle(0, 'x', 'x')
    masked_img = (mask.dimshuffle(0, 'x', 1, 2) * img) * amplifier.dimshuffle(0, 'x', 'x', 'x')
    conv1 = conv2d(masked_img, conv1_filters, subsample=(conv1_stride, conv1_stride))
    act1 = TT.tanh(conv1)
    flat1 = TT.reshape(act1, (batch_size, conv1_output_dim))
    gru_in = TT.concatenate([flat1, prev_bbox], axis=1)
    gru_z = NN.sigmoid(TT.dot(gru_in, Wz) + TT.dot(state, Uz) + bz)
    gru_r = NN.sigmoid(TT.dot(gru_in, Wr) + TT.dot(state, Ur) + br)
    gru_h_ = TT.tanh(TT.dot(gru_in, Wg) + TT.dot(gru_r * state, Ug) + bg)
    gru_h = (1 - gru_z) * state + gru_z * gru_h_
    bbox = TT.tanh(TT.dot(gru_h, W_fc2) + b_fc2)
    att = TT.dot(gru_h, W_fc3) + b_fc3
    return bbox, att, gru_h, mask
Author: BarclayII, Project: tracking-with-rnn, Lines: 30, Source file: recurrent_att.py
Example 11: make_ann
def make_ann(self, hidden_layers, lr):
    self.W = [
        theano.shared(
            rng.uniform(-0.1, 0.1, size=(784, hidden_layers[0])))
    ]
    # Bias must match the first hidden layer's width, not the input width
    self.B = [theano.shared(rng.uniform(-0.1, 0.1, size=(hidden_layers[0],)))]
    innput = T.vector('innput')
    self.X = [Tann.sigmoid(T.dot(innput, self.W[0]) + self.B[0])]
    params = [self.W[0], self.B[0]]
    for n in range(1, len(hidden_layers)):
        # Find the layer's number of inputs and outputs
        n_in = hidden_layers[n - 1]
        n_out = hidden_layers[n]
        # Make weights and bias for the layer (bias has the output width)
        self.W.append(
            theano.shared(rng.uniform(-0.1, 0.1, size=(n_in, n_out))))
        self.B.append(theano.shared(rng.uniform(-0.1, 0.1, size=(n_out,))))
        # Each activation feeds on the previous layer's activation
        self.X.append(
            Tann.sigmoid(T.dot(self.X[n - 1], self.W[n]) + self.B[n]))
        params.append(self.W[n])
        params.append(self.B[n])
    # Reconstruction error against the final activation
    error = T.sum((innput - self.X[-1]) ** 2)
    gradients = T.grad(error, params)
    backprop_acts = [(p, p - self.lrate * g)
                     for p, g in zip(params, gradients)]
    self.predictor = theano.function([innput], self.X)
    self.trainer = theano.function([innput], error, updates=backprop_acts)
Author: andlon93, Project: ANN_MNIST, Lines: 35, Source file: ANN.py
Example 12: fp
def fp(self, x, _):
    relu = lambda x: T.max(x, 0)
    h = self.model.hiddens["h_%d" % self.hidden_id]['val']
    c = self.model.hiddens["c_%d" % self.hidden_id]['val']
    # LSTM with peephole connections: the Wc* terms let the gates see the cell state
    it = sigmoid(T.dot(x, self.Wxi) + T.dot(h, self.Whi) + T.dot(c, self.Wci) + self.Bi)
    ft = sigmoid(T.dot(x, self.Wxf) + T.dot(h, self.Whf) + T.dot(c, self.Wcf) + self.Bf)
    self.ct = ft * c + it * T.tanh(T.dot(x, self.Wxc) + T.dot(h, self.Whc) + self.Bc)
    ot = sigmoid(T.dot(x, self.Wxo) + T.dot(h, self.Who) + T.dot(self.ct, self.Wco) + self.Bo)
    self.output = ot * T.tanh(self.ct)
Author: wojzaremba, Project: rnn, Lines: 9, Source file: layer.py
Example 13: gru_timestep
def gru_timestep(self, x_t, h_prev):
    Lx_t = self.L[:, x_t]
    # gates (update, reset)
    z_t = sigmoid(T.dot(self.Wz, Lx_t) + T.dot(self.Uz, h_prev))
    r_t = sigmoid(T.dot(self.Wr, Lx_t) + T.dot(self.Ur, h_prev))
    # combine them
    h_new_t = T.tanh(T.dot(self.Wh, Lx_t) + r_t * T.dot(self.Uh, h_prev))
    h_t = z_t * h_prev + (1 - z_t) * h_new_t
    return h_t
Author: arthur-tsang, Project: EqnMaster, Lines: 10, Source file: gru_enc.py
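In standard GRU notation this step computes z_t = sigmoid(Wz·x_t + Uz·h_{t-1}), r_t = sigmoid(Wr·x_t + Ur·h_{t-1}), the candidate h~_t = tanh(Wh·x_t + r_t * (Uh·h_{t-1})), and the interpolation h_t = z_t * h_{t-1} + (1 - z_t) * h~_t, with elementwise products throughout; here x_t enters as the embedding column Lx_t.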
Example 14: rbm_ais_gibbs_for_v
def rbm_ais_gibbs_for_v(rbmA_params, rbmB_params, beta, v_sample, seed=23098):
    """
    Parameters
    ----------
    rbmA_params: list
        Parameters of the baserate model (usually infinite temperature).
        List should be of length 3 and contain numpy.ndarrays corresponding
        to model parameters (weights, visbias, hidbias).
    rbmB_params: list
        Similar to rbmA_params, but for the model at temperature 1.
    beta: theano.shared
        Scalar, represents the inverse temperature at which we wish to
        sample from.
    v_sample: theano.shared
        Matrix of shape (n_runs, nvis), state of current particles.
    seed: int
        Optional seed parameter for sampling from binomial units.
    """
    (weights_a, visbias_a, hidbias_a) = rbmA_params
    (weights_b, visbias_b, hidbias_b) = rbmB_params
    theano_rng = RandomStreams(seed)
    # equation 15 (Salakhutdinov & Murray 2008)
    ph_a = nnet.sigmoid(
        (1 - beta) * (tensor.dot(v_sample, weights_a) + hidbias_a))
    ha_sample = theano_rng.binomial(
        size=(v_sample.shape[0], len(hidbias_a)),
        n=1,
        p=ph_a,
        dtype=config.floatX)
    # equation 16 (Salakhutdinov & Murray 2008)
    ph_b = nnet.sigmoid(beta * (tensor.dot(v_sample, weights_b) + hidbias_b))
    hb_sample = theano_rng.binomial(
        size=(v_sample.shape[0], len(hidbias_b)),
        n=1,
        p=ph_b,
        dtype=config.floatX)
    # equation 17 (Salakhutdinov & Murray 2008)
    pv_act = (1 - beta) * (tensor.dot(ha_sample, weights_a.T) + visbias_a) + \
        beta * (tensor.dot(hb_sample, weights_b.T) + visbias_b)
    pv = nnet.sigmoid(pv_act)
    new_v_sample = theano_rng.binomial(
        size=(v_sample.shape[0], len(visbias_b)),
        n=1,
        p=pv,
        dtype=config.floatX)
    return new_v_sample
Author: hannes-brt, Project: pylearn, Lines: 55, Source file: rbm_tools.py
Example 15: get_reconstruction_cost
def get_reconstruction_cost(self, updates, pre_nv):
    '''
    Approximation to the reconstruction error
    '''
    cross_entropy = T.mean(
        T.sum(self.inputs * T.log(sigmoid(pre_nv)) +
              (1 - self.inputs) * T.log(1 - sigmoid(pre_nv)),
              axis=1
              )
        )
    return cross_entropy
Author: Song-Tu, Project: DeepHash, Lines: 11, Source file: rbm.py
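This is the binary cross-entropy between the inputs and the reconstruction sigmoid(pre_nv), computed from the pre-sigmoid activation pre_nv. The T.log(1 - sigmoid(pre_nv)) factor is exactly the pattern that the log1msigm optimization tested in Example 6 rewrites to -softplus(pre_nv), which keeps the expression numerically stable for large activations.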
Example 16: _step
def _step(x_, h_, c_):
    preact = tensor.dot(h_, U)
    preact += x_
    i = nnet.sigmoid(_slice(preact, 0, n_hidden))
    f = nnet.sigmoid(_slice(preact, 1, n_hidden))
    o = nnet.sigmoid(_slice(preact, 2, n_hidden))
    c = tensor.tanh(_slice(preact, 3, n_hidden))
    c = f * c_ + i * c
    h = o * tensor.tanh(c)
    return h, c
Author: markstoehr, Project: lstm_acoustic_embedding, Lines: 12, Source file: lstm.py
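The `_slice` helper is not shown in this excerpt. A plausible definition, following the convention of Theano's official LSTM tutorial (an assumption about this project, not taken from it), cuts the n-th block of width `dim` out of the concatenated pre-activation:

def _slice(_x, n, dim):
    # Blocks 0..3 hold the input gate, forget gate, output gate and candidate
    if _x.ndim == 3:
        return _x[:, :, n * dim:(n + 1) * dim]
    return _x[:, n * dim:(n + 1) * dim]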
Example 17: new_output
def new_output(self, y_prev, h_prev):
    # gates (update, reset)
    z_t = sigmoid(T.dot(self.Uz, h_prev))
    r_t = sigmoid(T.dot(self.Ur, h_prev))
    # combine them
    h_new_t = T.tanh(r_t * T.dot(self.Uh, h_prev))
    h_t = z_t * h_prev + (1 - z_t) * h_new_t
    # compute the new out_label
    y_hat_t = softmax((T.dot(self.U, h_t) + self.b).T).T
    out_label = T.argmax(y_hat_t)
    return (out_label, h_t), scan_module.until(T.eq(out_label, self.out_end))
Author: arthur-tsang, Project: EqnMaster, Lines: 12, Source file: new_dec.py
Example 18: _step
def _step(img, prev_bbox, state):
    # conv1 has shape (batch_size, nr_filters, some_rows, some_cols)
    conv1 = conv2d(img, conv1_filters, subsample=(conv1_stride, conv1_stride))
    act1 = TT.tanh(conv1)
    flat1 = TT.reshape(act1, (batch_size, conv1_output_dim))
    gru_in = TT.concatenate([flat1, prev_bbox], axis=1)
    gru_z = NN.sigmoid(TT.dot(gru_in, Wz) + TT.dot(state, Uz) + bz)
    gru_r = NN.sigmoid(TT.dot(gru_in, Wr) + TT.dot(state, Ur) + br)
    gru_h_ = TT.tanh(TT.dot(gru_in, Wg) + TT.dot(gru_r * state, Ug) + bg)
    gru_h = (1 - gru_z) * state + gru_z * gru_h_
    bbox = TT.tanh(TT.dot(gru_h, W_fc2) + b_fc2)
    return bbox, gru_h
Author: BarclayII, Project: recurrent-tracker, Lines: 12, Source file: recurrent_base.py
Example 19: build_ann
def build_ann(self, nb=784, nh=2, learning_rate=0.1):
    w1 = theano.shared(np.random.uniform(-.1, .1, size=(nb, nh)))
    w2 = theano.shared(np.random.uniform(-.1, .1, size=(nh, nb)))
    input = T.dvector('input')
    b1 = theano.shared(np.random.uniform(-.1, .1, size=nh))
    b2 = theano.shared(np.random.uniform(-.1, .1, size=nb))
    x1 = Tann.sigmoid(T.dot(input, w1) + b1)  # encoder: nb -> nh
    x2 = Tann.sigmoid(T.dot(x1, w2) + b2)     # decoder: nh -> nb
    error = T.sum((input - x2) ** 2)          # reconstruction error
    params = [w1, b1, w2, b2]
    gradients = T.grad(error, params)
    backprop_acts = [(p, p - learning_rate * g) for p, g in zip(params, gradients)]
    self.predictor = theano.function([input], [x2, x1])
    self.trainer = theano.function([input], error, updates=backprop_acts)
Author: Bergalerga, Project: AIProg, Lines: 14, Source file: ann.py
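A minimal sketch of how the resulting trainer and predictor might be driven; `net` (an instance of the enclosing class) and `images` (a list of flattened 784-element arrays) are assumptions for illustration:

net.build_ann(nb=784, nh=32, learning_rate=0.1)
for epoch in range(10):
    # one pass over the data; trainer returns the reconstruction error per image
    total_error = sum(net.trainer(img) for img in images)
    print("epoch %d: reconstruction error %.4f" % (epoch, total_error))
reconstruction, hidden_code = net.predictor(images[0])  # decode and encode one image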
Example 20: dgru_output
def dgru_output(self, x_t, old_label, h_prev):
    Lx_t = self.L[:, x_t]
    # gates (update, reset)
    z_t = sigmoid(T.dot(self.Wz, Lx_t) + T.dot(self.Uz, h_prev))
    r_t = sigmoid(T.dot(self.Wr, Lx_t) + T.dot(self.Ur, h_prev))
    # combine them
    h_new_t = T.tanh(T.dot(self.Wh, Lx_t) + r_t * T.dot(self.Uh, h_prev))
    h_t = z_t * h_prev + (1 - z_t) * h_new_t
    y_hat_t = softmax(T.dot(self.U, h_t) + self.b)[0]
    out_label = T.argmax(y_hat_t)
    return out_label, h_t
Author: arthur-tsang, Project: EqnMaster, Lines: 14, Source file: d_gru.py
Note: the theano.tensor.nnet.sigmoid examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.