This article collects typical usage examples of the torch.tanh function in Python. If you are asking yourself how exactly torch.tanh is used in practice, the curated code examples below may help.
Below are 20 code examples of the tanh function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system surface better Python code samples.
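Before the examples, a quick standalone look at what torch.tanh computes: the element-wise hyperbolic tangent, which squashes values into (-1, 1).

import torch

x = torch.tensor([-2.0, 0.0, 2.0])
y = torch.tanh(x)                   # element-wise, same shape: tensor([-0.9640, 0.0000, 0.9640])
assert torch.allclose(y, x.tanh())  # also available as a Tensor method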
Example 1: forward
def forward(self, input_, hx):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0)
.expand(batch_size, *self.bias.size()))
wh = torch.mm(h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
bn_wh = self.bn_hh(wh)
bn_wi = self.bn_ih(wi)
f, i, o, g = torch.split(bn_wh + bn_wi + bias_batch,
                         split_size_or_sections=self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(self.bn_c(c_1))
return h_1, c_1
Author: Joyce94 | Project: sentence_classification | Lines: 29 | Source: bnlstm.py
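A usage sketch for a cell with this forward signature (the BNLSTMCell name and constructor below are assumptions; only forward(input_, hx) comes from the example above):

import torch

batch, input_size, hidden_size = 4, 10, 20
cell = BNLSTMCell(input_size, hidden_size)         # hypothetical constructor, not shown in the source
h = torch.zeros(batch, hidden_size)
c = torch.zeros(batch, hidden_size)
for x_t in torch.randn(5, batch, input_size):      # unroll the cell over 5 timesteps
    h, c = cell(x_t, (h, c))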
Example 2: forward
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
mu = self.fc3(x)
logstd = torch.zeros_like(mu)
std = torch.exp(logstd)
return mu, std
Author: lanseyege | Project: lets-do-irl | Lines: 7 | Source: model.py
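Because this head returns mu and std, a common follow-up is sampling actions with torch.distributions (a sketch; policy and state are placeholders, not from the source):

import torch
from torch.distributions import Normal

mu, std = policy(state)                         # placeholders for the model and its input
dist = Normal(mu, std)
action = dist.sample()                          # one action per batch element
log_prob = dist.log_prob(action).sum(dim=-1)    # joint log-density over action dimensions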
Example 3: test_cse
def test_cse(self):
x = Variable(torch.Tensor([0.4, 0.3]), requires_grad=True)
y = Variable(torch.Tensor([0.7, 0.5]), requires_grad=True)
trace = torch._C._tracer_enter((x, y), 0)
w = (x + y) * (x + y) * (x + y)
t = torch.tanh(w) + torch.tanh(w)
z = (x + y) * (x + y) * (x + y) + t
torch._C._tracer_exit((z,))
torch._C._jit_pass_lint(trace)
torch._C._jit_pass_cse(trace)
self.assertExpected(str(trace))
Author: Northrend | Project: pytorch | Lines: 13 | Source: test_jit.py
Example 4: forward
def forward(self, input, doc_lens):
"""
:param input: (B*S, L)
:param doc_lens: (B)
:return:
"""
sent_lens = torch.sum(torch.sign(input), dim=1).data # (B*S); word id is a positive number and pad_id is 0
input = self.embed(input) # (B*S, L, D)
# word level GRU
input = self.word_RNN(input)[0] # (B*S, L, D) -> (B*S, L, 2*H), (B*S, 1, 2*H) -> (B*S, L, 2*H)
# word_out = self.avg_pool1d(x, sent_lens)
word_out = self.max_pool1d(input, sent_lens) # (B*S, L, 2*H) -> (B*S, 2*H)
# make sent features(pad with zeros)
input = self.pad_doc(word_out, doc_lens) # (B*S, 2*H) -> (B, max_doc_len, 2*H)
# sent level GRU
sent_out = self.sent_RNN(input)[0] # (B, max_doc_len, 2*H) -> (B, max_doc_len, 2*H)
# docs = self.avg_pool1d(sent_out, doc_lens) # (B, 2*H)
docs = self.max_pool1d(sent_out, doc_lens) # (B, 2*H)
batch_probs = []
for index, doc_len in enumerate(doc_lens): # iterate over the B documents in the batch
valid_hidden = sent_out[index, :doc_len, :] # (doc_len, 2*H)
doc = torch.tanh(self.fc(docs[index])).unsqueeze(0) # (1, 2*H)
s = torch.zeros(1, 2 * self.args.hidden_dim).to(opt.device) # (1, 2*H)
probs = []
for position, h in enumerate(valid_hidden):
h = h.view(1, -1) # (1, 2*H)
# get position embeddings
abs_index = torch.LongTensor([[position]]).to(opt.device)
abs_features = self.abs_pos_embed(abs_index).squeeze(0)
rel_index = int((position + 1) * 9.0 / doc_len)
rel_index = torch.LongTensor([[rel_index]]).to(opt.device)
rel_features = self.rel_pos_embed(rel_index).squeeze(0)
# classification layer
content = self.content(h) # (1, 2*H) -> (1, 1)
salience = self.salience(h, doc) # (1, 2*H), (1, 2*H) -> (1, 1)
novelty = -1 * self.novelty(h, torch.tanh(s)) # (1, 2*H), (1, 2*H) -> (1, 1)
abs_p = self.abs_pos(abs_features) # (1, 1)
rel_p = self.rel_pos(rel_features) # (1, 1)
prob = torch.sigmoid(content + salience + novelty + abs_p + rel_p + self.bias) # (1, 1); [[0.35]]
s = s + torch.mm(prob, h) # (1, 2*H) + (1, 1) * (1, 2*H) -> (1, 2*H)
probs.append(prob) # S * (1, 1)
batch_probs.append(torch.cat(probs).squeeze()) # (S*1, 1) -> (S) -> B * (S)
# return torch.stack(batch_probs).squeeze() # B * (S) -> (B, S)
return torch.cat(batch_probs).squeeze() # B * (S) -> (B * S)
Author: coder352 | Project: shellscript | Lines: 49 | Source: sentence_rnn_rnn.py
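Schematically, the per-sentence probability above decomposes as in SummaRuNNer-style extractive scoring (a restatement of the code above, not an addition to it):

# prob_j = sigmoid( content(h_j)                 # informativeness of sentence j
#                 + salience(h_j, doc)           # agreement with the document vector
#                 - novelty(h_j, tanh(s_j))      # redundancy against the running summary s_j
#                 + abs_pos + rel_pos + bias )   # absolute/relative position terms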
Example 5: forward
def forward(self, data, last_hidden):
hx, cx = last_hidden
m = self.wmx(data) * self.wmh(hx)
gates = self.wx(data) + self.wh(m)
i, f, o, u = gates.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
u = torch.tanh(u)
o = torch.sigmoid(o)
cy = f * cx + i * u
hy = o * torch.tanh(cy)
return hy, cy
Author: anoopsarkar | Project: nlp-class-hw | Lines: 15 | Source: models.py
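The gates.chunk(4, 1) call splits the pre-activation tensor into four equal slices along dim 1, one per gate; in isolation:

import torch

gates = torch.randn(2, 8)          # (batch, 4 * hidden) with hidden = 2
i, f, o, u = gates.chunk(4, 1)     # four (2, 2) slices
assert i.shape == (2, 2)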
Example 6: encode
def encode(self, src_sents_var: torch.Tensor, src_sent_lens: List[int]) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
"""
Use a GRU/LSTM to encode source sentences into hidden states
Args:
src_sents_var: tensor of source sentence token ids, shape (src_sent_len, batch_size)
src_sent_lens: list of actual lengths of the source sentences
Returns:
src_encodings: hidden states of tokens in the source sentences; this could be a variable
with shape (batch_size, source_sentence_length, encoding_dim), or in other formats
decoder_init_state: decoder GRU/LSTM's initial state, computed from source encodings
"""
# (src_sent_len, batch_size, embed_size)
src_word_embeds = self.src_embed(src_sents_var)
packed_src_embed = pack_padded_sequence(src_word_embeds, src_sent_lens)
# src_encodings: (src_sent_len, batch_size, hidden_size * 2)
src_encodings, (last_state, last_cell) = self.encoder_lstm(packed_src_embed)
src_encodings, _ = pad_packed_sequence(src_encodings)
# (batch_size, src_sent_len, hidden_size * 2)
src_encodings = src_encodings.permute(1, 0, 2)
dec_init_cell = self.decoder_cell_init(torch.cat([last_cell[0], last_cell[1]], dim=1))
dec_init_state = torch.tanh(dec_init_cell)
return src_encodings, (dec_init_state, dec_init_cell)
Author: chubbymaggie | Project: pytorch_basic_nmt | Lines: 28 | Source: nmt.py
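The pack/pad round trip used above expects sequences sorted by decreasing length (older PyTorch versions require this; newer ones accept enforce_sorted=False). A minimal standalone check:

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

seqs = torch.randn(7, 3, 5)                    # (max_len, batch, embed), batch_first=False
lens = [7, 5, 2]                               # sorted descending
packed = pack_padded_sequence(seqs, lens)
padded, out_lens = pad_packed_sequence(packed)
assert padded.shape == (7, 3, 5) and out_lens.tolist() == lens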
Example 7: forward
def forward(self, s_t_hat, h, enc_padding_mask, coverage):
b, t_k, n = list(h.size())
h = h.view(-1, n) # B * t_k x 2*hidden_dim
encoder_feature = self.W_h(h)
dec_fea = self.decode_proj(s_t_hat) # B x 2*hidden_dim
dec_fea_expanded = dec_fea.unsqueeze(1).expand(b, t_k, n).contiguous() # B x t_k x 2*hidden_dim
dec_fea_expanded = dec_fea_expanded.view(-1, n) # B * t_k x 2*hidden_dim
att_features = encoder_feature + dec_fea_expanded # B * t_k x 2*hidden_dim
if self.args.is_coverage:
coverage_input = coverage.view(-1, 1) # B * t_k x 1
coverage_feature = self.W_c(coverage_input) # B * t_k x 2*hidden_dim
att_features = att_features + coverage_feature
e = torch.tanh(att_features) # B * t_k x 2*hidden_dim
scores = self.v(e) # B * t_k x 1
scores = scores.view(-1, t_k) # B x t_k
attn_dist_ = F.softmax(scores, dim=1)*enc_padding_mask # B x t_k
normalization_factor = attn_dist_.sum(1, keepdim=True)
attn_dist = attn_dist_ / normalization_factor
attn_dist = attn_dist.unsqueeze(1) # B x 1 x t_k
h = h.view(-1, t_k, n) # B x t_k x 2*hidden_dim
c_t = torch.bmm(attn_dist, h) # B x 1 x n
c_t = c_t.view(-1, self.args.hidden_dim * 2) # B x 2*hidden_dim
attn_dist = attn_dist.view(-1, t_k) # B x t_k
if self.args.is_coverage:
coverage = coverage.view(-1, t_k)
coverage = coverage + attn_dist
return c_t, attn_dist, coverage
Author: coder352 | Project: shellscript | Lines: 35 | Source: attention_original.py
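The mask-then-renormalize idiom above (softmax, multiply by the padding mask, divide by the new row sum) can be verified in isolation:

import torch
import torch.nn.functional as F

scores = torch.randn(2, 4)
mask = torch.tensor([[1., 1., 0., 0.],
                     [1., 1., 1., 0.]])
attn = F.softmax(scores, dim=1) * mask
attn = attn / attn.sum(1, keepdim=True)        # rows sum to 1 over the unmasked positions
assert torch.allclose(attn.sum(1), torch.ones(2))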
Example 8: forward
def forward(self, input, hidden_state):
hidden, c = hidden_state  # hidden and c are feature maps with several channels
combined = torch.cat((input, hidden), 1)  # concatenate along the channel axis
A = self.conv(combined)
(ai, af, ao, ag) = torch.split(A, self.num_features, dim=1)  # four gate pre-activations
i = torch.sigmoid(ai)
f = torch.sigmoid(af)
o = torch.sigmoid(ao)
g = torch.tanh(ag)
next_c = f * c + i * g
next_h = o * torch.tanh(next_c)
return next_h, next_c
Author: praveenkumarchandaliya | Project: pytorch_convlstm | Lines: 16 | Source: conv_lstm.py
Example 9: decode_step
def decode_step(self, enc_hs, enc_mask, input_, hidden):
src_seq_len, bat_siz = enc_mask.shape
h_t, hidden = self.dec_rnn(input_, hidden)
# Concatenate the ht and hs
# ctx_trans: batch x seq_len x (trg_hid_siz*2)
ctx_trans = torch.cat(
(h_t.unsqueeze(1).expand(-1, src_seq_len, -1), enc_hs[1].transpose(
0, 1)),
dim=2)
trans = F.softmax(self.trans(ctx_trans), dim=-1)
trans_list = trans.split(1, dim=1)
ws = (self.wid_siz - 1) // 2
trans_shift = [
F.pad(t, (-ws + i, src_seq_len - (ws + 1) - i))
for i, t in enumerate(trans_list)
]
trans = torch.cat(trans_shift, dim=1)
trans = trans * enc_mask.transpose(0, 1).unsqueeze(1) + EPSILON
trans = trans / trans.sum(-1, keepdim=True)
trans = trans.log()
# Concatenate the ht and hs
# ctx_emiss: batch x seq_len x (trg_hid_siz+src_hid_size*2)
ctx_emiss = torch.cat(
(h_t.unsqueeze(1).expand(-1, src_seq_len, -1), enc_hs[0].transpose(
0, 1)),
dim=2)
ctx = torch.tanh(self.linear_out(ctx_emiss))
# emiss: batch x seq_len x nb_vocab
emiss = F.log_softmax(self.final_out(ctx), dim=-1)
return trans, emiss, hidden
Author: UriSha | Project: sigmorphon | Lines: 33 | Source: model.py
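Note that F.pad with negative amounts crops rather than pads; that is what shifts each transition row into a band in the code above. In isolation:

import torch
import torch.nn.functional as F

t = torch.arange(6.).view(1, 1, 6)
print(F.pad(t, (-2, 1)))   # crops 2 from the left, zero-pads 1 on the right: [[[2., 3., 4., 5., 0.]]]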
Example 10: forward
def forward(self, input_seq, last_hidden, encoder_outputs):
# Note: we run this one step at a time
# Get the embedding of the current input word (last output word)
batch_size = input_seq.size(0)
embedded = self.embedding(input_seq)
embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N
# Get current hidden state from input word and last hidden state
rnn_output, hidden = self.gru(embedded, last_hidden)
# Calculate attention from current RNN state and all encoder outputs;
# apply to encoder outputs to get weighted average
context = self.attn(rnn_output, encoder_outputs)
context = context.squeeze(1)
# context: (B, hidden_size), e.g. 32 x 256
# Attentional vector using the RNN hidden state and context vector
# concatenated together (Luong eq. 5)
rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N
# rnn_output: (B, hidden_size), e.g. 32 x 256
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.concat(concat_input))
# Finally predict next token (Luong eq. 6, without softmax)
output = self.out(concat_output)
# output: (B, vocab_size)
output = self.LogSoftmax(output)
# Return final output, hidden state
return output, hidden
Author: vwrj | Project: neural_machine_translation | Lines: 31 | Source: V2-Attention-Vish.py
Example 11: forward
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
Author: jiaxiangshang | Project: pyhowfar | Lines: 17 | Source: layers.py
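A hypothetical driver for this cell (the ConvLSTMCell constructor below is an assumption; only forward(input_tensor, cur_state) is shown in the source):

import torch

B, C_in, H, W, hidden_dim = 2, 3, 16, 16, 8
cell = ConvLSTMCell(input_dim=C_in, hidden_dim=hidden_dim,
                    kernel_size=(3, 3), bias=True)      # assumed signature
h = torch.zeros(B, hidden_dim, H, W)
c = torch.zeros(B, hidden_dim, H, W)
h, c = cell(torch.randn(B, C_in, H, W), (h, c))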
Example 12: forward
def forward(self, words):
emb = self.embedding(words)
emb_sum = torch.sum(emb, dim=0) # size(emb_sum) = emb_size
h = emb_sum.view(1, -1) # size(h) = 1 x emb_size
for i in range(self.nlayers):
h = torch.tanh(self.linears[i](h))
out = self.output_layer(h)
return out
Author: aiedward | Project: nn4nlp-code | Lines: 8 | Source: model.py
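The forward pass implies a module shaped roughly like the following (a sketch; the class name, constructor, and hyperparameter names are assumptions, not the author's code):

import torch.nn as nn

class DeepCBoW(nn.Module):                       # class name assumed
    def __init__(self, nwords, emb_size, hid_size, ntags, nlayers=2):
        super().__init__()
        self.nlayers = nlayers
        self.embedding = nn.Embedding(nwords, emb_size)
        # first layer maps emb_size -> hid_size, later layers hid_size -> hid_size
        self.linears = nn.ModuleList(
            [nn.Linear(emb_size if i == 0 else hid_size, hid_size)
             for i in range(nlayers)])
        self.output_layer = nn.Linear(hid_size, ntags)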
Example 13: f
def f(x, y):
out = x + y
with torch.jit.scope('Foo', out):
out = x * out
with torch.jit.scope('Bar', out):
out = torch.tanh(out)
out = torch.sigmoid(out)
return out
Author: bhuWenDongchao | Project: pytorch | Lines: 8 | Source: test_jit.py
Example 14: forward
def forward(self, input_, c_input, hx):
"""
Args:
batch = 1
input_: A (batch, input_size) tensor containing input
features.
c_input: A list of length c_num; each element is a cell-state input c_t from a skip word, with shape (batch, hidden_size).
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
#assert(batch_size == 1)
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
i, o, g = torch.split(wh_b + wi, split_size_or_sections=self.hidden_size, dim=1)
i = torch.sigmoid(i)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_num = len(c_input)
if c_num == 0:
f = 1 - i
c_1 = f*c_0 + i*g
h_1 = o * torch.tanh(c_1)
else:
c_input_var = torch.cat(c_input, 0)
alpha_bias_batch = (self.alpha_bias.unsqueeze(0).expand(batch_size, *self.alpha_bias.size()))  # note: unused; the addmm below uses self.alpha_bias directly
c_input_var = c_input_var.squeeze(1) ## (c_num, hidden_dim)
alpha_wi = torch.addmm(self.alpha_bias, input_, self.alpha_weight_ih).expand(c_num, self.hidden_size)
alpha_wh = torch.mm(c_input_var, self.alpha_weight_hh)
alpha = torch.sigmoid(alpha_wi + alpha_wh)
## alpha = i concat alpha
alpha = torch.exp(torch.cat([i, alpha],0))
alpha_sum = alpha.sum(0)
## alpha = softmax for each hidden element
alpha = torch.div(alpha, alpha_sum)
merge_i_c = torch.cat([g, c_input_var],0)
c_1 = merge_i_c * alpha
c_1 = c_1.sum(0).unsqueeze(0)
h_1 = o * torch.tanh(c_1)
return h_1, c_1
Author: chongp | Project: Name-Entity-Recognition | Lines: 46 | Source: latticelstm.py
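The exp / sum / div block above is a softmax written out by hand across the concatenated input gate and word-cell gates; the built-in equivalent (which also subtracts the max for numerical stability) is:

import torch

i = torch.randn(1, 4)
alpha = torch.randn(3, 4)
weights = torch.softmax(torch.cat([i, alpha], 0), dim=0)
assert torch.allclose(weights.sum(0), torch.ones(4))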
Example 15: _attention
def _attention(self, query_t, keys_bth, keys_mask):
B, T, H = keys_bth.shape
q = self.W_a(query_t.view(-1, self.hsz)).view(B, 1, H)
u = self.E_a(keys_bth.contiguous().view(-1, self.hsz)).view(B, T, H)
z = torch.tanh(q + u)
a = self.v(z.view(-1, self.hsz)).view(B, T)
a = a.masked_fill(keys_mask == 0, -1e9)
a = F.softmax(a, dim=-1)
return a
Author: dpressel | Project: baseline | Lines: 9 | Source: torchy.py
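The masked_fill(keys_mask == 0, -1e9) trick gives padded keys effectively zero attention weight after the softmax; in isolation:

import torch
import torch.nn.functional as F

a = torch.randn(2, 5)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 0]])
w = F.softmax(a.masked_fill(mask == 0, -1e9), dim=-1)
assert w[0, 3:].sum().item() < 1e-6    # masked positions get ~zero probability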
Example 16: norm_flow_reverse
def norm_flow_reverse(self, params, z1, z2):
h = torch.tanh(params[1][0](z2))
mew_ = params[1][1](h)
sig_ = torch.sigmoid(params[1][2](h)) #[PB,Z]
z1 = (z1 - mew_) / sig_
logdet2 = torch.sum(torch.log(sig_), 1)
h = torch.tanh(params[0][0](z1))
mew_ = params[0][1](h)
sig_ = torch.sigmoid(params[0][2](h)) #[PB,Z]
z2 = (z2 - mew_) / sig_
logdet = torch.sum(torch.log(sig_), 1)
#[PB]
logdet = logdet + logdet2
#[PB,Z], [PB]
return z1, z2, logdet
Author: chriscremer | Project: Other_Code | Lines: 18 | Source: distributions.py
Example 17: forward
def forward(self, input):
x = F.relu(self.linear_bn(self.linear(input)))
x = x.view(-1, self.d*8, 2, 2)
x = F.relu(self.deconv1_bn(self.deconv1(x)))
x = x[:,:,:-1,:-1] # hacky way to get shapes right (like "SAME" in tf)
x = F.relu(self.deconv2_bn(self.deconv2(x)))
x = F.relu(self.deconv3_bn(self.deconv3(x)))
x = x[:,:,:-1,:-1]
x = torch.tanh(self.deconv4(x))
x = x[:,:,:-1,:-1]
return x
Author: anihamde | Project: cs287-s18 | Lines: 11 | Source: gan_models.py
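Because the final tanh maps generator outputs into (-1, 1), real images fed to the discriminator are usually normalized to the same range, e.g. with torchvision (a common convention, not shown in the source):

import torchvision.transforms as T

transform = T.Compose([
    T.ToTensor(),                                   # scales pixels to [0, 1]
    T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # maps to [-1, 1]
])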
Example 18: test_disabled_traced_function
def test_disabled_traced_function(self):
x = Variable(torch.Tensor([0.4]), requires_grad=True)
y = Variable(torch.Tensor([0.7]), requires_grad=True)
@torch.jit.compile(enabled=False)
def doit(x, y):
return torch.sigmoid(torch.tanh(x * (x + y)))
z = doit(x, y)
z2 = doit(x, y)
self.assertEqual(z, torch.sigmoid(torch.tanh(x * (x + y))))
self.assertEqual(z, z2)
Author: Northrend | Project: pytorch | Lines: 12 | Source: test_jit.py
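@torch.jit.compile belonged to an early JIT prototype and no longer exists; in current PyTorch the closest analogue is torch.jit.trace (a sketch, not part of the original test):

import torch

def doit(x, y):
    return torch.sigmoid(torch.tanh(x * (x + y)))

traced = torch.jit.trace(doit, (torch.randn(2), torch.randn(2)))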
Example 19: tanh_quantize
def tanh_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input)
input = torch.tanh(input) # [-1, 1]
input_rescale = (input + 1.0) / 2 #[0, 1]
n = math.pow(2.0, bits) - 1
v = torch.floor(input_rescale * n + 0.5) / n
v = 2 * v - 1 # [-1, 1]
v = 0.5 * torch.log((1 + v) / (1 - v)) # arctanh
return v
Author: YJieZhang | Project: pytorch-classification | Lines: 12 | Source: quant.py
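tanh_quantize is self-contained once torch and math are imported; a quick check that 1 bit reduces to the sign and that 8 bits roughly preserves moderate values:

import math
import torch

x = torch.tensor([-1.5, -0.5, 0.0, 0.5, 1.5])
print(tanh_quantize(x, 1))    # identical to torch.sign(x)
print(tanh_quantize(x, 8))    # close to x after the tanh / quantize / arctanh round trip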
Example 20: step
def step(self, x: torch.Tensor,
h_tm1: Tuple[torch.Tensor, torch.Tensor],
src_encodings: torch.Tensor, src_encoding_att_linear: torch.Tensor, src_sent_masks: torch.Tensor) -> Tuple[Tuple, torch.Tensor, torch.Tensor]:
# h_t: (batch_size, hidden_size)
h_t, cell_t = self.decoder_lstm(x, h_tm1)
ctx_t, alpha_t = self.dot_prod_attention(h_t, src_encodings, src_encoding_att_linear, src_sent_masks)
att_t = torch.tanh(self.att_vec_linear(torch.cat([h_t, ctx_t], 1))) # Eq. (5)
att_t = self.dropout(att_t)
return (h_t, cell_t), att_t, alpha_t
Author: chubbymaggie | Project: pytorch_basic_nmt | Lines: 12 | Source: nmt.py
Note: the torch.tanh function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of each snippet remains with its original author, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.