本文整理汇总了Python中tflearn.lstm函数的典型用法代码示例。如果您正苦于以下问题:Python lstm函数的具体用法?Python lstm怎么用?Python lstm使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了lstm函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: run
def run():
    """Train a 2-layer character-level LSTM and sample US city names.

    Relies on module-level globals: maxlen, char_idx, X, Y, path.
    Trains one epoch at a time for 40 rounds, printing generated samples
    at three temperatures after each epoch.
    """
    # One-hot character windows; the vocabulary dimension plays the role
    # a channel axis would in a CNN.
    net = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    net = tflearn.lstm(net, 512, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, 512)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, len(char_idx), activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    generator = tflearn.SequenceGenerator(net, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0,
                                          checkpoint_path='models/model_us_cities')
    for _ in range(40):
        # Fresh random seed sequence each round.
        seed = random_sequence_from_textfile(path, maxlen)
        generator.fit(X, Y, validation_set=0.1, batch_size=128,
                      n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        for temperature in (1.2, 1.0, 0.5):
            print("-- Test with temperature of %s --" % temperature)
            print(generator.generate(30, temperature=temperature, seq_seed=seed))
开发者ID:kengz,项目名称:ai-notebook,代码行数:28,代码来源:gen_cityname_lstm.py
示例2: do_rnn
def do_rnn(x_train, x_test, y_train, y_test):
    """Train a 2-layer 10-unit LSTM binary classifier on pre-embedded
    [batch, 100, n_words] sequences and validate on the test split.

    Fix: the Python-2 ``print`` statement is converted to a ``print()``
    call (the rest of this file mixes both styles); dead commented-out
    padding code removed.
    """
    global n_words
    print("GET n_words embedding %d" % n_words)
    # Inputs arrive already shaped [batch, 100, n_words] (no padding
    # needed here); convert labels to one-hot binary vectors.
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)
    # Network building
    net = tflearn.input_data(shape=[None, 100, n_words])
    net = tflearn.lstm(net, 10, return_seq=True)
    net = tflearn.lstm(net, 10)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,
                             name="output", loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test), show_metric=True,
              batch_size=32, run_id="maidou")
开发者ID:DemonZeros,项目名称:1book,代码行数:26,代码来源:16-7.py
示例3: get_model_action
def get_model_action():
    """Build the action classifier: two stacked 256-unit LSTMs with 0.6
    dropout over [batch, 10, 128] feature sequences, 5-way softmax output.

    Returns a tflearn.DNN (untrained) with gradient clipping at 5.0.
    """
    layers = tflearn.input_data(shape=[None, 10, 128], name='net2_layer1')
    layers = tflearn.lstm(layers, n_units=256, return_seq=True, name='net2_layer2')
    layers = tflearn.dropout(layers, 0.6, name='net2_layer3')
    layers = tflearn.lstm(layers, n_units=256, return_seq=False, name='net2_layer4')
    layers = tflearn.dropout(layers, 0.6, name='net2_layer5')
    layers = tflearn.fully_connected(layers, 5, activation='softmax', name='net2_layer6')
    layers = tflearn.regression(layers, optimizer='sgd',
                                loss='categorical_crossentropy',
                                learning_rate=0.001, name='net2_layer7')
    return tflearn.DNN(layers, clip_gradients=5.0, tensorboard_verbose=0)
开发者ID:SamsadSajid,项目名称:DeepGamingAI_FIFA,代码行数:11,代码来源:play_fifa.py
示例4: sentnet_LSTM_gray
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    """Build a 2-layer 128-unit LSTM classifier over grayscale frame rows.

    Args:
        width, height: input plane dimensions ([None, width, height]).
        frame_count: unused here; kept for signature compatibility with
            sibling model constructors.
        lr: learning rate for the Adam optimizer.
        output: number of output classes (default 9).

    Returns:
        An untrained tflearn.DNN checkpointing to 'model_lstm'.

    Fix: `output` and `lr` were previously ignored — the class count was
    hard-coded to 9 and the default learning rate was always used.
    """
    network = input_data(shape=[None, width, height], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    # Use the caller-supplied class count and learning rate.
    network = tflearn.fully_connected(network, output, activation='softmax')
    network = tflearn.regression(network, optimizer='adam', learning_rate=lr,
                                 loss='categorical_crossentropy', name="output1")
    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model
开发者ID:gcm0621,项目名称:pygta5,代码行数:13,代码来源:models.py
示例5: do_rnn
def do_rnn(X, Y, testX, testY):
    """Train a 2-layer 128-unit LSTM 10-class classifier for one epoch.

    Each 28x28 input is treated as a sequence of 28 rows of 28 features
    (the classic MNIST-as-sequence setup).
    """
    # Reshape flat samples into [batch, time=28, features=28].
    X = np.reshape(X, (-1, 28, 28))
    testX = np.reshape(testX, (-1, 28, 28))
    net = tflearn.input_data(shape=[None, 28, 28])
    net = tflearn.lstm(net, 128, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, 10, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    classifier = tflearn.DNN(net, tensorboard_verbose=2)
    classifier.fit(X, Y, n_epoch=1, validation_set=(testX, testY),
                   show_metric=True, snapshot_step=100)
开发者ID:DemonZeros,项目名称:1book,代码行数:13,代码来源:16-1.py
示例6: shakespeare
def shakespeare():
    """Character-level Shakespeare generator: three stacked 512-unit LSTMs.

    Downloads the corpus on first run, reuses a pickled character index
    when available, then trains one epoch at a time for 50 rounds,
    printing 600-character samples at two temperatures after each epoch.
    """
    path = "shakespeare_input.txt"
    char_idx_file = 'char_idx.pickle'
    maxlen = 25
    # Fetch the training corpus if it is not already present.
    if not os.path.isfile(path):
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/shakespeare_input.txt", path)
    # Reuse the character index from a previous run when possible.
    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
    X, Y, char_idx = textfile_to_semi_redundant_sequences(
        path, seq_maxlen=maxlen, redun_step=3, pre_defined_char_idx=char_idx)
    pickle.dump(char_idx, open(char_idx_file, 'wb'))
    # Three LSTM layers, each followed by 50% dropout; only the last one
    # collapses the sequence (return_seq=False).
    net = tflearn.input_data([None, maxlen, len(char_idx)])
    for is_last in (False, False, True):
        net = tflearn.lstm(net, 512, return_seq=not is_last)
        net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, len(char_idx), activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy',
                             learning_rate=0.001)
    generator = tflearn.SequenceGenerator(net, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0,
                                          checkpoint_path='model_shakespeare')
    for _ in range(50):
        seed = random_sequence_from_textfile(path, maxlen)
        generator.fit(X, Y, validation_set=0.1, batch_size=128,
                      n_epoch=1, run_id='shakespeare')
        print("-- TESTING...")
        for temperature in (1.0, 0.5):
            print("-- Test with temperature of %s --" % temperature)
            print(generator.generate(600, temperature=temperature, seq_seed=seed))
开发者ID:Emersonxuelinux,项目名称:2book,代码行数:50,代码来源:rnn.py
示例7: main
def main():
    """Train a word-vector LSTM next-word predictor on 1000 sliding windows.

    Uses module-level globals/helpers: seq, load_vectors, init_seq,
    vector2word. Keeps the first 20-word window as a sanity-check input
    and prints both the ground-truth answer and the model's prediction.

    Fix: Python-2 ``print`` statements converted to ``print()`` calls.
    """
    load_vectors("./vectors.bin")
    init_seq()
    xlist = []
    ylist = []
    test_X = None
    # Sliding windows of 20 word vectors, each predicting the 21st word.
    for i in range(1000):
        sequence = seq[i:i+20]
        xlist.append(sequence)
        ylist.append(seq[i+20])
        if test_X is None:
            # Remember the first window and report its true next word.
            test_X = np.array(sequence)
            (match_word, max_cos) = vector2word(seq[i+20])
            print("right answer=", match_word, max_cos)
    X = np.array(xlist)
    Y = np.array(ylist)
    net = tflearn.input_data([None, 20, 200])
    net = tflearn.lstm(net, 200)
    net = tflearn.fully_connected(net, 200, activation='linear')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='mean_square')
    model = tflearn.DNN(net)
    model.fit(X, Y, n_epoch=1000, batch_size=1, snapshot_epoch=False,
              show_metric=True)
    model.save("model")
    predict = model.predict([test_X])
    (match_word, max_cos) = vector2word(predict[0])
    print("predict=", match_word, max_cos)
开发者ID:Hackerer,项目名称:ChatBotCourse,代码行数:32,代码来源:one_lstm_sequence_generate.py
示例8: do_rnn
def do_rnn(x, y):
    """Train an embedding+LSTM webshell classifier on a 60/40 split and
    report metrics on thresholded test predictions via do_metrics().

    Fix: Python-2 ``print`` statement converted to a ``print()`` call;
    prediction thresholding rewritten as a comprehension.
    """
    global max_document_length
    print("RNN")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4,
                                                    random_state=0)
    # Keep raw integer test labels for the final metrics report.
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10, run_id="webshell", n_epoch=5)
    # Class-0 probability above 0.5 -> hard label 0, otherwise 1.
    y_predict = [0 if probs[0] > 0.5 else 1 for probs in model.predict(testX)]
    do_metrics(y_test, y_predict)
开发者ID:Emersonxuelinux,项目名称:2book,代码行数:34,代码来源:webshell.py
示例9: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding+LSTM binary classifier on padded token sequences,
    validating on the supplied test split.

    Fix: Python-2 ``print`` statement converted to a ``print()`` call.
    """
    global n_words
    print("GET n_words embedding %d" % n_words)
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
开发者ID:DemonZeros,项目名称:1book,代码行数:28,代码来源:16-3.py
示例10: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding+LSTM DGA-domain classifier and print a
    classification report and confusion matrix for the test split.

    Fix: Python-2 ``print`` statements converted to ``print()`` calls.
    """
    max_document_length = 64
    # Keep raw integer test labels for the final reports.
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="dga", n_epoch=1)
    y_predict_list = model.predict(testX)
    y_predict = []
    for probs in y_predict_list:
        print(probs[0])
        # Class-0 probability above 0.5 -> predict 0, otherwise 1.
        if probs[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
开发者ID:Emersonxuelinux,项目名称:2book,代码行数:35,代码来源:dga.py
示例11: test_sequencegenerator
def test_sequencegenerator(self):
    """SequenceGenerator round-trip: train, generate, save, reload, generate.

    The generated sequence must be identical before and after a
    save/load cycle.
    """
    with tf.Graph().as_default():
        text = "123456789101234567891012345678910123456789101234567891012345678910"
        maxlen = 5
        expected = "123456789101234"
        X, Y, char_idx = tflearn.data_utils.string_to_semi_redundant_sequences(
            text, seq_maxlen=maxlen, redun_step=3)
        net = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
        net = tflearn.lstm(net, 32)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, len(char_idx), activation='softmax')
        net = tflearn.regression(net, optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.1)
        gen = tflearn.SequenceGenerator(net, dictionary=char_idx,
                                        seq_maxlen=maxlen,
                                        clip_gradients=5.0)
        gen.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = gen.generate(10, temperature=1., seq_seed="12345")
        self.assertEqual(res, expected, "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")
        # Testing save method
        gen.save("test_seqgen.tflearn")
        self.assertTrue(os.path.exists("test_seqgen.tflearn"))
        # Testing load method
        gen.load("test_seqgen.tflearn")
        res = gen.generate(10, temperature=1., seq_seed="12345")
        self.assertEqual(res, expected, "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
开发者ID:braddengross,项目名称:tflearn,代码行数:31,代码来源:test_models.py
示例12: generator_xss
def generator_xss():
    """Train a 3-layer 32-unit character LSTM on XSS payload text, then
    sample candidate payloads from a fixed '"/><script>' seed at three
    temperatures.

    Uses module-level globals: char_idx, xss_data_file, maxlen,
    char_idx_file.

    Fix: Python-2 ``print`` statements converted to ``print()`` calls;
    dead commented-out code removed.
    """
    global char_idx
    global xss_data_file
    global maxlen
    # Reuse the character index from a previous run when available.
    if os.path.isfile(char_idx_file):
        print('Loading previous xxs_char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
    X, Y, char_idx = \
        textfile_to_semi_redundant_sequences(xss_data_file, seq_maxlen=maxlen, redun_step=3,
                                             pre_defined_char_idx=char_idx)
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='chkpoint/model_scanner_poc')
    print("random_sequence_from_textfile")
    # Fixed seed so every sample starts from an XSS-like prefix.
    seed = '"/><script>'
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=2, run_id='scanner-poc')
    print("-- TESTING...")
    print("-- Test with temperature of 0.1 --")
    print(m.generate(32, temperature=0.1, seq_seed=seed))
    print("-- Test with temperature of 0.5 --")
    print(m.generate(32, temperature=0.5, seq_seed=seed))
    print("-- Test with temperature of 1.0 --")
    print(m.generate(32, temperature=1.0, seq_seed=seed))
开发者ID:Emersonxuelinux,项目名称:2book,代码行数:47,代码来源:scanner-poc.py
示例13: build
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    """Build a 4-layer stacked-LSTM sentiment graph over an embedding.

    Args:
        embedding_size: (vocabulary size, embedding dimension) pair.
        train_embedding: whether the embedding table is trainable.
        hidden_dims: LSTM units per layer.
        learning_rate: Adam learning rate.

    Returns:
        The tflearn graph (not wrapped in a DNN).
    """
    vocab_size, embed_dim = embedding_size
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=vocab_size,
                            output_dim=embed_dim,
                            trainable=train_embedding, name='EmbeddingLayer')
    # Four LSTM layers with 50% dropout each; only the last one collapses
    # the sequence (return_seq=False).
    for depth in range(4):
        net = tflearn.lstm(net, hidden_dims, return_seq=depth < 3)
        net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
开发者ID:kashizui,项目名称:rnn-sentiment-analysis,代码行数:18,代码来源:stacked_lstm.py
示例14: generate_net
def generate_net(embedding):
    """Build a single-LSTM binary sentiment graph over a 128-dim embedding.

    NOTE(review): the `embedding` argument is unused — the layer always
    learns its own table (input_dim=300000); parameter kept for interface
    compatibility with callers.
    """
    graph = tflearn.input_data([None, 200])
    graph = tflearn.embedding(graph, input_dim=300000, output_dim=128)
    graph = tflearn.lstm(graph, 128)
    graph = tflearn.dropout(graph, 0.5)
    graph = tflearn.fully_connected(graph, 2, activation='softmax')
    graph = tflearn.regression(graph, optimizer='adam',
                               loss='categorical_crossentropy')
    return graph
开发者ID:kashizui,项目名称:rnn-sentiment-analysis,代码行数:9,代码来源:word2vec.py
示例15: create_net
def create_net(in_sx, in_sy, out_sx):
    """
    Creates a tflearn neural network with the correct
    architecture for learning to hear the keyword.

    Uses module-level hyperparameters: lstm_size, lstm_dropout,
    learning_rate. Returns the regression graph (not a DNN).
    """
    layers = tflearn.input_data([None, in_sx, in_sy])
    layers = tflearn.lstm(layers, lstm_size, dropout=lstm_dropout)
    layers = tflearn.fully_connected(layers, out_sx, activation='softmax')
    return tflearn.regression(layers, learning_rate=learning_rate,
                              optimizer='adam', loss='categorical_crossentropy')
开发者ID:lzufalcon,项目名称:mycroft-precise-python-experiments,代码行数:10,代码来源:mycroft_keyword.py
示例16: train_model
def train_model():
    """Train a single-LSTM scalar regressor on the preprocessed dataset.

    Loads (X_train, X_test, y_train, y_test) from PREPROCESSED_DATA and
    fits for one epoch with a 10% validation split.

    Fix: the original passed ``run_id=`` with no value to tflearn.DNN —
    a syntax error; ``run_id`` is a fit() argument, not a DNN constructor
    argument.
    """
    X_train, X_test, y_train, y_test = np.load(PREPROCESSED_DATA)
    num_samples, num_timesteps, input_dim = X_train.shape
    net = tflearn.input_data(shape=[None, num_timesteps, input_dim])
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, 1, activation='relu')
    net = tflearn.regression(net, optimizer='sgd',
                             loss='mean_square', name="regression_output")
    model = tflearn.DNN(net, tensorboard_verbose=2)
    model.fit(X_train, y_train, n_epoch=1, validation_set=0.1, show_metric=True,
              snapshot_step=100, run_id='lstm_regression')
开发者ID:Musicophilia,项目名称:nga_hacks,代码行数:11,代码来源:pipeline_tflearn.py
示例17: model
def model(self, feed_previous=False):
    """Build a seq2seq (encoder/decoder LSTM) model over word vectors.

    The single input tensor "XY" holds the encoder sequence followed by
    the decoder sequence along the time axis (2 * max_seq_len steps).

    NOTE(review): uses pre-1.0 TensorFlow APIs (tf.pack,
    tf.concat(axis, values)) — this only runs on legacy TF versions.

    Args:
        feed_previous: when True, each decoder step is fed the previous
            decoder output (inference mode); when False, the ground-truth
            previous token is fed (teacher forcing).

    Returns:
        A tflearn.DNN wrapping a mean-square regression over the
        concatenated encoder + decoder output sequence.
    """
    # Split the combined XY input into encoder inputs and GO-prefixed
    # decoder inputs.
    input_data = tflearn.input_data(shape=[None, self.max_seq_len*2, self.word_vec_dim], dtype=tf.float32, name = "XY")
    encoder_inputs = tf.slice(input_data, [0, 0, 0], [-1, self.max_seq_len, self.word_vec_dim], name="enc_in")
    decoder_inputs_tmp = tf.slice(input_data, [0, self.max_seq_len, 0], [-1, self.max_seq_len-1, self.word_vec_dim], name="dec_in_tmp")
    # GO token: a single all-ones vector prepended to the decoder inputs.
    go_inputs = tf.ones_like(decoder_inputs_tmp)
    go_inputs = tf.slice(go_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    decoder_inputs = tf.concat(1, [go_inputs, decoder_inputs_tmp], name="dec_in")
    # Encoder: feed encoder_inputs to an LSTM; it returns one output (the
    # first value of the predicted sequence) and a state handed to the
    # decoder.
    (encoder_output_tensor, states) = tflearn.lstm(encoder_inputs, self.word_vec_dim, return_state=True, scope='encoder_lstm')
    encoder_output_sequence = tf.pack([encoder_output_tensor], axis=1)
    # Decoder: at prediction time each step uses the previous step's
    # output as input; the first input is the GO token (or the
    # ground-truth first decoder token under teacher forcing).
    if feed_previous:
        first_dec_input = go_inputs
    else:
        first_dec_input = tf.slice(decoder_inputs, [0, 0, 0], [-1, 1, self.word_vec_dim])
    decoder_output_tensor = tflearn.lstm(first_dec_input, self.word_vec_dim, initial_state=states, return_seq=False, reuse=False, scope='decoder_lstm')
    decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
    decoder_output_sequence_list = [decoder_output_tensor]
    # Remaining decoder steps: the decoder's own output feeds the next
    # time step; all steps share weights (reuse=True on 'decoder_lstm').
    for i in range(self.max_seq_len-1):
        if feed_previous:
            next_dec_input = decoder_output_sequence_single
        else:
            next_dec_input = tf.slice(decoder_inputs, [0, i+1, 0], [-1, 1, self.word_vec_dim])
        decoder_output_tensor = tflearn.lstm(next_dec_input, self.word_vec_dim, return_seq=False, reuse=True, scope='decoder_lstm')
        decoder_output_sequence_single = tf.pack([decoder_output_tensor], axis=1)
        decoder_output_sequence_list.append(decoder_output_tensor)
    decoder_output_sequence = tf.pack(decoder_output_sequence_list, axis=1)
    # Full predicted sequence = encoder's single output + decoder outputs.
    real_output_sequence = tf.concat(1, [encoder_output_sequence, decoder_output_sequence])
    net = tflearn.regression(real_output_sequence, optimizer='sgd', learning_rate=0.1, loss='mean_square')
    model = tflearn.DNN(net)
    return model
开发者ID:Hackerer,项目名称:ChatBotCourse,代码行数:40,代码来源:my_seq2seq_v2.py
示例18: build
def build():
    """Build a multi-width char-CNN (kernel sizes 3/4/5) whose merged
    feature maps feed a 128-unit LSTM and a 2-way softmax.

    Returns an untrained tflearn.DNN.
    """
    network = input_data([None, Meta.max_string_len])
    network = embedding(network, input_dim=Meta.max_one_hot, output_dim=128)
    # Parallel 1-D convolutions over the same embedded input.
    branches = [conv_1d(network, 128, width, padding='valid',
                        activation='relu', regularizer="L2")
                for width in (3, 4, 5)]
    network = merge(branches, mode='concat', axis=1)
    network = dropout(network, 0.5)
    network = lstm(network, 128)
    network = fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 learning_rate=0.001,
                                 loss='categorical_crossentropy')
    return tflearn.DNN(network, tensorboard_verbose=0)
开发者ID:szsam,项目名称:DSLearn,代码行数:14,代码来源:NN.py
示例19: train
def train(self):
    """Build the 3-layer 512-unit character LSTM and its SequenceGenerator.

    Stores the graph in self.g and the generator in self.model.

    Fix: the character index loaded from self.charIDXFile was previously
    discarded — it was never passed to
    textfile_to_semi_redundant_sequences, so the vocabulary was rebuilt
    from scratch every run (cf. the identical pattern elsewhere in this
    codebase that passes pre_defined_char_idx).
    """
    char_idx = None
    if os.path.isfile(self.charIDXFile):
        # load previous character file
        char_idx = pickle.load(open(self.charIDXFile, 'rb'))
    # Forward the loaded index so vocabularies stay stable across runs.
    X, Y, char_idx = textfile_to_semi_redundant_sequences(
        self.path, seq_maxlen=self.maxLength, redun_step=3,
        pre_defined_char_idx=char_idx)
    pickle.dump(char_idx, open(self.charIDXFile, 'wb'))
    self.g = tflearn.input_data([None, self.maxLength, len(char_idx)])
    self.g = tflearn.lstm(self.g, 512, return_seq=True)
    self.g = tflearn.dropout(self.g, 0.5)
    self.g = tflearn.lstm(self.g, 512, return_seq=True)
    self.g = tflearn.dropout(self.g, 0.5)
    self.g = tflearn.lstm(self.g, 512)
    self.g = tflearn.dropout(self.g, 0.5)
    self.g = tflearn.fully_connected(self.g, len(char_idx), activation='softmax')
    self.g = tflearn.regression(self.g, optimizer='adam',
                                loss='categorical_crossentropy',
                                learning_rate=0.001)
    self.model = tflearn.SequenceGenerator(self.g, dictionary=char_idx,
                                           seq_maxlen=self.maxLength,
                                           max_checkpoints=0,
                                           checkpoint_path='model_trump')
开发者ID:govindtank,项目名称:TrumpBot,代码行数:24,代码来源:trumpGenTF.py
示例20: test_recurrent_layers
def test_recurrent_layers(self):
    """Embedding + LSTM should learn a toy 4-token binary classification."""
    X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
    Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]
    with tf.Graph().as_default():
        net = tflearn.input_data(shape=[None, 4])
        net = tflearn.embedding(net, input_dim=12, output_dim=4)
        net = tflearn.lstm(net, 6)
        net = tflearn.fully_connected(net, 2, activation='softmax')
        net = tflearn.regression(net, optimizer='sgd', learning_rate=1.)
        model = tflearn.DNN(net)
        model.fit(X, Y, n_epoch=300, snapshot_epoch=False)
        # After training, the class-1 probability for a class-1-like
        # sample should be confidently high.
        self.assertGreater(model.predict([[5, 9, 11, 1]])[0][1], 0.9)
开发者ID:EddywardoFTW,项目名称:tflearn,代码行数:15,代码来源:test_layers.py
注:本文中的tflearn.lstm函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论