This article collects typical usage examples of the Python function tflearn.dropout. If you have been wondering what exactly tflearn.dropout does, how to call it, or how it is used in practice, the hand-picked code examples below should help.
The following shows 20 code examples of the dropout function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
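Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of where tflearn.dropout typically sits in a network definition; the layer sizes and the 0.8 keep probability are illustrative assumptions:

import tflearn

# tflearn.dropout(incoming, keep_prob) keeps each activation with probability
# keep_prob during training and passes data through unchanged at inference
# time. Everything below is an illustrative toy network, not project code.
net = tflearn.input_data(shape=[None, 784])                   # e.g. flattened 28x28 inputs
net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.dropout(net, 0.8)                               # keep_prob=0.8, i.e. drop ~20% of units
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=10) would then train exactly as in the examples below.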
Example 1: use_tflearn
def use_tflearn():
    import tflearn
    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')
    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
Developer: Emersonxuelinux, Project: 2book, Lines of code: 27, Source: tools.py
Example 2: run
def run():
    # As with a CNN input, the third dimension plays the role of the channel axis
    g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='models/model_us_cities')
    for i in range(40):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='us_cities')
        print("-- TESTING...")
        print("-- Test with temperature of 1.2 --")
        print(m.generate(30, temperature=1.2, seq_seed=seed))
        print("-- Test with temperature of 1.0 --")
        print(m.generate(30, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(30, temperature=0.5, seq_seed=seed))
Developer: kengz, Project: ai-notebook, Lines of code: 28, Source: gen_cityname_lstm.py
Example 3: yn_net
def yn_net():
    net = tflearn.input_data(shape=[None, img_rows, img_cols, 1])  # D = 256, 256
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.1')
    net = tflearn.conv_2d(net, nb_filter=8, filter_size=3, activation='relu', name='conv0.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool0')  # D = 128, 128
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.1')
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, activation='relu', name='conv1.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool1')  # D = 64, 64
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv2.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool2')  # D = 32 by 32
    net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.1')
    net = tflearn.conv_2d(net, nb_filter=32, filter_size=3, activation='relu', name='conv3.2')
    net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool3')  # D = 16 by 16
    net = tflearn.dropout(net, 0.75, name='dropout0')
    # net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.1')
    # net = tflearn.conv_2d(net, nb_filter=64, filter_size=3, activation='relu', name='conv4.2')
    # net = tflearn.max_pool_2d(net, kernel_size=[2, 2], name='maxpool4')  # D = 8 by 8
    # net = tflearn.dropout(net, 0.75, name='dropout0')
    net = tflearn.fully_connected(net, n_units=128, activation='relu', name='fc1')
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001)
    model = tflearn.DNN(net, tensorboard_verbose=1, tensorboard_dir='/tmp/tflearn_logs/')
    return model
Developer: bmalthi, Project: bnerveseg, Lines of code: 27, Source: train_yn.py
Example 4: vgg16
def vgg16(placeholderX=None):
    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', name='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', name='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', name='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', name='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='pool5')
    x = tflearn.conv_2d(x, 4096, 7, activation='relu', name='fc6')
    x = tflearn.dropout(x, 0.5)
    x = tflearn.conv_2d(x, 4096, 1, activation='relu', name='fc7')
    x = tflearn.dropout(x, 0.5)
    return x
Developer: aymericdamien, Project: models, Lines of code: 35, Source: vgg16.py
Example 5: vgg16
def vgg16(input, num_class):
    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)
    return x
Developer: EddywardoFTW, Project: tflearn, Lines of code: 35, Source: vgg_network_finetuning.py
Example 6: make_core_network
def make_core_network(network):
    dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense1")
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001, name="dense2")
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
    return softmax
Developer: EddywardoFTW, Project: tflearn, Lines of code: 9, Source: weights_loading_scope.py
Example 7: get_model_action
def get_model_action():
    # Network building
    net = tflearn.input_data(shape=[None, 10, 128], name='net2_layer1')
    net = tflearn.lstm(net, n_units=256, return_seq=True, name='net2_layer2')
    net = tflearn.dropout(net, 0.6, name='net2_layer3')
    net = tflearn.lstm(net, n_units=256, return_seq=False, name='net2_layer4')
    net = tflearn.dropout(net, 0.6, name='net2_layer5')
    net = tflearn.fully_connected(net, 5, activation='softmax', name='net2_layer6')
    net = tflearn.regression(net, optimizer='sgd', loss='categorical_crossentropy', learning_rate=0.001,
                             name='net2_layer7')
    return tflearn.DNN(net, clip_gradients=5.0, tensorboard_verbose=0)
Developer: SamsadSajid, Project: DeepGamingAI_FIFA, Lines of code: 11, Source: play_fifa.py
Example 8: shakespeare
def shakespeare():
    path = "shakespeare_input.txt"
    #path = "shakespeare_input-100.txt"
    char_idx_file = 'char_idx.pickle'
    if not os.path.isfile(path):
        urllib.request.urlretrieve(
            "https://raw.githubusercontent.com/tflearn/tflearn.github.io/master/resources/shakespeare_input.txt", path)
    maxlen = 25
    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
    X, Y, char_idx = \
        textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3,
                                             pre_defined_char_idx=char_idx)
    pickle.dump(char_idx, open(char_idx_file, 'wb'))
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='model_shakespeare')
    for i in range(50):
        seed = random_sequence_from_textfile(path, maxlen)
        m.fit(X, Y, validation_set=0.1, batch_size=128,
              n_epoch=1, run_id='shakespeare')
        print("-- TESTING...")
        print("-- Test with temperature of 1.0 --")
        print(m.generate(600, temperature=1.0, seq_seed=seed))
        #print(m.generate(10, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(m.generate(600, temperature=0.5, seq_seed=seed))
Developer: Emersonxuelinux, Project: 2book, Lines of code: 50, Source: rnn.py
Example 9: deep_model
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
    '''
    Model - deep, i.e. two-layer fully connected network model
    '''
    cc_input_var = {}
    cc_embed_var = {}
    flat_vars = []
    if self.verbose:
        print("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
    for cc, cc_size in self.categorical_columns.items():
        cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc, dtype=tf.int32)
        # embedding layers only work on CPU! No GPU implementation in tensorflow, yet!
        cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
        if self.verbose:
            print(" %s_embed = %s" % (cc, cc_embed_var[cc]))
        flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))
    network = tf.concat(1, [wide_inputs] + flat_vars, name="deep_concat")
    for k in range(len(n_nodes)):
        network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
        if use_dropout:
            network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
    if self.verbose:
        print("Deep model network before output %s" % network)
    network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
    network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
    if self.verbose:
        print("Deep model network %s" % network)
    return network
Developer: ALISCIFP, Project: tflearn, Lines of code: 29, Source: recommender_wide_and_deep.py
Example 10: test_sequencegenerator
def test_sequencegenerator(self):
    with tf.Graph().as_default():
        text = "123456789101234567891012345678910123456789101234567891012345678910"
        maxlen = 5
        X, Y, char_idx = \
            tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)
        g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                               learning_rate=0.1)
        m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(10, temperature=1., seq_seed="12345")
        self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")
        # Testing save method
        m.save("test_seqgen.tflearn")
        self.assertTrue(os.path.exists("test_seqgen.tflearn"))
        # Testing load method
        m.load("test_seqgen.tflearn")
        res = m.generate(10, temperature=1., seq_seed="12345")
        self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Developer: braddengross, Project: tflearn, Lines of code: 31, Source: test_models.py
Example 11: build_cnn_network
def build_cnn_network(self, network):
    """ Build CNN network.
    Args:
        network: base network.
    Returns:
        model: CNN model.
    """
    print('Building CNN network.')
    # Convolutional network building
    network = tflearn.conv_2d(network, 32,
                              self.IMAGE_CHANNEL_NUM,
                              activation='relu')
    network = tflearn.max_pool_2d(network, 2)
    network = tflearn.conv_2d(network, 64,
                              self.IMAGE_CHANNEL_NUM,
                              activation='relu')
    network = tflearn.conv_2d(network, 64,
                              self.IMAGE_CHANNEL_NUM,
                              activation='relu')
    network = tflearn.max_pool_2d(network, 2)
    network = tflearn.fully_connected(
        network, 32 * 32, activation='relu')
    network = tflearn.dropout(network, 0.5)
    # Two categories: positive or negative.
    network = tflearn.fully_connected(network, 2,
                                      activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy',
                                 learning_rate=0.001)
    print("CNN network built.")
    return network
Developer: NuitNoir, Project: MachineLearning, Lines of code: 34, Source: dnn_network.py
Example 12: generator_xss
def generator_xss():
    global char_idx
    global xss_data_file
    global maxlen
    if os.path.isfile(char_idx_file):
        print('Loading previous xss_char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
    X, Y, char_idx = \
        textfile_to_semi_redundant_sequences(xss_data_file, seq_maxlen=maxlen, redun_step=3,
                                             pre_defined_char_idx=char_idx)
    #pickle.dump(char_idx, open(char_idx_file, 'wb'))
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32, return_seq=True)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.lstm(g, 32)
    g = tflearn.dropout(g, 0.1)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='chkpoint/model_scanner_poc')
    print("random_sequence_from_textfile")
    #seed = random_sequence_from_textfile(xss_data_file, maxlen)
    seed = '"/><script>'
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=2, run_id='scanner-poc')
    print("-- TESTING...")
    print("-- Test with temperature of 0.1 --")
    print(m.generate(32, temperature=0.1, seq_seed=seed))
    print("-- Test with temperature of 0.5 --")
    print(m.generate(32, temperature=0.5, seq_seed=seed))
    print("-- Test with temperature of 1.0 --")
    print(m.generate(32, temperature=1.0, seq_seed=seed))
Developer: Emersonxuelinux, Project: 2book, Lines of code: 47, Source: scanner-poc.py
Example 13: simple_learn
def simple_learn(self):
    tflearn.init_graph()
    net = tflearn.input_data(shape=[None, 64, 64, 3])
    net = tflearn.fully_connected(net, 64)
    net = tflearn.dropout(net, .5)
    net = tflearn.fully_connected(net, 10, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='softmax_categorical_crossentropy')
    model = tflearn.DNN(net)
    model.fit(self.trainset, self.trainlabels)
Developer: Qrkchrm, Project: StateFarmShared, Lines of code: 9, Source: dataviewing.py
Example 14: generate_net
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
Developer: kashizui, Project: rnn-sentiment-analysis, Lines of code: 9, Source: word2vec.py
Example 15: build
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Developer: kashizui, Project: rnn-sentiment-analysis, Lines of code: 18, Source: stacked_lstm.py
Example 16: deep_net_tflearn
def deep_net_tflearn(X_train, X_test, Y_train, Y_test, num_epoch, first_layer, second_layer, third_layer, fourth_layer):
    # Implementation with TFLearn
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.8)
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)
    # Building DNN
    nn = tflearn.input_data(shape=[None, len(X_train[0])])
    Input = nn
    nn = tflearn.fully_connected(nn, first_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_1")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, second_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_2")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, third_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_3")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, fourth_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_4")
    nn = tflearn.dropout(nn, 0.5)
    Hidden_state = nn
    nn = tflearn.fully_connected(nn, len(Y_train[0]), activation='elu', weights_init=tnorm, name="layer_5")
    Output = nn
    #custom_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    #    out_layer, tf_train_labels) +
    #    0.01*tf.nn.l2_loss(hidden_weights) +
    #    0.01*tf.nn.l2_loss(hidden_biases) +
    #    0.01*tf.nn.l2_loss(out_weights) +
    #    0.01*tf.nn.l2_loss(out_biases))
    # Regression layer
    net = tflearn.regression(nn, optimizer='SGD', learning_rate=0.001, loss='categorical_crossentropy', metric=None)
    # Training the network
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(X_train, Y_train, n_epoch=num_epoch, validation_set=0.1, run_id="bitsight_nn_tflearn", batch_size=128)
    pred = model.predict(X_test)
    total = 0
    correct = 0
    for i in range(len(pred)):
        total += 1
        if np.argmax(pred[i]) == np.argmax(Y_test[i]):
            correct += 1
    return total*1., correct*1.
Developer: sabersf, Project: Botnets, Lines of code: 43, Source: botnet_tf.py
Example 17: run
def run():
    net = tflearn.input_data(shape=[None, 224, 224, 3])
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.conv_2d(net, 64, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.conv_2d(net, 128, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.conv_2d(net, 256, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.conv_2d(net, 512, 3, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 4096, activation='relu')
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 17, activation='softmax')
    net = tflearn.regression(net, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    m = tflearn.DNN(net, checkpoint_path='models/vgg_net',
                    max_checkpoints=1, tensorboard_verbose=3)
    m.fit(X, Y, n_epoch=500, shuffle=True,
          show_metric=True, batch_size=32, snapshot_step=500,
          snapshot_epoch=False, run_id='vgg_net')
    m.save('models/vgg_net.tfl')
Developer: kengz, Project: ai-notebook, Lines of code: 42, Source: vgg_net.py
Example 18: vgg16
def vgg16(placeholderX=None, softmax_size=1000, restore_softmax=True,
          data_preprocessing=None, data_augmentation=None):
    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX,
                           data_preprocessing=data_preprocessing,
                           data_augmentation=data_augmentation)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, softmax_size, activation='softmax',
                                scope='fc8', restore=restore_softmax)
    return x
Developer: dthiagarajan, Project: grozi_tf, Lines of code: 41, Source: vgg16.py
Example 19: run
def run():
    net = tflearn.input_data([None, 100])
    net = tflearn.embedding(net, input_dim=20000, output_dim=128)
    net = tflearn.bidirectional_rnn(
        net, tflearn.BasicLSTMCell(128), tflearn.BasicLSTMCell(128))
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(
        net, optimizer='adam', loss='categorical_crossentropy')
    m = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    m.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64)
    m.save('models/bidirectional_rnn.tfl')
Developer: kengz, Project: ai-notebook, Lines of code: 13, Source: bidirectional_rnn.py
Example 20: vgg16
def vgg16(placeholderX=None):
    x = tflearn.input_data(shape=[None, 224, 224, 3], name='input',
                           placeholder=placeholderX)
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')
    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')
    x = tflearn.fully_connected(x, 1000, activation='softmax', scope='fc8')
    return x
Developer: tflearn, Project: models, Lines of code: 37, Source: vgg16.py
Note: The tflearn.dropout examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.