This article collects typical usage examples of the Python function tflearn.layers.core.dropout. If you are wondering what exactly the dropout function does, how to call it, or what real-world uses look like, the curated code examples below should help.
Twenty code examples of the dropout function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code samples.
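Before the numbered examples, here is a minimal sketch of the basic call pattern (an illustrative fragment written for this article, not taken from any of the projects below; the 784/64/10 layer sizes are arbitrary placeholders). In TFLearn, the second argument of dropout is keep_prob, the fraction of activations kept during training, so dropout(net, 0.8) drops roughly 20% of the units in the preceding layer.

# Minimal illustrative sketch of tflearn.layers.core.dropout usage
# (assumes tflearn is installed; layer sizes are placeholders, and X, Y
# would be your own training data)
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression

net = input_data(shape=[None, 784])                  # batch of flat feature vectors
net = fully_connected(net, 64, activation='relu')
net = dropout(net, 0.8)                              # keep_prob=0.8 -> drop ~20% of units
net = fully_connected(net, 10, activation='softmax')
net = regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=5)                         # dropout is only active while training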
Example 1: cnn
def cnn():
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])
    # Building convolutional network
    network = input_data(shape=[None, 28, 28, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='cnn_demo')
Developer: Emersonxuelinux | Project: 2book | Lines: 26 | Source file: cnn.py
Example 2: do_cnn_doc2vec_2d
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    print("CNN and doc2vec 2d")
    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])
    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='review')
Developer: Emersonxuelinux | Project: 2book | Lines: 28 | Source file: review.py
Example 3: neural_network_model
def neural_network_model(input_size):
    """
    Build a NN based on the input size.
    :param input_size: feature size of each observation
    :return: tensorflow model
    """
    network = input_data(shape=[None, input_size], name='input')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, learning_rate=LR, name='targets')
    model = tflearn.DNN(network, tensorboard_dir='logs/ann/ann_0')
    return model
Developer: Eudie | Project: Online-Practice | Lines: 28 | Source file: ann_playing.py
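A hypothetical call site for the factory above (a sketch only: the LR constant, the random data, and the 4-feature / 2-class shapes are assumptions made for illustration, and LR is assumed to live in the same script as neural_network_model; none of this is part of the original snippet):

# Hypothetical usage of neural_network_model() from example 3.
import numpy as np

LR = 1e-3                                            # assumed learning rate used by regression()
X = np.random.rand(100, 4)                           # 100 observations, 4 features each
Y = np.eye(2)[np.random.randint(0, 2, size=100)]     # one-hot labels matching the 2-unit softmax
model = neural_network_model(input_size=4)
model.fit({'input': X}, {'targets': Y}, n_epoch=3, show_metric=True)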
Example 4: alexnet
def alexnet():
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))
    # Building 'AlexNet'
    network = input_data(shape=[None, 227, 227, 3])
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 17, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    # Training
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
Developer: Emersonxuelinux | Project: 2book | Lines: 31 | Source file: cnn.py
Example 5: neural_network_model
def neural_network_model(input_size):
    network = input_data(shape=[None, input_size, 1], name='input')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=LR,
                         loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')
    return model
Developer: davidgjordan | Project: gym_tensorflow | Lines: 25 | Source file: tensor2.py
Example 6: _model1
def _model1():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         name='input',
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 32, 3, strides=4, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, len(Y[0]), activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore:
        model.save(_id + '-model.tflearn')
Developer: richardbored | Project: customData | Lines: 29 | Source file: train.py
Example 7: train_nmf_network
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """
    :param mfcc_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)
        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
    return model
Developer: ethman | Project: prediction | Lines: 34 | Source file: repet_nmf_choice.py
Example 8: train_repet_network
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """
    :param beat_spectrum_array:
    :param sdr_array:
    :param n_epochs:
    :param take:
    :return:
    """
    beat_spec_len = 432
    with tf.Graph().as_default():
        input_layer = input_data(shape=[None, beat_spec_len, 1])
        conv1 = conv_1d(input_layer, 32, 4, activation="relu", regularizer="L2")
        max_pool1 = max_pool_1d(conv1, 2)
        conv2 = conv_1d(max_pool1, 64, 80, activation="relu", regularizer="L2")
        max_pool2 = max_pool_1d(conv2, 2)
        fully1 = fully_connected(max_pool2, 128, activation="relu")
        dropout1 = dropout(fully1, 0.8)
        fully2 = fully_connected(dropout1, 256, activation="relu")
        dropout2 = dropout(fully2, 0.8)
        linear = fully_connected(dropout2, 1, activation="linear")
        regress = tflearn.regression(linear, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)
        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            beat_spectrum_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
    return model
Developer: ethman | Project: prediction | Lines: 35 | Source file: repet_nmf_choice.py
Example 9: alexnet
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
    return model
Developer: gcm0621 | Project: pygta5 | Lines: 26 | Source file: models.py
Example 10: build_network
def build_network(self):
    # Building 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    # https://github.com/DT42/squeezenet_demo
    # https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 96, 11, strides=4, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = conv_2d(self.network, 256, 3, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = local_response_normalization(self.network)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, 1024, activation='tanh')
    self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS), activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
Developer: somaticapi | Project: mood-recognition-neural-networks | Lines: 31 | Source file: mood_recognition.py
Example 11: main
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432
    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    # set up training, testing, & validation partitions
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')
    print('splitting and grooming data')
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)
    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])
    print('setting up CNN')
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    network = conv_2d(network, 32, 10, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, 20, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)
    print('running CNN')
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True, run_id='{} classes'.format(n_classes - 1))
    predicted = np.array(model.predict(testX))[:, 0]
    print('plotting')
    plot(testY, predicted)
Developer: ethman | Project: prediction | Lines: 56 | Source file: predict_repet_original_with_sim_matrix2.py
Example 12: build_network
def build_network():
    network = tflearn.input_data(shape=[None, 2])
    network = tflearn.fully_connected(network, 64, activation='relu')
    network = dropout(network, 0.9)
    network = tflearn.fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.9)
    network = tflearn.fully_connected(network, 2, activation='softmax')
    network = tflearn.regression(network, optimizer='sgd', learning_rate=0.1,
                                 loss='categorical_crossentropy')
    return network
Developer: belkale | Project: deeplearning_playground | Lines: 10 | Source file: complicated1_dl.py
Example 13: main
def main():
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    pickle_folders_to_load = sorted(pickle_folders_to_load)
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432
    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(pickle_folders_to_load, pickle_folder,
                                                         feature, fg_or_bg, sdr_type)
    train, test, validate = split_into_sets(len(pickle_folders_to_load), training_percent,
                                            testing_percent, validation_percent)
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_len, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')
    predicted = np.array(model.predict(testX))[:, 0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
Developer: ethman | Project: prediction | Lines: 52 | Source file: beat_test3.py
Example 14: build_model_1_conv
def build_model_1_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    network = input_data(shape=input_shape, name='input')
    network = conv_2d(network, 64, [4, 16], activation='relu')
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, 64, activation='relu')
    network = dropout(network, drop)
    network = fully_connected(network, nb_classes, activation='softmax')
    network = regression(network, optimizer='sgd', learning_rate=learning_rate,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=3, tensorboard_dir=base_path + "/tflearn_logs/",
                        checkpoint_path=base_path + "/checkpoints/step")
    return model
Developer: ErwanGalline | Project: test | Lines: 13 | Source file: models.py
Example 15: main
def main():
    pickle_folder = 'pickles_combined'
    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355
    # set up training, testing, & validation partitions
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(pickle_folder, beat_spec_max)
    train, test, validate = split_into_sets(len(beat_spec_array), training_percent,
                                            testing_percent, validation_percent)
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])
    # Building convolutional network
    network = input_data(shape=[None, beat_spec_max, 1])
    network = conv_1d(network, 32, 4, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = conv_1d(network, 64, 80, activation='relu', regularizer="L2")
    network = max_pool_1d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')  # look for non-tanh things???
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='sgd', loss='mean_square', learning_rate=0.01)
    start = time.time()
    # Training
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True, run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)
    predicted = np.array(model.predict(testX))[:, 0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)
Developer: ethman | Project: prediction | Lines: 51 | Source file: repet_prediction_tensforflow.py
Example 16: make_core_network
def make_core_network(network):
    network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    return network
Developer: EddywardoFTW | Project: tflearn | Lines: 14 | Source file: weights_loading_scope.py
Example 17: _model3
def _model3():
    global yTest, img_aug
    tf.reset_default_graph()
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()
    network = input_data(shape=[None, inputSize, inputSize, dim],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, len(yTest[0]), activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)
    print('Model has been made!!!?')
    # Training
    model = tflearn.DNN(network, checkpoint_path='model_densenet_cifar10',
                        max_checkpoints=10, tensorboard_verbose=0,
                        clip_gradients=0.)
    model.load(_path)
    pred = model.predict(xTest)
    df = pd.DataFrame(pred)
    df.to_csv(_path + ".csv")
    newList = pred.copy()
    newList = convert2(newList)
    if _CSV: makeCSV(newList)
    pred = convert2(pred)
    pred = convert3(pred)
    yTest = convert3(yTest)
    print(metrics.confusion_matrix(yTest, pred))
    print(metrics.classification_report(yTest, pred))
    print('Accuracy', accuracy_score(yTest, pred))
    print()
    if _wrFile: writeTest(pred)
Developer: richardbored | Project: customData | Lines: 50 | Source file: evaluate.py
Example 18: main
def main():
    """
    :return:
    """
    pickle_folder = '../NMF/mfcc_pickles'
    pickle_folders_to_load = [f for f in os.listdir(pickle_folder) if os.path.isdir(join(pickle_folder, f))]
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'mfcc_clusters'
    beat_spec_len = 432
    n_epochs = 200
    take = 1
    # set up training, testing, & validation partitions
    mfcc_array, sdr_array = load_mfcc_and_sdrs(pickle_folders_to_load, pickle_folder,
                                               feature, fg_or_bg, sdr_type)
    mfcc_array = np.expand_dims(mfcc_array, -1)
    sdr_array = np.expand_dims(sdr_array, -1)
    # Building convolutional network
    network = input_data(shape=[None, 13, 100, 1])
    network = conv_2d(network, 32, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = conv_2d(network, 64, [5, 5], activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 1, activation='linear')
    regress = tflearn.regression(network, optimizer='rmsprop', loss='mean_square', learning_rate=0.001)
    start = time.time()
    # Training
    model = tflearn.DNN(regress)  # , session=sess)
    model.fit(mfcc_array, sdr_array, n_epoch=n_epochs,
              snapshot_step=1000, show_metric=True,
              run_id='repet_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    elapsed = (time.time() - start)
    print('Finished training after ' + str(elapsed) + ' seconds. Saving...')
    model_output_folder = 'network_outputs/'
    model_output_file = join(model_output_folder, 'nmf_save_{0}_epochs_take_{1}'.format(n_epochs, take))
    model.save(model_output_file)
Developer: ethman | Project: prediction | Lines: 48 | Source file: train_and_save_nmf_network.py
Example 19: do_cnn
def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words + 1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=32)
Developer: DemonZeros | Project: 1book | Lines: 26 | Source file: 17-2.py
Example 20: createModel
def createModel(nbClasses, imageSize):
    print("[+] Creating model...")
    convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')
    convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)
    convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)
    convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)
    convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)
    convnet = fully_connected(convnet, 1024, activation='elu')
    convnet = dropout(convnet, 0.5)
    convnet = fully_connected(convnet, nbClasses, activation='softmax')
    convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')
    model = tflearn.DNN(convnet)
    print("    Model created! ✅")
    return model
Developer: withoutend | Project: DeepAudioClassification | Lines: 25 | Source file: model.py
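One practical note: every snippet above is an excerpt, so the import lines are omitted. As a rough guide (a sketch only; each original file differs, and the dataset helpers at the end are optional), the layers used in these examples live in the following TFLearn modules:

# Typical import header for the examples above (assumed, not copied from the sources)
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d, conv_1d, max_pool_1d, global_max_pool
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.merge_ops import merge
from tflearn.layers.estimator import regression
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.data_preprocessing import ImagePreprocessing
import tflearn.datasets.mnist as mnist                 # used by example 1
import tflearn.datasets.oxflower17 as oxflower17       # used by example 4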
Note: the tflearn.layers.core.dropout examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.