This article collects typical usage examples of the Python function tflearn.data_utils.to_categorical. If you have been wondering what exactly to_categorical does, how to call it, or what real-world usage looks like, the curated code examples here should help.
Twenty code examples of the to_categorical function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
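Before the full examples, here is a minimal sketch (not taken from any of the projects below) of what to_categorical itself does: it converts a vector of integer class labels into a one-hot binary matrix with nb_classes columns, which is the label format expected by a softmax output layer trained with a categorical_crossentropy loss. The keyword name nb_classes follows the examples on this page; check it against the TFLearn version you have installed.

from tflearn.data_utils import to_categorical

# Integer labels for a binary classification task.
labels = [0, 1, 1, 0]

# One-hot encode into two columns (column 0 for class 0, column 1 for class 1).
one_hot = to_categorical(labels, nb_classes=2)
print(one_hot)
# Expected result, one row per sample:
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]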
Example 1: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    max_document_length = 64
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="dga", n_epoch=1)
    y_predict_list = model.predict(testX)
    # print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Developer: Emersonxuelinux, Project: 2book, Lines: 35, Source: dga.py
Example 2: do_cnn_doc2vec
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    global max_features
    print("CNN and doc2vec")
    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128, validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="review")
Developer: Emersonxuelinux, Project: 2book, Lines: 28, Source: review.py
Example 3: do_cnn
def do_cnn(trainX, trainY, testX, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    network = tflearn.embedding(network, input_dim=n_words + 1, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True, validation_set=(testX, testY), show_metric=True, batch_size=32)
Developer: DemonZeros, Project: 1book, Lines: 26, Source: 17-2.py
Example 4: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    global n_words
    # Data preprocessing
    # Sequence padding
    print("GET n_words embedding %d" % n_words)
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
Developer: DemonZeros, Project: 1book, Lines: 28, Source: 16-3.py
Example 5: do_rnn
def do_rnn(x_train, x_test, y_train, y_test):
    global n_words
    # Data preprocessing
    # Sequence padding
    print("GET n_words embedding %d" % n_words)
    #x_train = pad_sequences(x_train, maxlen=100, value=0.)
    #x_test = pad_sequences(x_test, maxlen=100, value=0.)
    # Converting labels to binary vectors
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)
    # Network building
    net = tflearn.input_data(shape=[None, 100, n_words])
    net = tflearn.lstm(net, 10, return_seq=True)
    net = tflearn.lstm(net, 10)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1, name="output",
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test), show_metric=True,
              batch_size=32, run_id="maidou")
Developer: DemonZeros, Project: 1book, Lines: 26, Source: 16-7.py
Example 6: do_rnn
def do_rnn(x, y):
    global max_document_length
    print("RNN")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10, run_id="webshell", n_epoch=5)
    y_predict_list = model.predict(testX)
    y_predict = []
    for i in y_predict_list:
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    do_metrics(y_test, y_predict)
Developer: Emersonxuelinux, Project: 2book, Lines: 34, Source: webshell.py
Example 7: do_cnn
def do_cnn(x, y):
    global max_document_length
    print("CNN and tf")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    #if not os.path.exists(pkl_file):
    # Training
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=0.1,
              show_metric=True, batch_size=100, run_id="webshell")
    # model.save(pkl_file)
    #else:
    #    model.load(pkl_file)
    y_predict_list = model.predict(testX)
    #y_predict = list(model.predict(testX, as_iterable=True))
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    # print(y_test)
    do_metrics(y_test, y_predict)
Developer: Emersonxuelinux, Project: 2book, Lines: 53, Source: webshell.py
Example 8: process_form_data
def process_form_data(filename):
    data = h5py.File(filename, 'r')
    output = h5py.File('forms_out.h5', 'w')
    test_image = output.create_dataset('test_image', (330, 3, 256, 256), dtype=np.uint8)
    train_image = output.create_dataset('train_image', (770, 3, 256, 256), dtype=np.uint8)
    test_label = output.create_dataset('test_label', (330, 11), dtype=np.int8)
    train_label = output.create_dataset('train_label', (770, 11), dtype=np.int8)
    image, labels = shuffle(data['image'], data['form'])
    onehot_labels = to_categorical(labels, 11)
    count = {}
    train_count = 0
    test_count = 0
    for i, l in enumerate(labels):
        if l not in count:
            count[l] = 0
        if count[l] > 29:
            train_image[train_count] = image[i]
            train_label[train_count] = onehot_labels[i]
            train_count += 1
        else:
            test_image[test_count] = image[i]
            test_label[test_count] = onehot_labels[i]
            test_count += 1
        count[l] += 1
    output.close()
Developer: megansearles, Project: neural-nets, Lines: 35, Source: process_data.py
Example 9: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    global max_sequences_len
    global max_sys_call
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=max_sequences_len, value=0.)
    testX = pad_sequences(testX, maxlen=max_sequences_len, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY_old = testY
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    print("GET max_sequences_len embedding %d" % max_sequences_len)
    print("GET max_sys_call embedding %d" % max_sys_call)
    net = tflearn.input_data([None, max_sequences_len])
    net = tflearn.embedding(net, input_dim=max_sys_call + 1, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.3)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
    y_predict_list = model.predict(testX)
    # print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        # print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    #y_predict = to_categorical(y_predict, nb_classes=2)
    print(classification_report(testY_old, y_predict))
    print(metrics.confusion_matrix(testY_old, y_predict))
Developer: DemonZeros, Project: 1book, Lines: 47, Source: 16-5.py
Example 10: do_cnn_word2vec_2d
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print("CNN and word2vec2d")
    y_test = testY
    #trainX = pad_sequences(trainX, maxlen=max_features, value=0.)
    #testX = pad_sequences(testX, maxlen=max_features, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, run_id="sms")
    y_predict_list = model.predict(testX)
    print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Developer: Emersonxuelinux, Project: 2book, Lines: 46, Source: sms.py
Example 11: bi_lstm
def bi_lstm(trainX, trainY, testX, testY):
    trainX = pad_sequences(trainX, maxlen=200, value=0.)
    testX = pad_sequences(testX, maxlen=200, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data(shape=[None, 200])
    net = tflearn.embedding(net, input_dim=20000, output_dim=128)
    net = tflearn.bidirectional_rnn(net, BasicLSTMCell(128), BasicLSTMCell(128))
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=2)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=64, run_id="rnn-bilstm")
Developer: Emersonxuelinux, Project: 2book, Lines: 18, Source: rnn.py
Example 12: do_cnn_word2vec_2d_345
def do_cnn_word2vec_2d_345(trainX, testX, trainY, testY):
    global max_features
    global max_document_length
    print("CNN and word2vec_2d_345")
    y_test = testY
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')
    network = tflearn.embedding(network, input_dim=1, output_dim=128, validate_indices=False)
    branch1 = conv_2d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_2d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_2d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool_2d(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="sms")
    y_predict_list = model.predict(testX)
    print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
Developer: Emersonxuelinux, Project: 2book, Lines: 41, Source: sms.py
Example 13: lstm
def lstm(trainX, trainY, testX, testY):
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, 100])
    net = tflearn.embedding(net, input_dim=10000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="rnn-lstm")
Developer: Emersonxuelinux, Project: 2book, Lines: 21, Source: rnn.py
Example 14: do_rnn_wordbag
def do_rnn_wordbag(trainX, testX, trainY, testY):
    global max_document_length
    print("RNN and wordbag")
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="review", n_epoch=5)
Developer: Emersonxuelinux, Project: 2book, Lines: 22, Source: review.py
Example 15: create_datasets
def create_datasets(file_path, vocab_size=30000, val_fraction=0.0):
    # IMDB Dataset loading
    train, test, _ = imdb.load_data(
        path=file_path,
        n_words=vocab_size,
        valid_portion=val_fraction,
        sort_by_len=False)
    trainX, trainY = train
    testX, testY = test
    # Data preprocessing
    # Sequence padding
    trainX = pad_sequences(trainX, maxlen=FLAGS.max_len, value=0.)
    testX = pad_sequences(testX, maxlen=FLAGS.max_len, value=0.)
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    train_dataset = DataSet(trainX, trainY)
    return train_dataset
Developer: Biocodings, Project: Paddle, Lines: 22, Source: reader.py
Example 16: do_cnn
def do_cnn(trainX, testX, trainY, testY):
    # Converting labels to binary vectors
    trainY = to_categorical(trainY, nb_classes=4)
    testY = to_categorical(testY, nb_classes=4)
    # Building convolutional network
    network = input_data(shape=[None, 32, 32, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 16, activation='tanh')
    network = dropout(network, 0.1)
    network = fully_connected(network, 16, activation='tanh')
    network = dropout(network, 0.1)
    network = fully_connected(network, 4, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=10, validation_set=(testX, testY), show_metric=True, run_id="malware")
Developer: Emersonxuelinux, Project: 2book, Lines: 23, Source: malware.py
Example 17: load_test_data
def load_test_data():
    test_dict = sio.loadmat(test_location)
    X = np.asarray(test_dict['X'])
    X_test = []
    for i in range(X.shape[3]):
        X_test.append(X[:, :, :, i])
    X_test = np.asarray(X_test)
    Y_test = test_dict['y']
    # SVHN uses label 10 for the digit 0, so remap it before one-hot encoding.
    for i in range(len(Y_test)):
        if Y_test[i] % 10 == 0:
            Y_test[i] = 0
    Y_test = to_categorical(Y_test, 10)
    return (X_test, Y_test)
Developer: codemukul95, Project: SVHN-classification-using-Tensorflow, Lines: 15, Source: load_input.py
Example 18: load_train_data
def load_train_data():
    train_dict = sio.loadmat(train_location)
    X = np.asarray(train_dict['X'])
    X_train = []
    for i in range(X.shape[3]):
        X_train.append(X[:, :, :, i])
    X_train = np.asarray(X_train)
    Y_train = train_dict['y']
    # SVHN uses label 10 for the digit 0, so remap it before one-hot encoding.
    for i in range(len(Y_train)):
        if Y_train[i] % 10 == 0:
            Y_train[i] = 0
    Y_train = to_categorical(Y_train, 10)
    return (X_train, Y_train)
Developer: codemukul95, Project: SVHN-classification-using-Tensorflow, Lines: 15, Source: load_input.py
Example 19: generate_image_sets_for_single_digit
def generate_image_sets_for_single_digit(nb_sample=SAMPLE_SIZE, single_digit_index=0):
    captcha = ImageCaptcha()
    labels = []
    images = []
    for i in range(0, nb_sample):
        digits = 0
        last_digit = INVALID_DIGIT
        for j in range(0, DIGIT_COUNT):
            digit = last_digit
            while digit == last_digit:
                digit = random.randint(0, 9)
            last_digit = digit
            digits = digits * 10 + digit
        digits_as_str = DIGIT_FORMAT_STR % digits
        labels.append(digits_as_str)
        images.append(captcha.generate_image(digits_as_str))
    digit_labels = list()
    for digit_index in range(0, DIGIT_COUNT):
        digit_labels.append(np.empty(nb_sample, dtype="int8"))
    shape = (nb_sample, IMAGE_STD_HEIGHT, IMAGE_STD_WIDTH, RGB_COLOR_COUNT)
    digit_image_data = np.empty(shape, dtype="float32")
    for index in range(0, nb_sample):
        img = images[index].resize((IMAGE_STD_WIDTH, IMAGE_STD_HEIGHT), PIL.Image.LANCZOS)
        img_arr = np.asarray(img, dtype="float32") / 255.0
        digit_image_data[index, :, :, :] = img_arr
        for digit_index in range(0, DIGIT_COUNT):
            digit_labels[digit_index][index] = labels[index][digit_index]
    x = digit_image_data
    y = to_categorical(digit_labels[single_digit_index], CLASS_COUNT)
    return x, y
Developer: utensil, Project: julia-playground, Lines: 39, Source: train_captcha_tfl.py
Example 20: load_dataset
def load_dataset(x_count, y_count):
    print('[+] Loading data')
    X = []
    Y = []
    places = set()
    data = np.load('grid/data-{0}-{1}.npy'.format(x_count, y_count))
    for row in data:
        x = list(map(float, row[1:5]))
        time = row[4]
        x.extend([
            (time // 60) % 24 + 1,     # Hour
            (time // 1440) % 7 + 1,    # Day
            (time // 43200) % 12 + 1,  # Month
            (time // 525600) + 1       # Year
        ])
        X.append(x)
        Y.append(row[5])
        places.add(row[5])
    places = list(places)
    Y = [places.index(y) for y in Y]
    Y = to_categorical(Y, len(places))
    print('[+] All data loaded')
    return X, Y
Developer: isseu, Project: kaggle-facebook-predicting-check-ins-nn, Lines: 23, Source: train.py
Note: The tflearn.data_utils.to_categorical examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.