This article collects typical usage examples of the Python function tensorflow.examples.tutorials.mnist.input_data.read_data_sets. If you have been wondering what read_data_sets does, how to call it, or where to find working examples, the curated snippets below should help.
The article presents 20 code examples of read_data_sets, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
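Before the examples, a minimal sketch of the typical call may be useful (a sketch only: it assumes TensorFlow 1.x, since the tensorflow.examples.tutorials.mnist package was deprecated in later 1.x releases and removed in TensorFlow 2.x):

from tensorflow.examples.tutorials.mnist import input_data

# Downloads the four MNIST archives into MNIST_data/ on first use, then
# returns a Datasets object with .train, .validation and .test splits.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

print(mnist.train.images.shape)   # (55000, 784): flattened 28x28 images, scaled to [0, 1]
print(mnist.train.labels.shape)   # (55000, 10) with one_hot=True
batch_xs, batch_ys = mnist.train.next_batch(100)  # draw a mini-batch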
Example 1: BuildComplexLearner
def BuildComplexLearner(restore=True):
    """Builds a Complex Learner that uses CNNs for digit classification.

    Args:
        restore: (bool) Whether to restore the model or train a new one.
    """
    if restore:
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        learner = ComplexLearner()
        learner.Restore("thresholded_model.ckpt")
        return learner
    else:
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        learner = ComplexLearner()
        ThresholdPixels(mnist.train.images)

        def signal_handler(sig, frame):
            # Save the model on Ctrl-C so training progress is not lost.
            print("Caught ctrl-c. Saving model then exiting...")
            learner.Save("thresholded_ctrl_c.ckpt")
            sys.exit(0)
        signal.signal(signal.SIGINT, signal_handler)

        learner.Train(mnist.train)
        ThresholdPixels(mnist.test.images)
        learner.Test(mnist.test)
        return learner
Author: mjchao | Project: Machine-Learning-Experiments | Source file: Model.py

Example 2: fetch_data
def fetch_data():
    if not exists(data_dir):
        makedirs(data_dir)

    # Normalize the data once if we haven't done it before, and store it in a file
    if not exists(f'{data_dir}/{data_file}'):
        print('Downloading MNIST data')
        mnist = input_data.read_data_sets(data_dir, one_hot=True)

        def _normalize(data, mean=None, std=None):
            if mean is None:
                mean = np.mean(data, axis=0)
                std = np.std(data, axis=0)
            return div0((data - mean), std), mean, std

        train_data, mean, std = _normalize(mnist.train.images)
        validation_data, *_ = _normalize(mnist.validation.images, mean, std)
        test_data, *_ = _normalize(mnist.test.images, mean, std)

        mnist_data = {'train_images': train_data,
                      'train_labels': mnist.train.labels,
                      'validation_images': validation_data,
                      'validation_labels': mnist.validation.labels,
                      'test_images': test_data,
                      'test_labels': mnist.test.labels}

        with open(f'{data_dir}/{data_file}', 'wb') as f:
            pickle.dump(mnist_data, f)
    # If we have already normalized the data, load it from the cache
    else:
        with open(f'{data_dir}/{data_file}', 'rb') as f:
            mnist_data = pickle.load(f)
    return mnist_data
Author: AUHack | Project: ws18_tensorflow | Source file: mnist_data_feed.py
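Note that example 2 calls a div0 helper defined elsewhere in that project. A plausible minimal sketch of such a helper (a hypothetical reconstruction, not the project's actual code) is elementwise division that returns 0 wherever the denominator is 0, since constant MNIST border pixels have zero standard deviation:

import numpy as np

def div0(a, b):
    # Hypothetical helper: a / b elementwise, with 0 where b == 0,
    # so pixels with zero std don't produce NaN or inf.
    with np.errstate(divide='ignore', invalid='ignore'):
        out = np.true_divide(a, b)
    out[~np.isfinite(out)] = 0.0
    return out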
Example 3: load_data
def load_data(data_dir):
    """Returns training and test tf.data.Dataset objects."""
    data = input_data.read_data_sets(data_dir, one_hot=True)
    train_ds = tf.data.Dataset.from_tensor_slices((data.train.images,
                                                   data.train.labels))
    test_ds = tf.data.Dataset.from_tensors((data.test.images, data.test.labels))
    return (train_ds, test_ds)
Author: ClowJ | Project: tensorflow | Source file: mnist.py

Example 4: load_data
def load_data(name, random_labels=False):
    """Load the data.

    name - the name of the dataset
    random_labels - True if we want to assign random labels to the dataset
    return: an object with data and labels
    """
    print('Loading Data...')
    C = type('type_C', (object,), {})
    data_sets = C()
    if name.split('/')[-1] == 'MNIST':
        data_sets_temp = input_data.read_data_sets(
            os.path.dirname(sys.argv[0]) + "/data/MNIST_data/", one_hot=True)
        data_sets.data = np.concatenate(
            (data_sets_temp.train.images, data_sets_temp.test.images), axis=0)
        data_sets.labels = np.concatenate(
            (data_sets_temp.train.labels, data_sets_temp.test.labels), axis=0)
    else:
        d = sio.loadmat(os.path.join(os.path.dirname(sys.argv[0]), name + '.mat'))
        F = d['F']
        y = d['y']
        C = type('type_C', (object,), {})
        data_sets = C()
        data_sets.data = F
        data_sets.labels = np.squeeze(
            np.concatenate((y[None, :], 1 - y[None, :]), axis=0).T)
    # If we want to assign random labels to the data
    if random_labels:
        labels = np.zeros(data_sets.labels.shape)
        labels_index = np.random.randint(low=0, high=labels.shape[1],
                                         size=labels.shape[0])
        labels[np.arange(len(labels)), labels_index] = 1
        data_sets.labels = labels
    return data_sets
Author: HounD | Project: IDNNs | Source file: utils.py

Example 5: main
def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # The raw formulation of cross-entropy,
    #
    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
    #                                 reduction_indices=[1]))
    #
    # can be numerically unstable.
    #
    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
    # outputs of 'y', and then average across the batch.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for _ in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))
Author: stasonhan | Project: machine-learing | Source file: mnist.py

Example 6: main
def main(_):
    # Create a cluster from the parameter server and worker hosts.
    cluster = tf.train.ClusterSpec({"local": ["localhost:2222"]})

    # Create and start a server for the local task.
    server = tf.train.Server(cluster, job_name="local", task_index=0)

    # Build model...
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # tf.initialize_all_variables is the pre-1.0 name of tf.global_variables_initializer
    init = tf.initialize_all_variables()
    sess = tf.Session(server.target)
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Author: vcramesh | Project: tf | Source file: distTF0.py

Example 7: main
def main(_):
    cluster, server, job_name, task_index, num_workers = \
        get_mpi_cluster_server_jobname(num_ps=4, num_workers=5)
    MY_GPU = task_index % NUM_GPUS

    if job_name == "ps":
        server.join()
    elif job_name == "worker":
        is_chief = (task_index == 0)
        # Assigns ops to the local worker by default.
        with tf.device(tf.train.replica_device_setter(
                worker_device='/job:worker/task:{}/gpu:{}'.format(task_index, MY_GPU),
                cluster=cluster)):
            loss, accuracy, input_tensor, true_output_tensor = get_loss_accuracy_ops()

            global_step = tf.Variable(0, trainable=False)
            optimizer = tf.train.AdagradOptimizer(0.01)
            if sync_mode:
                optimizer = tf.train.SyncReplicasOptimizer(
                    optimizer, replicas_to_aggregate=num_workers,
                    replica_id=task_index, total_num_replicas=num_workers)
            train_op = optimizer.minimize(loss, global_step=global_step)

            if sync_mode and is_chief:
                # Initial token and chief queue runners required by the sync_replicas mode
                chief_queue_runner = optimizer.get_chief_queue_runner()
                init_tokens_op = optimizer.get_init_tokens_op()

            saver = tf.train.Saver()
            summary_op = tf.merge_all_summaries()
            init_op = tf.initialize_all_variables()

        # Create a "supervisor", which oversees the training process.
        sv = tf.train.Supervisor(is_chief=is_chief, logdir="/tmp/train_logs",
                                 init_op=init_op, summary_op=summary_op,
                                 saver=saver, global_step=global_step,
                                 save_model_secs=600)

        mnist = input_data.read_data_sets(data_dir, one_hot=True)

        # The supervisor takes care of session initialization, restoring from
        # a checkpoint, and closing when done or an error occurs.
        config = tf.ConfigProto(allow_soft_placement=True)
        with sv.prepare_or_wait_for_session(server.target, config=config) as sess:
            if sync_mode and is_chief:
                sv.start_queue_runners(sess, [chief_queue_runner])
                sess.run(init_tokens_op)

            step = 0
            start = time.time()
            while not sv.should_stop() and step < 1000:
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                train_feed = {input_tensor: batch_xs,
                              true_output_tensor: batch_ys,
                              K.learning_phase(): 1}
                _, step, curr_loss, curr_accuracy = sess.run(
                    [train_op, global_step, loss, accuracy], feed_dict=train_feed)
                sys.stdout.write('\rWorker {}, step: {}, loss: {}, accuracy: {}'.format(
                    task_index, step, curr_loss, curr_accuracy))
                sys.stdout.flush()

        # Ask for all the services to stop.
        sv.stop()
        print('Elapsed: {}'.format(time.time() - start))
Author: jnkh | Project: plasma | Source file: distributed_mnist.py

Example 8: test_fully_connected
def test_fully_connected(self):
    # self.mock.verbose = True
    self.mock.loop_cnt = 65
    self.mock.add_layer(784)
    self.mock.add_cnn()
    self.mock.add_pool()
    self.mock.add_cnn()
    self.mock.add_pool()
    self.mock.add_layer(1024, act_func=tf.nn.relu)
    self.mock.add_layer(10, act_func=tf.nn.softmax)
    self.mock.set_entropy_func(self.mock.entropy_log)

    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets("down/", one_hot=True)

    def get_feed(x=None):
        b = mnist.train.next_batch(100)
        feed = {self.mock.input: b[0], self.mock.target: b[1]}
        return feed

    self.mock.get_feed_before_loop = get_feed
    self.mock.get_feed_each_one_step = get_feed

    # def print_entropy(i, sess, feed):
    #     print(sess.run(self.mock.entropy, feed))
    # self.mock.after_one_step = print_entropy

    self.mock.learn()
    self.assertTrue(0.5 < self.mock.last_acc,
                    'less 0.5 acc %2.3f' % self.mock.last_acc)
Author: doonething | Project: tensorflow_study | Source file: cnn_test.py

Example 9: __init__
def __init__(self, config, sess):
    self.input_dim = config.input_dim          # 784
    self.z_dim = config.z_dim                  # 14
    self.c_cat = config.c_cat                  # 10: Category c - one-hot vector for 10 label values
    self.c_cont = config.c_cont                # 2: Continuous c
    self.d_update = config.d_update            # 2: Run discriminator twice before generator
    self.batch_size = config.batch_size
    self.nepoch = config.nepoch
    self.lr = config.lr                        # Learning rate 0.001
    self.max_grad_norm = config.max_grad_norm  # 40
    self.show_progress = config.show_progress  # False
    self.optimizer = tf.train.AdamOptimizer
    self.checkpoint_dir = config.checkpoint_dir
    self.image_dir = config.image_dir

    home = str(Path.home())
    DATA_ROOT_DIR = os.path.join(home, "dataset", "MNIST_data")
    self.mnist = input_data.read_data_sets(DATA_ROOT_DIR, one_hot=True)
    self.random_seed = 42

    self.X = tf.placeholder(tf.float32, [None, self.input_dim], 'X')
    self.z = tf.placeholder(tf.float32, [None, self.z_dim], 'z')
    self.c_i = tf.placeholder(tf.float32, [None, self.c_cat], 'c_cat')
    self.c_j = tf.placeholder(tf.float32, [None, self.c_cont], 'c_cont')
    self.c = tf.concat([self.c_i, self.c_j], axis=1)
    self.z_c = tf.concat([self.z, self.c_i, self.c_j], axis=1)
    self.training = tf.placeholder_with_default(False, shape=(), name='training')
    self.sess = sess
Author: lzqkean | Project: deep_learning | Source file: InfoDCGAN.py

Example 10: main
def main(_):
    # Load the MNIST dataset in one-hot encoded form.
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    # train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # Train
    for _ in range(10000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))
Author: kuhanmo | Project: MLstudy | Source file: 02_MNIST+For+ML+Beginners.py

Example 11: get_data
def get_data(task_name):
    ## Data sets
    if task_name == 'qianli_func':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(
            file_name='./f_1d_cos_no_noise_data.npz')
    elif task_name == 'f_2D_task2':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(
            file_name='./f_2d_task2_ml_data_and_mesh.npz')
    elif task_name == 'f_2d_task2_xsinglog1_x_depth2':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(
            file_name='./f_2d_task2_ml_xsinlog1_x_depth_2data_and_mesh.npz')
    elif task_name == 'f_2d_task2_xsinglog1_x_depth3':
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = get_data_from_file(
            file_name='./f_2d_task2_ml_xsinlog1_x_depth_3data_and_mesh.npz')
    elif task_name == 'MNIST_flat':
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        X_train, Y_train = mnist.train.images, mnist.train.labels
        X_cv, Y_cv = mnist.validation.images, mnist.validation.labels
        X_test, Y_test = mnist.test.images, mnist.test.labels
    elif task_name == 'hrushikesh':
        with open('../hrushikesh/patient_data_X_Y.json', 'r') as f_json:
            patients_data = json.load(f_json)
        X = patients_data['1']['X']
        Y = patients_data['1']['Y']
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.40)
        X_cv, X_test, Y_cv, Y_test = train_test_split(X_test, Y_test, test_size=0.5)
        (X_train, Y_train, X_cv, Y_cv, X_test, Y_test) = (
            np.array(X_train), np.array(Y_train), np.array(X_cv),
            np.array(Y_cv), np.array(X_test), np.array(Y_test))
    else:
        raise ValueError('task_name: %s does not exist. Try experiment that exists' % (task_name))
    return (X_train, Y_train, X_cv, Y_cv, X_test, Y_test)
Author: brando90 | Project: tensor_flow_experiments | Source file: f_1D_data.py

Example 12: main
def main():
    data_path = '/home/charlesxu/Workspace/data/MNIST_data/'
    data = input_data.read_data_sets(data_path, one_hot=True)
    original(data)
    widen(data)
    deepen(data)
Author: the0demiurge | Project: python-test | Source file: IncreaseNN.py

Example 13: __init__
def __init__(self, batch_size):
    from tensorflow.examples.tutorials.mnist import input_data
    self.mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    self.x = tf.placeholder(tf.float32, shape=[batch_size, 28, 28, 1])
    self.feed_y = tf.placeholder(tf.float32, shape=[batch_size, 10])
    self.y = (2 * self.feed_y) - 1  # rescale one-hot labels from {0, 1} to {-1, 1}
Author: 255BITS | Project: hyperchamber-gan | Source file: classification.py

Example 14: main
def main(_):
    n_in = 784
    n_out = 10
    n_hidden = 200
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    w_in = tf.Variable(tf.random_normal([n_in, n_hidden]))
    b_in = tf.Variable(tf.random_normal([n_hidden]))
    w_out = tf.Variable(tf.random_normal([n_hidden, n_out]))
    b_out = tf.Variable(tf.random_normal([n_out]))

    # Create the model
    x = tf.placeholder(tf.float32, [None, n_in])
    h = tf.nn.relu(tf.add(tf.matmul(x, w_in), b_in))
    y = tf.add(tf.matmul(h, w_out), b_out)
    batch_size = 100
    labels = tf.placeholder(tf.float32, [None, n_out])
    # Keyword arguments: newer TF 1.x versions reject positional logits/labels.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=labels))
    optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)

    with tf.Session() as sess:
        # Train
        sess.run(tf.initialize_all_variables())
        for _ in range(5000):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(optimizer, feed_dict={x: batch_xs, labels: batch_ys})
            # print(sess.run(tf.nn.softmax(y), feed_dict={x: batch_xs}))

        # Test trained model
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                            labels: mnist.test.labels}))
Author: DCSaunders | Project: NN-samples | Source file: mnist_simple_nn.py

Example 15: runMNIST
def runMNIST(self):  # 'self' is used below, so this is presumably an instance method
    imageSize = 4
    imageChannels = 1
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    self.createNetwork("fullyConnected", imageSize)
Author: sudnya | Project: misc | Source file: RunMNIST.py

Example 16: __init__
def __init__(self, name='mnist', source='./data/mnist/', one_hot=True,
             batch_size=64, seed=0):
    self.name = name
    self.source = source
    self.one_hot = one_hot
    self.batch_size = batch_size
    self.seed = seed
    np.random.seed(seed)           # To make your "random" minibatches the same as ours
    self.count = 0
    tf.set_random_seed(self.seed)  # Fix the random seed for randomized tensorflow operations.

    if name == 'mnist':
        self.mnist = input_data.read_data_sets(source)
        self.data = self.mnist.train.images
        print('data shape: {}'.format(np.shape(self.data)))
        self.minibatches = self.random_mini_batches(self.data.T, self.batch_size, self.seed)
    elif name == 'cifar10':
        # download data files from 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
        # and extract them into the correct folder
        data_files = ['data_batch_1', 'data_batch_2', 'data_batch_3',
                      'data_batch_4', 'data_batch_5']
        self.data, _ = read_cifar10(source, data_files)
        self.minibatches = self.random_mini_batches(self.data.T, self.batch_size, self.seed)
    elif name == 'celeba':
        # Count the number of data images
        self.im_list = list_dir(source, 'jpg')
        self.nb_imgs = len(self.im_list)
        self.nb_compl_batches = int(math.floor(self.nb_imgs / self.batch_size))
        self.nb_total_batches = self.nb_compl_batches
        if self.nb_imgs % batch_size != 0:
            self.nb_total_batches = self.nb_compl_batches + 1
        self.count = 0
        self.color_space = 'RGB'
Author: KhanhDinhDuy | Project: gaan | Source file: dataset.py

Example 17: main
def main():
    # Load the input data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Set up variables and placeholders
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    # Implement our model
    y = tf.nn.softmax(tf.matmul(x, W) + b)

    # Placeholder to input the correct answers
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Implement cross-entropy
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

    # Apply an optimization algorithm to reduce the cost
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Initialize variables
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    # Now let's train!
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    # Evaluate our model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
Author: gragas | Project: aml-final | Source file: mnistforbeginners.py

Example 18: load_mnist_dataset
def load_mnist_dataset(mode='supervised', one_hot=True):
    """Load the MNIST handwritten digits dataset.

    :param mode: 'supervised' or 'unsupervised' mode
    :param one_hot: whether to get one hot encoded labels
    :return: train, validation, test data:
        (X, y) for each split if 'supervised',
        X for each split if 'unsupervised'
    """
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=one_hot)

    # Training set
    trX = mnist.train.images
    trY = mnist.train.labels

    # Validation set
    vlX = mnist.validation.images
    vlY = mnist.validation.labels

    # Test set
    teX = mnist.test.images
    teY = mnist.test.labels

    if mode == 'supervised':
        return trX, trY, vlX, vlY, teX, teY
    elif mode == 'unsupervised':
        return trX, vlX, teX
Author: alvarojoao | Project: Deep-Learning-TensorFlow | Source file: datasets.py
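A brief usage sketch for this helper (variable names illustrative only):

# Supervised mode: images and labels for each split.
trX, trY, vlX, vlY, teX, teY = load_mnist_dataset(mode='supervised')

# Unsupervised mode: images only, e.g. for autoencoder pretraining.
trX, vlX, teX = load_mnist_dataset(mode='unsupervised', one_hot=False)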
Example 19: main
def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)
        cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)

    graph_location = tempfile.mkdtemp()
    print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(20000):
            batch = mnist.train.next_batch(50)
            if i % 100 == 0:
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0], y_: batch[1], keep_prob: 1.0})
                print('step %d, training accuracy %g' % (i, train_accuracy))
            train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})

        print('test accuracy %g' % accuracy.eval(feed_dict={
            x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
Author: stasonhan | Project: machine-learing | Source file: mnist_deep.py

Example 20: main
def main():
    sess = tf.Session()
    cnn = CNN(sess)
    sess.run(tf.global_variables_initializer())

    # Load the MNIST Data
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    # Training
    for epoch in range(TRAINING_EPOCH):
        cost = 0.
        total_batch = int(mnist.train.num_examples / BATCH_SIZE)
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)
            c, _ = cnn.train(batch_xs, batch_ys)
            cost += c
        # Average the accumulated cost over all batches in this epoch.
        avg_cost = cost / total_batch
        print('Epoch #%2d' % (epoch + 1))
        print('- Average cost: %4f' % avg_cost)

    # Testing
    print('Accuracy:', cnn.get_accuracy(mnist.test.images, mnist.test.labels))
Author: zake7749 | Project: TensorFlow-Study-Notes | Source file: MNIST_CNN.py
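A closing note on portability: every example above depends on tensorflow.examples.tutorials.mnist, which was deprecated in later TensorFlow 1.x releases and removed in TensorFlow 2.x. On a modern installation, a rough stand-in (a sketch under that assumption, not a drop-in replacement: it reproduces neither the Datasets wrapper nor next_batch) is:

import numpy as np
import tensorflow as tf

# tf.keras ships MNIST as uint8 arrays of shape (N, 28, 28).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Mimic read_data_sets' preprocessing: flatten to (N, 784),
# scale to [0, 1], and one-hot encode the labels.
x_train = x_train.reshape(-1, 784).astype(np.float32) / 255.0
x_test = x_test.reshape(-1, 784).astype(np.float32) / 255.0
y_train = np.eye(10, dtype=np.float32)[y_train]
y_test = np.eye(10, dtype=np.float32)[y_test]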
Note: the tensorflow.examples.tutorials.mnist.input_data.read_data_sets examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs and similar code/documentation platforms. The snippets were selected from projects contributed by their original authors, who retain copyright over the source code; consult the corresponding project's license before distributing or reusing it. Do not republish without permission.