Building a CNN model in TensorFlow

Before we start, there is a bit of good news: with TensorFlow, you don't need to worry about writing backpropagation or gradient descent code yourself, and all the common types of layers are already implemented, so things should be easier.
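To see part of what TensorFlow is doing for us, consider the following minimal sketch (the toy variable w and loss here are purely illustrative): once a computation is defined in the graph, tf.gradients derives the gradients symbolically, and the optimizers we will use later invoke this machinery internally:

import tensorflow as tf

# A toy computation: loss = (3w - 1)^2
w = tf.Variable(2.0)
loss = tf.square(3.0 * w - 1.0)

# TensorFlow derives d(loss)/dw symbolically; no hand-written backpropagation needed
grad_w = tf.gradients(loss, [w])[0]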

In the TensorFlow example here, we will change things a bit from what you learned in Chapter 1, Setup and Introduction to TensorFlow, and use the tf.layers API to create whole layers of our network with ease:

import tensorflow as tf 
from tensorflow.examples.tutorials.mnist import input_data 
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) 
# MNIST data input (img shape: 28*28) 
num_input = 28*28*1 
# MNIST total classes (0-9 digits) 
num_classes = 10 
 
# Define model I/O (placeholders feed data into the graph and fetch results from it)
x_ = tf.placeholder(tf.float32, shape=[None, num_input], name='X')
y_ = tf.placeholder(tf.float32, shape=[None, num_classes], name='Y')
# Boolean flag that will switch dropout on for training and off for evaluation
is_training = tf.placeholder(tf.bool)
 
# Convert the flat feature vector into a batch of 28x28x1 images
# The -1 has the same effect as the "None" value and is
# used to allow a variable batch size
x_image = tf.reshape(x_, [-1, 28, 28, 1]) 
 
# Convolutional Layer #1 
# Computes 32 features using a 5x5 filter with ReLU activation. 
# Padding is added to preserve width and height. 
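# ("same" padding pads the input so that, with a stride of 1,
# the output keeps the input's width and height)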
# Input Tensor Shape: [batch_size, 28, 28, 1] 
# Output Tensor Shape: [batch_size, 28, 28, 32] 
conv1 = tf.layers.conv2d(inputs=x_image, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) 
 
# Pooling Layer #1 
# First max pooling layer with a 2x2 filter and stride of 2 
# Input Tensor Shape: [batch_size, 28, 28, 32] 
# Output Tensor Shape: [batch_size, 14, 14, 32] 
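# (each 2x2 window is reduced to its maximum value, halving width and height)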
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) 
 
# Convolutional Layer #2 
# Computes 64 features using a 5x5 filter with ReLU activation.
# Input Tensor Shape: [batch_size, 14, 14, 32] 
# Output Tensor Shape: [batch_size, 14, 14, 64] 
conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu)
 
# Pooling Layer #2 
# Second max pooling layer with a 2x2 filter and stride of 2 
# Input Tensor Shape: [batch_size, 14, 14, 64] 
# Output Tensor Shape: [batch_size, 7, 7, 64] 
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) 
 
# Flatten tensor into a batch of vectors 
# Input Tensor Shape: [batch_size, 7, 7, 64] 
# Output Tensor Shape: [batch_size, 7 * 7 * 64] 
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) 
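# (-1 again lets TensorFlow infer the batch dimension)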
 
# Dense Layer 
# Densely connected layer with 1024 neurons 
# Input Tensor Shape: [batch_size, 7 * 7 * 64] 
# Output Tensor Shape: [batch_size, 1024] 
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) 
 
# Add dropout operation; 0.6 probability that element will be kept 
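# (rate=0.4 is the drop probability; the layer is only active when training is True)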
dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=is_training)
 
# Logits layer 
# Input Tensor Shape: [batch_size, 1024] 
# Output Tensor Shape: [batch_size, 10] 
logits = tf.layers.dense(inputs=dropout, units=10) 
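# (no activation here; the loss function below applies the softmax)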
 
# Define a loss function (Multinomial cross-entropy) and how to optimize it 
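# softmax_cross_entropy_with_logits fuses the softmax and the cross-entropy
# into one numerically stable op, so we pass the raw logits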
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)) 
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) 
 
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 
 
# Create an operation that initializes all of the graph's variables
init = tf.global_variables_initializer() 
 
# Avoid allocating all of the GPU memory to this single process
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333) 
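# (alternatively, tf.GPUOptions(allow_growth=True) lets TensorFlow grow its memory use on demand)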
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) 
sess.run(init) 
 
# Train the model
for i in range(2000):
    # Get a batch of 50 images and labels
    batch = mnist.train.next_batch(50)

    # Report the training accuracy every 100 iterations
    if i % 100 == 0:
        # Evaluate accuracy with dropout disabled
        train_accuracy = accuracy.eval(session=sess, feed_dict={x_: batch[0], y_: batch[1], is_training: False})
        print("step %d, training accuracy %g" % (i, train_accuracy))

    # Run one optimization step (dropout enabled)
    train_step.run(session=sess, feed_dict={x_: batch[0], y_: batch[1], is_training: True})

print("Test Accuracy:", sess.run(accuracy, feed_dict={x_: mnist.test.images, y_: mnist.test.labels, is_training: False}))