29. CNN with TensorFlow

CNN implementation with TensorFlow


MNIST with CNN from TensorFlow

The first example builds the network with the low-level tf.nn API: three convolution blocks (32, 64, and 128 filters, each followed by ReLU, 2x2 max pooling, and dropout) and two fully connected layers (2048 -> 625 -> 10).

import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

# Reproducibility
tf.set_random_seed(777)

class CRPDF:
    """ Convolution ReLU Pooling DropOut Flat """
    def __init__(self, name, input_layer, filter_size, filter_stddev,
                 conv_stride, pool_size, pool_stride,
                 keep_prob, padding="SAME", isFlat=False):
        """
        :param padding: if "SAME",
                        tensorflow adds pad to input image
                        to return output image
                        whose size is the same as the input image.
        """
        self.name = name
        self.isFlat = isFlat
        self.W = tf.Variable(
                    tf.random_normal(filter_size, stddev=filter_stddev))
        self.conv = tf.nn.conv2d(
            input_layer, self.W, strides=conv_stride, padding=padding)
        self.relu = tf.nn.relu(self.conv)
        self.pool = tf.nn.max_pool(self.relu,
                                   ksize=pool_size,
                                   strides=pool_stride,
                                   padding=padding)
        self.drop = tf.nn.dropout(self.pool, keep_prob=keep_prob)

        if self.isFlat:
            _, width, height, channels = self.drop.shape
            reSize = int(width) * int(height) * int(channels)
            self.flat = tf.reshape(self.drop, [-1, reSize])

    def output_layer(self):
        return self.flat if self.isFlat else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        print("Filter  : {0}".format(self.W.shape))
        print("Conv    : {0}".format(self.conv.shape))
        print("ReLU    : {0}".format(self.relu.shape))
        print("Pool    : {0}".format(self.pool.shape))
        print("DropOut : {0}".format(self.drop.shape))

        if self.isFlat:
            print("Flat    : {0}".format(self.flat.shape))

        print("")

class FC:
    """ Fully Connected """
    def __init__(self, name, input_layer, iNode, oNode,
                 keep_prob, initializer=None, isLast=False):
        self.name = name
        self.isLast = isLast
        # Use Xavier initialization unless another initializer is given
        self.initializer = (initializer or
                            tf.contrib.layers.xavier_initializer())

        self.W = tf.get_variable(name=self.name, shape=[iNode, oNode],
                     initializer=self.initializer)
        self.b = tf.Variable(tf.random_normal([oNode]))
        if self.isLast:
            self.inference = tf.matmul(input_layer, self.W) + self.b
        else:
            self.relu = tf.nn.relu(tf.matmul(input_layer, self.W) + self.b)
            self.drop = tf.nn.dropout(self.relu, keep_prob=keep_prob)

    def output_layer(self):
        return self.inference if self.isLast else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        print("Weight  : {0}".format(self.W.shape))
        print("Bias    : {0}".format(self.b.shape))
        if self.isLast:
            print("Inf     : {0}".format(self.inference.shape))
        else:
            print("ReLU    : {0}".format(self.relu.shape))
            print("DropOut : {0}".format(self.drop.shape))

        print("")

class CNN:
    def __init__(self, sess, name, learning_rate=0.01):
        self.sess = sess
        self.name = name
        self.learning_rate = learning_rate
        self._build_net()

    def _build_net(self):
        with tf.variable_scope(self.name):
            # Probability for DropOut
            self.keep_prob = tf.placeholder(tf.float32)

            # Placeholders for Input and Output
            self.X = tf.placeholder(tf.float32, [None, 784])
            self.Y = tf.placeholder(tf.float32, [None, 10])

            # Image 28x28x1 (black/white)
            inImg = tf.reshape(self.X, [-1, 28, 28, 1])

            layer1 = CRPDF(name="conv1", input_layer=inImg,
                           filter_size=[3,3,1,32], filter_stddev=0.01,
                           conv_stride=[1,1,1,1], pool_size=[1,2,2,1],
                           pool_stride=[1,2,2,1], keep_prob=self.keep_prob)
            layer1.arch()
            L1 = layer1.output_layer()

            layer2 = CRPDF(name="conv2", input_layer=L1,
                           filter_size=[3,3,32,64], filter_stddev=0.01,
                           conv_stride=[1,1,1,1], pool_size=[1,2,2,1],
                           pool_stride=[1,2,2,1], keep_prob=self.keep_prob)
            layer2.arch()
            L2 = layer2.output_layer()

            layer3 = CRPDF(name="conv3", input_layer=L2,
                           filter_size=[3, 3, 64, 128], filter_stddev=0.01,
                           conv_stride=[1, 1, 1, 1], pool_size=[1, 2, 2, 1],
                           pool_stride=[1, 2, 2, 1], keep_prob=self.keep_prob,
                           isFlat=True)
            layer3.arch()
            L3 = layer3.output_layer()

            layer4 = FC(name="fc1", input_layer=L3, iNode=128 * 4 * 4,
                        oNode=625, keep_prob=self.keep_prob)
            layer4.arch()
            L4 = layer4.output_layer()

            layer5 = FC(name="fc2", input_layer=L4, iNode=625,
                        oNode=10, keep_prob=self.keep_prob, isLast=True)
            layer5.arch()
            logit = layer5.output_layer()

        # define cost/loss & optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=logit, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.cost)

        predict = tf.equal(
            tf.argmax(logit, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))

    def test(self, x_test, y_test, keep_prob=1.0):
        return self.sess.run(self.accuracy, feed_dict={
            self.X: x_test, self.Y: y_test, self.keep_prob: keep_prob})

    def train(self, x_data, y_data, keep_prob=0.7):
        return self.sess.run([self.cost, self.optimizer], feed_dict={
            self.X: x_data, self.Y: y_data, self.keep_prob: keep_prob})

def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    training_epochs = 15
    batch_size = 100

    # Initialize
    sess = tf.Session()
    cnn = CNN(sess, "cnn", learning_rate=0.001)

    sess.run(tf.global_variables_initializer())

    accs = []
    # Train
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples / batch_size)

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = cnn.train(batch_xs, batch_ys)

        acc = cnn.test(mnist.test.images, mnist.test.labels)
        accs.append(acc)
        print("{0:2d} Accuracy: {1:.4f}".format(epoch + 1, acc))

    # Final test accuracy (already measured after the last epoch)
    print("Final accuracy: {0:.4f}".format(accs[-1]))

    steps = list(range(training_epochs))

    plt.plot(steps, accs)
    plt.grid()
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.title("CNN MNIST Training with TensorFlow")
    plt.show()

if __name__ == "__main__":
    main()
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

Name    : conv1
Filter  : (3, 3, 1, 32)
Conv    : (?, 28, 28, 32)
ReLU    : (?, 28, 28, 32)
Pool    : (?, 14, 14, 32)
DropOut : (?, 14, 14, 32)

Name    : conv2
Filter  : (3, 3, 32, 64)
Conv    : (?, 14, 14, 64)
ReLU    : (?, 14, 14, 64)
Pool    : (?, 7, 7, 64)
DropOut : (?, 7, 7, 64)

Name    : conv3
Filter  : (3, 3, 64, 128)
Conv    : (?, 7, 7, 128)
ReLU    : (?, 7, 7, 128)
Pool    : (?, 4, 4, 128)
DropOut : (?, 4, 4, 128)
Flat    : (?, 2048)

Name    : fc1
Weight  : (2048, 625)
Bias    : (625,)
ReLU    : (?, 625)
DropOut : (?, 625)

Name    : fc2
Weight  : (625, 10)
Bias    : (10,)
Inf     : (?, 10)

 1 Accuracy: 0.9771
 2 Accuracy: 0.9859
 3 Accuracy: 0.9859
 4 Accuracy: 0.9896
 5 Accuracy: 0.9918
 6 Accuracy: 0.9917
 7 Accuracy: 0.9926
 8 Accuracy: 0.9914
 9 Accuracy: 0.9913
10 Accuracy: 0.9933
11 Accuracy: 0.9939
12 Accuracy: 0.9935
13 Accuracy: 0.9939
14 Accuracy: 0.9943
15 Accuracy: 0.9936
Final accuracy: 0.9936
Image 1. CNN MNIST training with TensorFlow
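
The layer shapes printed above follow from the "SAME" padding rule: each 2x2 max pooling with stride 2 halves the spatial size, rounding up, so 28 -> 14 -> 7 -> 4. A minimal sketch of that arithmetic in plain Python (independent of TensorFlow):

import math

size = 28
for _ in range(3):
    size = math.ceil(size / 2)  # "SAME" pooling: output = ceil(input / stride)
    print(size)                 # 14, 7, 4

print(4 * 4 * 128)              # 2048: the flattened conv3 output fed to fc1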

MNIST with CNN Layer from TensorFlow

  • TensorFlow also provides high-level APIs in the layers package. The next example shows how to use tf.layers for the same MNIST training; a short contrast with the low-level API is sketched below.
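As a minimal contrast of the two APIs (TensorFlow 1.x): tf.nn.conv2d takes an explicitly created filter variable, while tf.layers.conv2d creates and initializes its kernel internally from filters and kernel_size.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])

# Low-level API: the filter variable is created and passed explicitly
W = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev=0.01))
low = tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"))

# High-level API: the kernel variable is created internally
high = tf.layers.conv2d(inputs=x, filters=32, kernel_size=[3, 3],
                        padding="SAME", activation=tf.nn.relu)

print(low.shape, high.shape)    # both (?, 28, 28, 32)
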
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data

# Reproducibility
tf.set_random_seed(777)

class CRPDF_L:
    """ Convolution ReLU Pooling DropOut Flat with Layer """
    def __init__(self, name, input_layer, filters, kernel_size, pool_size,
                 strides, dropRate, training, padding="SAME",
                 activation=tf.nn.relu, isFlat=False):
        self.name = name
        self.isFlat = isFlat
        self.conv = tf.layers.conv2d(inputs=input_layer, filters=filters,
                                     kernel_size=kernel_size, padding=padding,
                                     activation=activation)
        self.pool = tf.layers.max_pooling2d(inputs=self.conv,
                                            pool_size=pool_size,
                                            padding=padding, strides=strides)
        self.drop = tf.layers.dropout(inputs=self.pool, rate=dropRate,
                                      training=training)

        if self.isFlat:
            _, width, height, channels = self.drop.shape
            reSize = int(width) * int(height) * int(channels)
            self.flat = tf.reshape(self.drop, [-1, reSize])

    def output_layer(self):
        return self.flat if self.isFlat else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        print("Conv    : {0}".format(self.conv.shape))
        print("Pool    : {0}".format(self.pool.shape))
        print("DropOut : {0}".format(self.drop.shape))

        if self.isFlat:
            print("Flat    : {0}".format(self.flat.shape))

        print("")

class FC_L:
    """ Fully Connected """
    def __init__(self, name, input_layer, units, dropRate=None, training=None,
                 activation=tf.nn.relu, isLast=False):
        self.name = name
        self.isLast = isLast

        if self.isLast:
            self.logit = tf.layers.dense(inputs=input_layer, units=units)
        else:
            self.dense = tf.layers.dense(inputs=input_layer, units=units,
                                         activation=activation)
            self.drop = tf.layers.dropout(inputs=self.dense, rate=dropRate,
                                          training=training)

    def output_layer(self):
        return self.logit if self.isLast else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        if self.isLast:
            print("Inf     : {0}".format(self.logit.shape))
        else:
            print("ReLU    : {0}".format(self.dense.shape))
            print("DropOut : {0}".format(self.drop.shape))

        print("")

class CNN:
    def __init__(self, sess, name, learning_rate=0.01):
        self.sess = sess
        self.name = name
        self.learning_rate = learning_rate
        self._build_net()

    def _build_net(self):
        with tf.variable_scope(self.name):
            # Flag: dropout is active only while training
            self.training = tf.placeholder(tf.bool)
            # Note: tf.layers.dropout's rate is the fraction of units
            # dropped (rate=0.7 drops 70%), unlike keep_prob above
            dropRate = 0.7

            # Placeholders for Input and Output
            self.X = tf.placeholder(tf.float32, [None, 784])
            self.Y = tf.placeholder(tf.float32, [None, 10])

            # Image 28x28x1 (black/white)
            inImg = tf.reshape(self.X, [-1, 28, 28, 1])

            layer1 = CRPDF_L(name="conv1", input_layer=inImg,
                             filters=32, kernel_size=[3,3], strides=2,
                             pool_size=[2,2], dropRate=dropRate,
                             training=self.training)
            layer1.arch()
            L1 = layer1.output_layer()

            layer2 = CRPDF_L(name="conv2", input_layer=L1,
                             filters=64, kernel_size=[3,3], strides=2,
                             pool_size=[2,2], dropRate=dropRate,
                             training=self.training)

            layer2.arch()
            L2 = layer2.output_layer()

            layer3 = CRPDF_L(name="conv3", input_layer=L2,
                             filters=128, kernel_size=[3,3], strides=2,
                             pool_size=[2,2], dropRate=dropRate,
                             training=self.training, isFlat=True)

            layer3.arch()
            L3 = layer3.output_layer()

            layer4 = FC_L(name="fc1", input_layer=L3, units=625,
                          dropRate=dropRate, training=self.training)
            layer4.arch()
            L4 = layer4.output_layer()

            layer5 = FC_L(name="fc2", input_layer=L4, units=10,
                          isLast=True)
            layer5.arch()
            logit = layer5.output_layer()

        # define cost/loss & optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=logit, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.cost)

        predict = tf.equal(
            tf.argmax(logit, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))

    def test(self, x_test, y_test, training=False):
        return self.sess.run(self.accuracy, feed_dict={
            self.X: x_test, self.Y: y_test, self.training: training})

    def train(self, x_data, y_data, training=True):
        return self.sess.run([self.cost, self.optimizer], feed_dict={
            self.X: x_data, self.Y: y_data, self.training: training})

def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    training_epochs = 15
    batch_size = 100

    # Initialize
    sess = tf.Session()
    cnn = CNN(sess, "cnn", learning_rate=0.001)

    sess.run(tf.global_variables_initializer())

    accs = []
    # Train
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples / batch_size)

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            c, _ = cnn.train(batch_xs, batch_ys)

        acc = cnn.test(mnist.test.images, mnist.test.labels)
        accs.append(acc)
        print("{0:2d} Accuracy: {1:.4f}".format(epoch + 1, acc))

    # Final test accuracy (already measured after the last epoch)
    print("Final accuracy: {0:.4f}".format(accs[-1]))

    steps = list(range(training_epochs))

    plt.plot(steps, accs)
    plt.grid()
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.title("CNN MNIST Training with TensorFlow")
    plt.show()

if __name__ == "__main__":
    main()
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

Name    : conv1
Conv    : (?, 28, 28, 32)
Pool    : (?, 14, 14, 32)
DropOut : (?, 14, 14, 32)

Name    : conv2
Conv    : (?, 14, 14, 64)
Pool    : (?, 7, 7, 64)
DropOut : (?, 7, 7, 64)

Name    : conv3
Conv    : (?, 7, 7, 128)
Pool    : (?, 4, 4, 128)
DropOut : (?, 4, 4, 128)
Flat    : (?, 2048)

Name    : fc1
ReLU    : (?, 625)
DropOut : (?, 625)

Name    : fc2
Inf     : (?, 10)

 1 Accuracy: 0.9206
 2 Accuracy: 0.9547
 3 Accuracy: 0.9644
 4 Accuracy: 0.9677
 5 Accuracy: 0.9714
 6 Accuracy: 0.9705
 7 Accuracy: 0.9737
 8 Accuracy: 0.9761
 9 Accuracy: 0.9774
10 Accuracy: 0.9783
11 Accuracy: 0.9789
12 Accuracy: 0.9794
13 Accuracy: 0.9792
14 Accuracy: 0.9788
15 Accuracy: 0.9805
Final accuracy: 0.9805
Image 2. CNN MNIST training with TensorFlow layers
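
One subtle difference from the first example: tf.nn.dropout takes keep_prob (the fraction of units kept), while tf.layers.dropout takes rate (the fraction dropped), so keep_prob=0.7 corresponds to rate=0.3. A minimal sketch of the two conventions (assuming TensorFlow 1.x):

import tensorflow as tf

x = tf.ones([1, 10])

# Keeps ~70% of units: keep_prob is the survival probability
a = tf.nn.dropout(x, keep_prob=0.7)
# Also keeps ~70% of units: rate is the drop probability
b = tf.layers.dropout(x, rate=0.3, training=True)

with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))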

MNIST with CNN Ensemble with TensorFlow

  • With the CNN class, it is easy to apply the ensemble technique: train several CNNs independently, sum their logits at test time, and predict the argmax of the sum, as sketched below.
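A minimal numpy sketch of why summing logits helps (the numbers below are illustrative, not taken from the run that follows): even when one model misclassifies, the summed logits can still pick the correct class.

import numpy as np

logits = np.array([
    [2.0, 1.0, 0.0],   # model 0 votes for class 0 (correct)
    [0.8, 1.0, 0.2],   # model 1 votes for class 1 (wrong)
    [1.5, 0.5, 0.3],   # model 2 votes for class 0 (correct)
])
print(np.argmax(logits, axis=1))       # [0 1 0]: individual votes
print(np.argmax(logits.sum(axis=0)))   # 0: the ensemble is correct
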
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

from tensorflow.examples.tutorials.mnist import input_data

# Reproducibility
tf.set_random_seed(777)

class CRPDF_L:
    """ Convolution ReLU Pooling DropOut Flat with Layer """
    def __init__(self, name, input_layer, filters, kernel_size, pool_size,
                 strides, dropRate, training, padding="SAME",
                 activation=tf.nn.relu, isFlat=False):
        self.name = name
        self.isFlat = isFlat
        self.conv = tf.layers.conv2d(inputs=input_layer, filters=filters,
                                     kernel_size=kernel_size, padding=padding,
                                     activation=activation)
        self.pool = tf.layers.max_pooling2d(inputs=self.conv,
                                            pool_size=pool_size,
                                            padding=padding, strides=strides)
        self.drop = tf.layers.dropout(inputs=self.pool, rate=dropRate,
                                      training=training)

        if self.isFlat:
            _, width, height, channels = self.drop.shape
            reSize = int(width) * int(height) * int(channels)
            self.flat = tf.reshape(self.drop, [-1, reSize])

    def output_layer(self):
        return self.flat if self.isFlat else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        print("Conv    : {0}".format(self.conv.shape))
        print("Pool    : {0}".format(self.pool.shape))
        print("DropOut : {0}".format(self.drop.shape))

        if self.isFlat:
            print("Flat    : {0}".format(self.flat.shape))

        print("")

class FC_L:
    """ Fully Connected """
    def __init__(self, name, input_layer, units, dropRate=None, training=None,
                 activation=tf.nn.relu, isLast=False):
        self.name = name
        self.isLast = isLast

        if self.isLast:
            self.logit = tf.layers.dense(inputs=input_layer, units=units)
        else:
            self.dense = tf.layers.dense(inputs=input_layer, units=units,
                                         activation=activation)
            self.drop = tf.layers.dropout(inputs=self.dense, rate=dropRate,
                                          training=training)

    def output_layer(self):
        return self.logit if self.isLast else self.drop

    def arch(self):
        print("Name    : {0}".format(self.name))
        if self.isLast:
            print("Inf     : {0}".format(self.logit.shape))
        else:
            print("ReLU    : {0}".format(self.dense.shape))
            print("DropOut : {0}".format(self.drop.shape))

        print("")

class CNN:
    def __init__(self, sess, name, learning_rate=0.01):
        self.sess = sess
        self.name = name
        self.learning_rate = learning_rate
        self._build_net()

    def _build_net(self):
        with tf.variable_scope(self.name):
            # Flag: dropout is active only while training
            self.training = tf.placeholder(tf.bool)
            dropRate = 0.7

            # Placeholders for Input and Output
            self.X = tf.placeholder(tf.float32, [None, 784])
            self.Y = tf.placeholder(tf.float32, [None, 10])

            # Image 28x28x1 (black/white)
            inImg = tf.reshape(self.X, [-1, 28, 28, 1])

            layer1 = CRPDF_L(name="{0}.conv1".format(self.name),
                             input_layer=inImg, filters=32, kernel_size=[3,3],
                             strides=2, pool_size=[2,2], dropRate=dropRate,
                             training=self.training)
            layer1.arch()
            L1 = layer1.output_layer()

            layer2 = CRPDF_L(name="{0}.conv2".format(self.name),
                             input_layer=L1, filters=64, kernel_size=[3,3],
                             strides=2, pool_size=[2,2], dropRate=dropRate,
                             training=self.training)

            layer2.arch()
            L2 = layer2.output_layer()

            layer3 = CRPDF_L(name="{0}.conv3".format(self.name),
                             input_layer=L2, filters=128, kernel_size=[3,3],
                             strides=2, pool_size=[2,2], dropRate=dropRate,
                             training=self.training, isFlat=True)

            layer3.arch()
            L3 = layer3.output_layer()

            layer4 = FC_L(name="{0}.fc1".format(self.name), input_layer=L3,
                          units=625, dropRate=dropRate, training=self.training)
            layer4.arch()
            L4 = layer4.output_layer()

            layer5 = FC_L(name="{0}.fc2".format(self.name), input_layer=L4,
                          units=10, isLast=True)
            layer5.arch()
            self.logit = layer5.output_layer()

        # define cost/loss & optimizer
        self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=self.logit, labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.cost)

        predict = tf.equal(
            tf.argmax(self.logit, 1), tf.argmax(self.Y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))

    def predict(self, x_test, training=False):
        return self.sess.run(self.logit, feed_dict={self.X: x_test,
                                                    self.training: training})

    def test(self, x_test, y_test, training=False):
        return self.sess.run(self.accuracy, feed_dict={
            self.X: x_test, self.Y: y_test, self.training: training})

    def train(self, x_data, y_data, training=True):
        return self.sess.run([self.cost, self.optimizer], feed_dict={
            self.X: x_data, self.Y: y_data, self.training: training})

def en_accuracy(cnns, mnist):
    """ Ensemble accuracy: sum the logits of all models, then vote """
    test_size = len(mnist.test.labels)
    predictions = np.zeros((test_size, 10))
    for cnn in cnns:
        predictions += cnn.predict(mnist.test.images)

    # Evaluate in numpy so no new graph ops are built on every call
    correct = np.argmax(predictions, 1) == np.argmax(mnist.test.labels, 1)
    return np.mean(correct)

def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

    training_epochs = 15
    batch_size = 100
    nr_cnn = 5

    # Initialize
    sess = tf.Session()

    cnns = []
    for i in range(nr_cnn):
        cnn = CNN(sess, "cnn_{0}".format(i), learning_rate=0.001)
        cnns.append(cnn)

    sess.run(tf.global_variables_initializer())

    accs = []
    # Train
    for epoch in range(training_epochs):
        total_batch = int(mnist.train.num_examples / batch_size)

        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)

            # Train every CNN in the ensemble on the same batch
            for cnn in cnns:
                c, _ = cnn.train(batch_xs, batch_ys)

        # Evaluate the ensemble on the test set
        acc = en_accuracy(cnns, mnist)
        accs.append(acc)
        print("{0:2d} Accuracy: {1:.4f}".format(epoch + 1, acc))

    print("Ensemble accuracy: {0:.4f}".format(accs[-1]))

    steps = list(range(training_epochs))

    plt.plot(steps, accs)
    plt.grid()
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.title("Ensemble CNN MNIST Training with TensorFlow")
    plt.show()

if __name__ == "__main__":
    main()
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

Name    : cnn_0.conv1
Conv    : (?, 28, 28, 32)
Pool    : (?, 14, 14, 32)
DropOut : (?, 14, 14, 32)

Name    : cnn_0.conv2
Conv    : (?, 14, 14, 64)
Pool    : (?, 7, 7, 64)
DropOut : (?, 7, 7, 64)

Name    : cnn_0.conv3
Conv    : (?, 7, 7, 128)
Pool    : (?, 4, 4, 128)
DropOut : (?, 4, 4, 128)
Flat    : (?, 2048)

Name    : cnn_0.fc1
ReLU    : (?, 625)
DropOut : (?, 625)

Name    : cnn_0.fc2
Inf     : (?, 10)

(The same architecture is printed again for cnn_1 through cnn_4.)

 1 Accuracy: 0.9318
 2 Accuracy: 0.9588
 3 Accuracy: 0.9661
 4 Accuracy: 0.9704
 5 Accuracy: 0.9723
 6 Accuracy: 0.9744
 7 Accuracy: 0.9766
 8 Accuracy: 0.9763
 9 Accuracy: 0.9774
10 Accuracy: 0.9788
11 Accuracy: 0.9785
12 Accuracy: 0.9795
13 Accuracy: 0.9796
14 Accuracy: 0.9798
15 Accuracy: 0.9809
Ensemble accuracy: 0.9809
Image 3. CNN MNIST training with Ensemble
