TensorFlow Study Notes (2): Toy Demos

Posted by SkyHigh on October 14, 2016

Previous

1. TensorFlow Study Notes (1): Basics


This post covers the following demos:

  • The MNIST dataset
    • Single-layer neural network
    • CNN
    • RNN
  • The IRIS dataset
    • DNN

The demos are all fairly simple and the code is commented; you can work through them alongside the official tutorials and implement them by hand.

1. Single-Layer Neural Network

In this section we use the simplest possible network: the input is passed directly through a softmax layer to produce the output (strictly speaking there is no hidden layer at all; this is plain softmax regression).
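For intuition, what the graph below computes can be written out in a few lines of numpy (a sketch for illustration only, not part of the demo):

import numpy as np

z = np.array([2.0, 1.0, 0.1])              # logits for 3 classes
y_pred = np.exp(z) / np.exp(z).sum()       # softmax: positive, sums to 1
y_true = np.array([1.0, 0.0, 0.0])         # one-hot label
loss = -np.sum(y_true * np.log(y_pred))    # cross-entropy
print(y_pred, loss)                        # ~[0.66 0.24 0.10], ~0.42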

# Single-layer neural network (softmax regression)
def simpleMNIST():
    from tensorflow.examples.tutorials.mnist import input_data
    # Load MNIST
    mnist = input_data.read_data_sets('./data', one_hot=True)

    # Input data: x is the image, y the label; None in the shape means any batch size
    x = tf.placeholder(dtype=tf.float32, shape=(None, 784), name='x')
    y = tf.placeholder(dtype=tf.float32, shape=(None, 10), name='y')

    # Define the variables
    with tf.variable_scope('MNIST'):
        # Softmax-layer variables: W and b
        W = tf.get_variable(name='weights', shape=(784, 10), dtype=tf.float32, initializer=tf.random_normal_initializer())
        b = tf.get_variable(name='bias', shape=(10, ), dtype=tf.float32, initializer=tf.constant_initializer())
        # Pre-activation (logits) fed into the softmax layer
        z1 = tf.matmul(x, W) + b
        # Softmax output
        y_pred = tf.nn.softmax(z1)
        # Cross-entropy loss
        ce = tf.reduce_mean(-tf.reduce_sum(y * tf.log(y_pred), reduction_indices=[1]))
        # Or call the built-in fused op directly (note the added reduce_mean):
        # ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(z1, y))
        # Optimizer: plain gradient descent
        opt = tf.train.GradientDescentOptimizer(0.5).minimize(ce)

    # Open a session
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(tf.initialize_all_variables())
        # Accuracy
        precision = tf.equal(tf.argmax(y, 1), tf.argmax(y_pred, 1))
        precision = tf.reduce_mean(tf.cast(precision, tf.float32))

        print('Training...')
        for i in range(100):
            # Fetch a mini-batch of 100 examples
            x_batch, y_batch = mnist.train.next_batch(100)
            _, accuracy = sess.run([opt, precision], feed_dict={x: x_batch, y: y_batch})
            # print(accuracy)

        # Evaluate on the test set; no need to run the optimizer since training is done
        accuracy = sess.run([precision], feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print('Test:')
        print(accuracy)
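A side note on the commented-out fused op above: computing the softmax and the log as separate steps can overflow or underflow for large logits, which is why tf.nn.softmax_cross_entropy_with_logits is generally preferred. A quick numpy illustration (not part of the demo):

import numpy as np

z = np.array([1000.0, 0.0])                           # extreme logits
p = np.exp(z) / np.exp(z).sum()                       # naive softmax: exp(1000) overflows to inf
print(p)                                              # [nan, 0.] after inf/inf
p = np.exp(z - z.max()) / np.exp(z - z.max()).sum()   # the usual max-subtraction trick
print(np.log(p))                                      # still [0., -inf]: exp(-1000) underflows to 0
# The fused TF op computes the loss from the logits directly and avoids both problems.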

2. Convolutional Neural Network (CNN)

A simple CNN for handwritten-digit classification; the explanations are in the code comments.

# CNN
def CNNMNIST():
    from tensorflow.examples.tutorials.mnist import input_data
    # Load MNIST
    mnist = input_data.read_data_sets('./data', one_hot=True)

    # Hyperparameters
    filter_h = 5
    filter_w = 5
    pool_h = 2
    pool_w = 2
    n_samples = 10      # number of output classes (digits 0-9)
    batch_size = 50
    epochs = 5000
    kp = 0.5            # dropout keep probability used during training

    # Inputs: x is the image, y the label
    x = tf.placeholder(dtype=tf.float32, shape=(None, 784), name='x')
    y = tf.placeholder(dtype=tf.float32, shape=(None, 10), name='y')

    # Helper that creates a weight variable
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, 0., 0.1)
        return tf.Variable(initial)

    # Helper that creates a bias variable
    def bias_variable(shape):
        initial = tf.constant(.1, shape=shape)
        return tf.Variable(initial)

    # Convolution helper
    def conv2d(x, W):
        # input: the input data, shape [batch_size, height, width, channels]
        # filter: the kernel, shape [filter_height, filter_width, channels_in, channels_out]
        # strides: step sizes as [batch, height, width, channel]
        # padding: 'SAME' keeps the output spatial size equal to the input;
        #          'VALID' gives (height-filter_height+1)*(width-filter_width+1) at stride 1
        # returns a tensor of shape [batch_size, out_height, out_width, channels_out]
        return tf.nn.conv2d(input=x, filter=W, strides=[1,1,1,1], padding='SAME')

    # Pooling layer: max pooling
    def max_pool_2X2(x):
        # value: same layout as `input` above
        # ksize: window size in each dimension of x
        # strides, padding: as above
        return tf.nn.max_pool(value=x, ksize=[1,pool_h,pool_w,1], strides=[1,2,2,1], padding='SAME')
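
    # Shape bookkeeping for this demo: with a 28x28 input, a SAME-padded conv
    # keeps the spatial size, and each 2x2/stride-2 max pool halves it:
    # 28x28 -> (conv) 28x28 -> (pool) 14x14 -> (conv) 14x14 -> (pool) 7x7,
    # which is why the first fully connected layer expects 7*7*64 inputs.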

    # Create the variables
    with tf.variable_scope('CNNMNIST'):
        # First-layer variables: filter_h*filter_w is the kernel size,
        # 1 input channel, 32 output channels
        W_conv1 = weight_variable([filter_h, filter_w, 1, 32])
        b_conv1 = bias_variable([32])

        # -1 means any batch size; 28*28 is the image height/width, 1 channel
        x_image = tf.reshape(x, [-1, 28, 28, 1])

        # First-layer output
        # The convolution uses SAME padding, so the spatial size is unchanged;
        # max pooling then reduces the image to 14x14
        h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

        h_pool1 = max_pool_2X2(h_conv1)

        # Second-layer variables
        W_conv2 = weight_variable([filter_h, filter_w, 32, 64])
        b_conv2 = bias_variable([64])

        # Second-layer output
        # Again SAME padding keeps the size; max pooling reduces it to 7x7
        h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

        h_pool2 = max_pool_2X2(h_conv2)

        # First fully connected layer, with ReLU
        # 64 channels, each 7x7; we give this layer 1024 neurons (your choice)
        W_fc1 = weight_variable([7*7*64, 1024])
        b_fc1 = bias_variable([1024])

        # First fully connected layer's output
        h_poo2_flatten = tf.reshape(h_pool2, [-1, 7*7*64])
        h_fc1 = tf.nn.relu(tf.matmul(h_poo2_flatten, W_fc1) + b_fc1)

        # Dropout before the softmax layer: each unit is kept with probability
        # keep_prob and kept activations are scaled by 1/keep_prob, so
        # h_fc1_dropout has the same shape and expected magnitude as h_fc1
        keep_prob = tf.placeholder(tf.float32)
        h_fc1_dropout = tf.nn.dropout(h_fc1, keep_prob=keep_prob)

        # Second fully connected layer: the softmax layer
        W_fc2 = weight_variable([1024, n_samples])
        b_fc2 = bias_variable([n_samples])

        # Second fully connected layer's output (the logits)
        h_fc2 = tf.matmul(h_fc1_dropout, W_fc2) + b_fc2

        # Mean cross-entropy over the batch
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(h_fc2, y))

        # Adam optimizer with initial learning rate 1e-4
        opt = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # Open a session
    with tf.Session() as sess:
        # Initialize the variables
        sess.run(tf.initialize_all_variables())
        # Accuracy
        precision = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(h_fc2, 1)), tf.float32))
        for i in range(epochs):
            # Mini-batch of 50 examples
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            _, p = sess.run([opt, precision], feed_dict={x: x_batch, y: y_batch, keep_prob: kp})
            print(p)

        # Test; note that dropout is disabled at test time, i.e. keep_prob=1.0
        accuracy = sess.run([precision], feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.})
        print('Test:')
        print(accuracy)
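One practical note: feeding all 10,000 test images through the convolutional graph in a single sess.run can be memory-hungry. A batched-evaluation sketch (a hypothetical helper that assumes the x, y, keep_prob placeholders and the precision op from CNNMNIST are in scope):

    # Hypothetical batched evaluation to bound memory use; assumes the
    # placeholders and the `precision` op defined above are in scope.
    def batched_accuracy(sess, batch=500):
        scores = []
        for i in range(0, mnist.test.num_examples, batch):
            scores.append(sess.run(precision, feed_dict={
                x: mnist.test.images[i:i+batch],
                y: mnist.test.labels[i:i+batch],
                keep_prob: 1.0}))
        # 10,000 test images split into equal batches, so the mean of the
        # per-batch accuracies equals the overall accuracy
        return sum(scores) / len(scores)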

3. Recurrent Neural Network (RNN)

This is a single-layer RNN, with no bidirectionality, stacking, attention, or other extras.
It uses truncated backpropagation: the RNN is unrolled for a fixed number of time steps (num_steps).
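Concretely, each image row is treated as one time step: a 28x28 image becomes a length-28 sequence of 28-dimensional inputs. A quick numpy sketch of the layout (illustration only, not part of the demo):

import numpy as np

img = np.arange(784, dtype=np.float32)   # a stand-in for one flattened MNIST image
seq = img.reshape(28, 28)                # 28 time steps, 28 features each
print(seq.shape)                         # (28, 28)
print(seq[0][:5])                        # features fed at the first time step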

# Single-layer RNN
def RNNMNIST():
    from tensorflow.python.ops import rnn, rnn_cell
    from tensorflow.examples.tutorials.mnist import input_data
    # Load MNIST
    mnist = input_data.read_data_sets('./data', one_hot=True)

    # Number of passes over the training set
    hm_epoch = 3
    # Number of output classes
    n_classes = 10
    # Mini-batch size
    batch_size = 128
    # Number of columns in an image, analogous to embedding_size
    chunk_size = 28
    # Number of rows in an image, equal to the number of time steps (t = 1..28)
    n_chunks = 28   # equivalent to num_steps
    # Number of units in the RNN layer
    rnn_size = 200

    # Inputs: x is the image, y the label
    x = tf.placeholder(tf.float32, [None, 28, 28])
    y = tf.placeholder(tf.float32, [None, 10])

    def recurrent_neural_network(x):
        # Variable scope "output"
        with tf.variable_scope('output'):
            # Output-layer weights and bias (a softmax layer, as usual)
            layer = {'weight':tf.get_variable('weight', [rnn_size, n_classes], tf.float32, \
                    tf.random_normal_initializer(0., 0.1)),
                     'bias':tf.get_variable('bias', [n_classes], tf.float32, \
                    tf.random_normal_initializer(0., 0.1))
            }

            # Rearrange x into n_chunks tensors of shape [batch_size, chunk_size]:
            # the RNN runs for n_chunks time steps and consumes one
            # [batch_size, chunk_size] slice per step
            x = tf.transpose(x, [1,0,2])         # [n_chunks, batch_size, chunk_size]
            x = tf.reshape(x, [-1, chunk_size])  # [n_chunks*batch_size, chunk_size]
            x = tf.split(0, n_chunks, x)         # list of n_chunks [batch_size, chunk_size]

            # Use an LSTM cell as the RNN cell
            lstm_cell = rnn_cell.BasicLSTMCell(rnn_size, state_is_tuple=True)
            # A plain RNN cell also works
            general_cell = rnn_cell.BasicRNNCell(rnn_size)
            # As does a GRU cell
            gru_cell = rnn_cell.GRUCell(rnn_size)

            # From the source of the various cells:
            # every cell's `outputs` holds the output of each time step, and
            # `states` is the state after the last step; for the LSTM, `states`
            # combines c and h, while for BasicRNN and GRU, states == outputs[-1]
            outputs, states = rnn.rnn(lstm_cell, x, None, tf.float32)
            # outputs, states = rnn.rnn(general_cell, x, None, tf.float32)
            # outputs, states = rnn.rnn(gru_cell, x, None, tf.float32)

            # Final output, shape [batch_size, n_classes]
            output = tf.matmul(outputs[-1], layer['weight']) + layer['bias']
            return output

    def train_rnn(x, y):
        # Predictions
        prediction = recurrent_neural_network(x)
        # Compare against the labels to get the loss
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
        # Optimizer for the loss; Adam here
        opt = tf.train.AdamOptimizer(1e-4).minimize(cost)

        # Open a session
        with tf.Session() as sess:
            # Initialize all variables
            sess.run(tf.initialize_all_variables())

            # Training loop
            for epoch in range(hm_epoch):
                epoch_loss = 0
                # Each epoch runs through the whole training set once
                for _ in range(mnist.train.num_examples // batch_size):
                    # Fetch a mini-batch
                    epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                    # Reshape to [batch_size, n_chunks, chunk_size]
                    epoch_x = epoch_x.reshape([-1, n_chunks, chunk_size])
                    # Run the graph
                    _, c = sess.run([opt, cost], feed_dict={x:epoch_x, y:epoch_y})
                    # Accumulate the loss
                    epoch_loss += c
                # Report the current epoch and its loss
                print('epoch: %d, loss: %f' % (epoch, epoch_loss))

            res = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
            # Report the accuracy
            accuracy = tf.reduce_mean(tf.cast(res, tf.float32))
            print('Accuracy:', accuracy.eval({x:mnist.test.images.reshape(-1, n_chunks, chunk_size), y:mnist.test.labels}))

    # Train and test the RNN
    train_rnn(x, y)
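As an aside, tf.nn.dynamic_rnn consumes [batch_size, n_chunks, chunk_size] tensors directly, so the transpose/reshape/split dance above becomes unnecessary. A sketch of the same forward pass (W_out and b_out are hypothetical stand-ins for layer['weight'] and layer['bias'] above):

# Sketch: the same forward pass with dynamic_rnn (no manual unrolling needed)
cell = rnn_cell.BasicLSTMCell(rnn_size, state_is_tuple=True)
outputs, states = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
# outputs has shape [batch_size, n_chunks, rnn_size]; pick out the final step
last = tf.gather(tf.transpose(outputs, [1, 0, 2]), n_chunks - 1)
logits = tf.matmul(last, W_out) + b_out   # W_out/b_out as in `layer` above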

4. Deep Neural Network (DNN)

This is really just an extension of Section 1, written with the high-level tf.contrib.learn API; the code is given directly.

# -*- encoding:utf-8 -*-

# Classify irises with a deep neural network

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
# Iris dataset files
IRIS_TRAINING = "data/iris_training.csv"
IRIS_TEST = "data/iris_test.csv"

# Load the datasets
training_set = tf.contrib.learn.datasets.base.load_csv(filename=IRIS_TRAINING,
                                                       target_dtype=np.int)
test_set = tf.contrib.learn.datasets.base.load_csv(filename=IRIS_TEST,
                                                   target_dtype=np.int)

# Declare the input features: all real-valued, 4 dimensions
feature_columns = [tf.contrib.layers.real_valued_column("", dimension=4)]
print(feature_columns)

# A DNN with 3 hidden layers of [10, 20, 10] units
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                            hidden_units=[10, 20, 10],
                                            n_classes=3,
                                            model_dir="./iris_model")

# Train the model
classifier.fit(x=training_set.data,
               y=training_set.target,
               steps=2000)

# Evaluate accuracy on the test set
accuracy_score = classifier.evaluate(x=test_set.data,
                                     y=test_set.target)["accuracy"]
print('Accuracy: {0:f}'.format(accuracy_score))

# Classify two new samples
new_samples = np.array(
    [[6.4, 3.2, 4.5, 1.5], [5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
y = classifier.predict(new_samples)
print('Predictions: {}'.format(str(y)))
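For readability, the predicted class ids can be mapped back to species names. A small sketch, assuming the class order used by the tutorial's Iris CSVs (0 = setosa, 1 = versicolor, 2 = virginica):

# Hypothetical pretty-printing of the predictions; the class order is an
# assumption about the tutorial CSVs (0=setosa, 1=versicolor, 2=virginica)
species = ['setosa', 'versicolor', 'virginica']
for sample, cls in zip(new_samples, y):
    print('{} -> {}'.format(sample, species[int(cls)]))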
