Implementing a Neural Network on the MNIST Dataset with TensorFlow

The code for MNIST_UPDATE_inference.py is as follows:
# -*- coding: utf-8 -*-
import tensorflow as tf
# Clear the default graph so that re-running the script in the same
# interpreter session does not collide with previously created variables
# (see the notes at the end of this post).
tf.reset_default_graph()

INPUT_NODE = 784     # 28x28 input pixels, flattened
LAYER1_NODE = 500    # hidden-layer width
OUTPUT_NODE = 10     # one logit per digit class

def get_weight_variable(shape, regularizer=None):
    # Create (or fetch) the weight variable of the enclosing variable scope.
    weights = tf.get_variable("weights", shape,
                              initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer is given, add its penalty term to the 'losses'
    # collection, where the training script picks it up.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    # Hidden layer: fully connected + ReLU.
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # Output layer: raw logits; softmax is applied inside the loss function.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
The code for MNIST_UPDATE_train.py is as follows:
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import MNIST_UPDATE_inference
#tf.reset_default_graph()   # already called when MNIST_UPDATE_inference is imported

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99

MODEL_SAVE_PATH = "model/"
MODEL_NAME = "mnist_model.ckpt"

def train(mnist):
    x = tf.placeholder(tf.float32, [None, MNIST_UPDATE_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, MNIST_UPDATE_inference.OUTPUT_NODE], name='y-input')

    # L2 regularization on the weights; the forward pass adds the penalty
    # terms to the 'losses' collection.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = MNIST_UPDATE_inference.inference(x, regularizer)

    # Non-trainable step counter, incremented by the optimizer.
    global_step = tf.Variable(0, trainable=False)

    # Maintain exponential moving averages of all trainable variables;
    # the eval script restores these shadow values instead of the raw weights.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Cross-entropy on the logits; the one-hot labels are converted to
    # class indices with argmax.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross-entropy + the L2 penalty terms.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Decay the learning rate once per epoch of training data.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Group the gradient update and the moving-average update into one op.
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run(
                [train_op, loss, global_step], feed_dict={x: xs, y_: ys})

            # Log and checkpoint every 1000 steps.
            if i % 1000 == 0:
                print("After %d training steps, loss on training batch is %g" % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()

The code for MNIST_UPDATE_eval.py is as follows:
# -*- coding: utf-8 -*-

import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import MNIST_UPDATE_inference
import MNIST_UPDATE_train

EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [None, MNIST_UPDATE_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, MNIST_UPDATE_inference.OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        # No regularizer at evaluation time; only the forward pass is needed.
        y = MNIST_UPDATE_inference.inference(x, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Restore the moving-average (shadow) values in place of the raw weights.
        variable_averages = tf.train.ExponentialMovingAverage(MNIST_UPDATE_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(MNIST_UPDATE_train.MODEL_SAVE_PATH)
            if ckpt and ckpt.all_model_checkpoint_paths:
                # Evaluate every checkpoint still listed in the checkpoint file
                # (the Saver keeps at most five by default).
                for model_file in ckpt.all_model_checkpoint_paths:
                    saver.restore(sess, model_file)
                    # The global step is the suffix of the checkpoint filename.
                    global_step = model_file.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
                    time.sleep(EVAL_INTERVAL_SECS)
            else:
                print("No checkpoint file found")
                return

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()

Results:

Running MNIST_UPDATE_train.py:

[training log screenshots omitted; a large stretch of intermediate output was skipped in the original]

Running MNIST_UPDATE_eval.py:

[evaluation output screenshot omitted]
Notes on the code (environment: a VMware virtual machine running Ubuntu Linux, the Spyder IDE, Python 3.6, TensorFlow 1.3.0; under other versions the code may differ slightly).

MNIST_UPDATE_inference.py

Because the variables are registered in the default graph, the script succeeds the first time it is run, but running it again in the same interpreter session (for example inside Spyder) fails with a "variable already exists" error. After searching through many forums, the cause turned out to be that the first run had already created the named variables (layer1/weights and so on) in the default graph, so the second run's tf.get_variable calls collide with the existing names. The fix is to clear the graph at the top of the script:

tf.reset_default_graph()
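
To make the failure mode concrete, here is a minimal sketch (my illustration, not from the original post; assumed TF 1.x) that reproduces the collision and the fix in one interactive session:

# Sketch only: reproducing the "variable already exists" error.
import tensorflow as tf

def build():
    # Same pattern as inference(): a named variable inside a variable scope.
    with tf.variable_scope('layer1'):
        return tf.get_variable("weights", [784, 500],
                               initializer=tf.truncated_normal_initializer(stddev=0.1))

build()                    # first call: creates layer1/weights

try:
    build()                # second call: same name in the same graph
except ValueError as e:
    print(e)               # "Variable layer1/weights already exists, disallowed. ..."

tf.reset_default_graph()   # discard the old graph,
build()                    # so the variable can be created afresh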

MNIST_UPDATE_train.py

Before running the code, create a folder named model in the current directory; the trained models are saved there. Each saved model consists of three files (.meta, .index, and .data), and there is in addition a single checkpoint file that records the most recent models, tracking at most five of them. The folder therefore holds at most five models' data: this script saves more than five times, so newer checkpoints displace the oldest ones and only the latest five remain on disk.
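
The five-checkpoint limit is the default max_to_keep of tf.train.Saver. A small sketch (my illustration, assuming the same model/ directory as above) of how to change the limit and list what remains:

# Sketch only (assumed TF 1.x): adjusting how many checkpoints the Saver keeps.
import tensorflow as tf

tf.reset_default_graph()
v = tf.Variable(0, name='dummy')          # a Saver needs at least one variable

# Keep the 10 most recent checkpoints instead of the default 5;
# max_to_keep=None would keep every checkpoint ever saved.
saver = tf.train.Saver(max_to_keep=10)

# The checkpoint file records which checkpoints are still on disk:
ckpt = tf.train.get_checkpoint_state("model/")
if ckpt:
    print(ckpt.model_checkpoint_path)         # the newest checkpoint
    print(ckpt.all_model_checkpoint_paths)    # every retained checkpoint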

MNIST_UPDATE_eval.py

Once the trained parameters exist, the evaluation script reads them back from the checkpoint files, feeds in the validation data, and computes the accuracy. Because the checkpoint file tracks at most five models, only the five most recent models are evaluated.
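
One detail worth calling out: the Saver in the eval script is built from variables_to_restore(), which maps each variable's moving-average (shadow) name back to the variable itself, so the weights being evaluated are the smoothed EMA values rather than the raw trained ones. A minimal sketch (my illustration, assumed TF 1.x) of that mapping:

# Sketch only: what variables_to_restore() produces.
import tensorflow as tf

tf.reset_default_graph()
v = tf.Variable(0.0, name='weights')
ema = tf.train.ExponentialMovingAverage(0.99)

# Prints {'weights/ExponentialMovingAverage': <the variable v>}, so restoring
# a checkpoint through this map loads the shadow value straight into v.
print(ema.variables_to_restore())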



Copyright notice: This is an original article by Lancher_Mo, released under the CC 4.0 BY-SA license. Please include a link to the original article and this notice when reposting.