速成教程:TensorFlow如何实现简单的模型训练

  • Post author:
  • Post category:其他

1、二话不说,直接上代码:TensorFlow实现简单线性回归训练模型

import tensorflow as tf
import os

# Suppress TensorFlow C++ INFO/WARNING log output (keep errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

tf.app.flags.DEFINE_integer("max_step", 200, "训练模型的步数")          # number of gradient-descent steps
tf.app.flags.DEFINE_string("model_path",
                           "./ckpt/linearregression",
                           "模型保存的路径+模型名字")                    # checkpoint directory + model name
FLAGS = tf.app.flags.FLAGS                                           # parsed command-line flag values


def linear_regression():
    """Train a 1-D linear regression model on synthetic data y = 4x + 3.

    Builds the TF1 graph (data, model, loss, SGD optimizer, summaries),
    runs FLAGS.max_step training steps in a Session, writes TensorBoard
    event files to ./summary/ and saves a checkpoint to FLAGS.model_path.

    Returns:
        None.
    """
    with tf.variable_scope("dataset"):
        # NOTE: tf.random_normal draws a *fresh* batch on every session run,
        # so every tensor that must share one batch has to be fetched in a
        # single sess.run call (see the training loop below).
        X = tf.random_normal(shape=(100, 1), mean=0.5, stddev=1)
        Y_true = tf.matmul(X, [[4.0]]) + 3.0

    # Linear model: one trainable weight and one trainable bias.
    with tf.variable_scope("linear_model"):
        weights = tf.Variable(initial_value=tf.random_normal(shape=(1, 1)), name="weights")
        bias = tf.Variable(initial_value=tf.random_normal(shape=(1, 1)), name="bias")
        Y_predict = tf.matmul(X, weights) + bias

    with tf.variable_scope("loss"):
        # Mean squared error between prediction and ground truth.
        loss = tf.reduce_mean(tf.square(Y_predict - Y_true), name="loss")

    with tf.variable_scope("gradient_optimiter"):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

    # Collect scalar/histogram summaries for TensorBoard and merge them
    # into a single fetchable op.
    tf.summary.scalar("loss", loss)
    tf.summary.histogram("weight", weights)
    tf.summary.histogram("bias", bias)
    merge = tf.summary.merge_all()

    # Op that assigns every variable its initial value.
    init = tf.global_variables_initializer()

    # Saver instance for checkpointing / restoring the model.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        print("初始化的权重为%f,偏置为%f" % (weights.eval(), bias.eval()))
        # To resume from a previous checkpoint, uncomment:
        # saver.restore(sess, FLAGS.model_path)
        print("权重为%f,偏置为%f" % (weights.eval(), bias.eval()))
        # Event-file writer for TensorBoard (created once, reused each step).
        file_writer = tf.summary.FileWriter(logdir="./summary/", graph=sess.graph)
        for i in range(FLAGS.max_step):
            # BUG FIX: fetch the train op, the loss and the merged summary in
            # ONE run call. Separate run()/eval() calls would each draw a
            # different random batch, so the printed loss and the logged
            # summary would not correspond to the optimization step.
            _, loss_value, summary = sess.run([optimizer, loss, merge])
            print("第%d步的损失为%f,权重为%f, 偏置为%f" % (i + 1, loss_value, weights.eval(), bias.eval()))
            file_writer.add_summary(summary, i + 1)
        # Persist the trained variables.
        saver.save(sess, FLAGS.model_path)

    return None


def main(argv):
    """Entry point invoked by tf.app.run(); argv holds leftover CLI args."""
    for message in ("这是main函数", argv, FLAGS.model_path):
        print(message)
    linear_regression()

if __name__ == "__main__":
    tf.app.run()                  # parses command-line flags, then calls main(argv)

2、cmd命令行执行该文件TensorFlow实现线性回归.py

cmd命令行可以给 max_step 和 model_path 重新赋值

E:\Tensorflow>workon ai            # 必须切换到虚拟环境中
(ai) E:\Tensorflow>python ./09-TensorFlow实现线性回归.py --max_step=100

===cmd执行结果:===========================================

这是main函数
['./TensorFlow实现线性回归.py']
./ckpt/linearregression
初始化的权重为-0.996474,偏置为1.092397
权重为-0.996474,偏置为1.092397
第1步的损失为38.175697,权重为-0.828843, 偏置为1.183127
第2步的损失为38.077343,权重为-0.671796, 偏置为1.268724
第3步的损失为42.033585,权重为-0.526859, 偏置为1.366900
...
第98步的损失为0.180080,权重为3.587485, 偏置为3.236459
第99步的损失为0.224762,权重为3.594254, 偏置为3.237265
第100步的损失为0.146797,权重为3.602060, 偏置为3.237001

3、Tensorboard 查看模型训练过程和结果

(1)cmd命令行输入以下命令

tensorboard --logdir="E:\Tensorflow\summary" --host=127.0.0.1

(2)然后使用谷歌浏览器访问http://127.0.0.1:6006查看模型训练过程和结果(max_step=200)
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述
在这里插入图片描述

4、面向对象思想下,TensorFlow实现线性回归案例代码

import tensorflow as tf
import os

# Suppress TensorFlow C++ INFO/WARNING log output (keep errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
tf.app.flags.DEFINE_integer("max_step", 100, "训练模型的步数")  # number of gradient-descent steps
tf.app.flags.DEFINE_string("model_path", "./ckpt/linearregression", "模型保存的路径+模型名字")  # checkpoint path + model name
FLAGS = tf.app.flags.FLAGS  # parsed command-line flag values


class LinearRegression(object):
    """Object-oriented TF1 linear regression: learns y = 4x + 3 by SGD."""

    def __init__(self):
        pass

    def dataset(self):
        """Build the synthetic data tensors.

        Note: the batch is re-sampled on *every* session run, so tensors
        that must share one batch have to be fetched in a single run call.
        """
        x = tf.random_normal([100, 1], mean=0.5, stddev=2, name="x")
        y = tf.matmul(x, [[4.0]]) + 3.0
        return x, y

    def model(self, feature):
        """Build the linear model y_hat = feature @ weight + bias."""
        with tf.variable_scope("linea_model"):
            # Trainable parameters are kept on the instance so summaries
            # and the training loop can read them.
            self.weight = tf.Variable(tf.random_normal([1, 1], mean=0.0, stddev=1.0), name="weights")
            self.bias = tf.Variable(0.0, name='biases')
            y_predict = tf.matmul(feature, self.weight) + self.bias
        return y_predict

    def loss(self, y_true, y_predict):
        """Return the mean-squared-error loss tensor."""
        loss = tf.reduce_mean(tf.square(y_true - y_predict))
        return loss

    def optimizer_op(self, loss):
        """Return a gradient-descent train op (learning rate 0.1)."""
        optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
        return optimizer

    def merge_summary(self, loss):
        """Register loss/weight/bias summaries and return the merged op."""
        tf.summary.scalar("losses", loss)
        tf.summary.histogram("w", self.weight)
        tf.summary.histogram('b', self.bias)
        merged = tf.summary.merge_all()
        return merged

    def train(self):
        """Train for FLAGS.max_step steps, log to ./summary/, save a checkpoint."""
        g = tf.get_default_graph()
        with g.as_default():
            x, y = self.dataset()
            y_predict = self.model(x)
            loss = self.loss(y, y_predict)
            optimizer = self.optimizer_op(loss)
            merged = self.merge_summary(loss)
            saver = tf.train.Saver()
            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                print("初始化的权重:%f, 偏置:%f" % (self.weight.eval(), self.bias.eval()))
                # To resume from a previous checkpoint, uncomment:
                # saver.restore(sess, "./ckpt/linearregression")
                print("权重:%f, 偏置:%f" % (self.weight.eval(), self.bias.eval()))
                # BUG FIX: create the event-file writer ONCE, before the
                # loop. The original built a new FileWriter every iteration,
                # leaking file handles and spawning one event file per step.
                file_writer = tf.summary.FileWriter("./summary/", graph=sess.graph)
                for i in range(FLAGS.max_step):
                    # BUG FIX: fetch train op, loss and summary in one run
                    # so they are computed on the same random batch.
                    _, loss_value, summary = sess.run([optimizer, loss, merged])
                    print("训练第%d步之后的损失:%f, 权重:%f, 偏置:%f" % (i, loss_value, self.weight.eval(), self.bias.eval()))
                    file_writer.add_summary(summary, i)
                saver.save(sess, FLAGS.model_path)


if __name__ == '__main__':
    # Instantiate the trainer and run the full training loop.
    LinearRegression().train()

版权声明:本文为weixin_44695969原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。