Keras template



from keras.models import Sequential  
from keras.layers.core import Dense, Dropout, Activation  
from keras.optimizers import SGD  
from keras.datasets import mnist  
import numpy 


# ----------------- step 1: choose model -----------------

model = Sequential()


# ----------------- step 2: build network -----------------

model.add(Dense(500,input_shape=(784,))) # input layer: 28*28 = 784
model.add(Activation('tanh')) # tanh activation
model.add(Dropout(0.5)) # 50% dropout

model.add(Dense(500)) # hidden layer with 500 units
model.add(Activation('tanh'))
model.add(Dropout(0.5))

model.add(Dense(10)) # the output has 10 classes, so the output dimension is 10
model.add(Activation('softmax')) # softmax activation on the last layer
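# optional sanity check: print a layer-by-layer summary (output shapes and
# parameter counts) to verify the stacked architecture
model.summary()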


# ----------------- step 3: compile -----------------

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) # optimizer: SGD with learning rate (lr), decay, and Nesterov momentum
model.compile(loss='categorical_crossentropy', optimizer=sgd) # use cross-entropy as the loss function
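# (note: in Keras 2 you can also pass metrics=['accuracy'] to compile();
# evaluate() then returns [loss, accuracy] instead of a single loss value)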


# ----------------- step 4: training -----------------

'''
  Some of the arguments to .fit:
  batch_size: how many samples each training batch contains
  epochs: number of training epochs (passes over the data)
  shuffle: whether to randomly shuffle the data before training
  validation_split: fraction of the data held out for validation
  verbose: console output mode; 0: silent, 1: progress output, 2: one line of results per epoch
'''
(X_train, y_train), (X_test, y_test) = mnist.load_data() # load the data with Keras's built-in mnist helper (downloads it the first time, so a network connection is needed)
# the mnist inputs have shape (num, 28, 28); flatten the last two dimensions into a single 784-dimensional vector
X_train = X_train.reshape(X_train.shape[0], X_train.shape[1] * X_train.shape[2])
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1] * X_test.shape[2])
# one-hot encode the labels into 10-dimensional vectors
Y_train = (numpy.arange(10) == y_train[:, None]).astype(int)
Y_test = (numpy.arange(10) == y_test[:, None]).astype(int)
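# (equivalent sketch, assuming Keras's bundled one-hot helper is available:
#   from keras.utils import np_utils
#   Y_train = np_utils.to_categorical(y_train, 10)
#   Y_test = np_utils.to_categorical(y_test, 10)
# in Keras 2 the helper is keras.utils.to_categorical)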

model.fit(X_train, Y_train, batch_size=200, epochs=50, shuffle=True, verbose=0, validation_split=0.3)

# ----------------- step 5: print -----------------

print("test set")
scores = model.evaluate(X_test,Y_test,batch_size=200,verbose=0)
print("")
print("The test loss is %f" % scores)
result = model.predict(X_test,batch_size=200,verbose=0)

result_max = numpy.argmax(result, axis = 1)
test_max = numpy.argmax(Y_test, axis = 1)

result_bool = numpy.equal(result_max, test_max)
true_num = numpy.sum(result_bool)
print("")
print("The accuracy of the model is %f" % (true_num/len(result_bool)))



Reference tutorial 2

Original article link

Below is a simple example; many more examples are available in the official documentation and tutorials, which are quite detailed (unlike Caffe's sparse documentation). With the example below you can easily build a CNN model. Keras provides two kinds of network models.

1. One is the Sequential network structure, which is commonly used for CNNs; it is called as follows:

# coding=utf-8

import numpy as np

#np.random.seed(100)

import os
import matplotlib.pyplot as plt

import h5py
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, Adadelta, Adagrad, RMSprop
from keras.layers.advanced_activations import PReLU

from keras.callbacks import ModelCheckpoint, Callback
 
 
# callback that records the training loss at the end of every batch
class LossHistory(Callback):
    def on_train_begin(self, logs={}):
        self.losses = []
 
    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
 
def Net_Mouth():
	keras_model=Sequential() # single-branch, feed-forward (Sequential) network model
	# convolution layer: 20 output feature maps, 5*5 kernels
	keras_model.add(Convolution2D(20, 5, 5,input_shape=(3, 60, 60))) # the network input is a 3-channel, 60*60 image
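	# (note: the channels-first input shape (3, 60, 60) assumes Theano-style dimension
	# ordering; with TensorFlow-style ordering it would be written as (60, 60, 3))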
	# activation layer
	keras_model.add(Activation('relu'))
	# max-pooling layer
	keras_model.add(MaxPooling2D(pool_size=(2, 2)))

	# convolution layer: 40 feature maps, 5*5 kernels
	keras_model.add(Convolution2D(40, 5, 5))
	keras_model.add(Activation('relu'))
 
	keras_model.add(MaxPooling2D(pool_size=(2, 2)))
 
 
	keras_model.add(Convolution2D(60, 3, 3))
	keras_model.add(Activation('relu'))
 
	keras_model.add(MaxPooling2D(pool_size=(2, 2)))
 
 
	keras_model.add(Convolution2D(80, 3, 3))
	keras_model.add(Activation('relu'))
 
	# flatten the feature maps before the fully connected layers
	keras_model.add(Flatten())
	# fully connected layer with 1000 units
	keras_model.add(Dense(1000))
	keras_model.add(Activation('relu'))
 
	keras_model.add(Dense(500))
	keras_model.add(Activation('relu'))
 
 
	keras_model.add(Dense(38))
	keras_model.add(Activation('tanh'))
 
	# optimize with the Adam algorithm; use mean squared error as the loss function
	keras_model.compile(loss='mean_squared_error', optimizer='adam')
	return keras_model
 
keras_model=Net_Mouth()
# save the parameters with the lowest validation loss: whenever the validation loss improves, the weights are written to disk immediately
checkpointer =ModelCheckpoint(filepath="mouth.hdf5", verbose=1, save_best_only=True)
history = LossHistory()
# training call; for a CNN the input x has shape (nsamples, nchannels, height, width)
# and y has shape (nsamples, output_dimension); x and y must be prepared beforehand
keras_model.fit(x, y, batch_size=128, nb_epoch=100, shuffle=True, verbose=2, validation_split=0.1, callbacks=[checkpointer, history])
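
The example imports matplotlib and records per-batch losses with the LossHistory callback but never uses them, so here is a minimal follow-up sketch; it assumes the fit call above has completed, and new_data is a placeholder name for whatever (nsamples, 3, 60, 60) array you want predictions for:

# plot the per-batch training loss collected by the LossHistory callback
plt.plot(history.losses)
plt.xlabel('batch')
plt.ylabel('training loss')
plt.show()

# reload the best weights saved by ModelCheckpoint and run prediction
keras_model.load_weights('mouth.hdf5')
predictions = keras_model.predict(new_data, batch_size=128, verbose=0)  # new_data: placeholder input array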