當前位置:首頁 > IT技術 > Web編程 > 正文

使用Lenet5對mnist數(shù)據(jù)集進行訓練和測試
2022-04-19 11:21:22

?

安裝模型圖片導出模塊

sudo pip install pydot

sudo pip install graphviz

sudo pip install pydot-ng

sudo apt-get install graphviz


安裝h5py的命令如下(模型保存模塊):
sudo pip install cython
sudo apt-get install libhdf5-dev
sudo pip install h5py

記錄一下代碼:

# -*- coding: UTF-8 -*-

# mnist神經網絡訓練,采用LeNet-5模型

import os
import cv2
import numpy as np

from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.advanced_activations import PReLU
from keras.optimizers import SGD, Adadelta, Adagrad

from keras.utils import np_utils
from keras.utils.vis_utils import plot_model

import h5py
from keras.models import model_from_json


def loadData(path, number):
    """Load up to `number` grayscale 28x28 digit images from a directory.

    Each filename is expected to encode its label before the first '-',
    e.g. "7-0001.png" is a picture of the digit 7.

    Args:
        path: directory containing the image files.
        number: maximum number of images to load.

    Returns:
        (data, labels): `data` is float32 with shape (number, 1, 28, 28)
        (channels-first); `labels` is uint8 with shape (number,).
        NOTE(review): the training script below uses channels-last input;
        callers of this helper would need to transpose — confirm intended use.
    """
    # np.empty allocates without initializing; every slot up to `number`
    # is overwritten below (or the loop breaks early, leaving garbage).
    data = np.empty((number, 1, 28, 28), dtype="float32")
    labels = np.empty((number,), dtype="uint8")
    count = 0
    for img in os.listdir(path):
        # Flag 0 -> read as single-channel grayscale.
        imgData = cv2.imread(os.path.join(path, img), 0)
        # Label is the filename prefix before the first '-'.
        labels[count] = int(img.split('-')[0])
        # (28, 28) broadcasts into the (1, 28, 28) channels-first slot.
        data[count, :, :, :] = np.asarray(imgData, dtype="float32")
        count += 1
        print(path, " loaded ", count)
        if count >= number:
            break
    return data, labels


# Load MNIST, already shuffled and split between train and test sets:
# 60000 training and 10000 test images, each 28x28 grayscale.
(trainData, trainLabels), (testData, testLabels) = mnist.load_data()

# Labels 0-9 -> one-hot "binary class matrix" form, as required by the
# categorical_crossentropy loss used below.
trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)

# TensorFlow backend uses channels-last ordering:
# (samples, rows, cols, channels); single-channel grayscale -> trailing 1.
# (With a Theano backend the shape would be (samples, 1, 28, 28) instead.)
# Reshape directly from (samples, 28, 28) — flattening to (samples, 784)
# first, as some tutorials do, is redundant.
trainData = trainData.reshape(trainData.shape[0], 28, 28, 1)
testData = testData.reshape(testData.shape[0], 28, 28, 1)

# LeNet-5-style network, assembled layer by layer.
model = Sequential()

# Conv 1: four 5x5 filters over the 28x28x1 input -> 24x24x4 feature maps.
# Only the first layer declares input_shape; later layers infer theirs.
model.add(Conv2D(filters=4, kernel_size=(5, 5), padding='valid',
                 input_shape=(28, 28, 1), activation='tanh'))

# 2x2 max-pool -> 12x12x4, then conv 2: eight 3x3 filters -> 10x10x8.
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=8, kernel_size=(3, 3), padding='valid',
                 activation='tanh'))

# Conv 3: sixteen 3x3 filters -> 8x8x16, then 2x2 max-pool -> 4x4x16.
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='valid',
                 activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))

# Bridge from convolutional maps to the dense classifier:
# flatten 4x4x16 into a 256-element vector.
model.add(Flatten())

# Fully-connected hidden layer, 128 units.
model.add(Dense(128, activation='tanh'))

# Output layer: one unit per digit class, softmax for classification.
model.add(Dense(10, activation='softmax'))

# --- Train the CNN ---

# Stochastic gradient descent: initial learning rate 0.05, momentum 0.9,
# per-update decay 1e-6, Nesterov momentum enabled.
sgd = SGD(lr=0.05, momentum=0.9, decay=1e-6, nesterov=True)

# Multi-class log-loss; the labels were one-hot encoded earlier, which is
# what categorical_crossentropy expects. Track accuracy during training.
model.compile(loss='categorical_crossentropy', optimizer=sgd,
              metrics=['accuracy'])

# 20 epochs of mini-batch gradient descent (batch_size=100), holding out
# 20% of the training data for validation; shuffle samples each epoch and
# show a per-epoch progress bar (verbose=1).
model.fit(trainData, trainLabels, batch_size=100, epochs=20,
          shuffle=True, verbose=1, validation_split=0.2)

# Export a diagram of the model architecture (requires pydot + graphviz).
plot_model(model, to_file='model2.png', show_shapes=True,
           show_layer_names=False)

# Evaluate on the held-out test set; metrics_names labels the values
# returned by evaluate (loss, accuracy).
print(model.metrics_names)
print(model.evaluate(testData, testLabels, verbose=0, batch_size=500))

# Persist the model: architecture as JSON, weights as HDF5.
# `with` guarantees the JSON file is flushed and closed.
json_string = model.to_json()
with open('my_model_architecture.json', 'w') as f:
    f.write(json_string)
model.save_weights('my_model_weights.h5')



本文摘自 :https://blog.51cto.com/u

開通會員,享受整站包年服務立即開通 >