【TensorFlow】-LeNet-5
1. Key Points
1. Formula for the output size
Round down (floor) when the result is not an integer:
output = ⌊(n + 2p − f) / s⌋ + 1
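For example, for the conv1 layer below: n = 28, p = 2, f = 5, s = 1 gives ⌊(28 + 2·2 − 5) / 1⌋ + 1 = 28, so a padding of 2 preserves the 28×28 spatial size.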
2. Dropout
Prevents overfitting and is used only during training.
Dropout is generally applied only to fully connected layers, not to convolutional or pooling layers.
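A minimal sketch of the train-only usage (the keep_prob placeholder is my own illustration, not part of the original code):

```python
import tensorflow as tf

x = tf.ones([4, 10])                    # stand-in for a fully connected layer's output
keep_prob = tf.placeholder(tf.float32)  # feed 0.5 when training, 1.0 when testing
x_drop = tf.nn.dropout(x, keep_prob)    # zeroes units at random, scales the rest by 1/keep_prob

with tf.Session() as sess:
    print(sess.run(x_drop, feed_dict={keep_prob: 0.5}))  # training: about half the units become 0
    print(sess.run(x_drop, feed_dict={keep_prob: 1.0}))  # testing: identity
```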
3. tf.argmax(vector, axis=1)
axis=0 works along columns and axis=1 along rows; the return value is the index of the maximum element of vector.
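A quick check of both axes:

```python
import tensorflow as tf

v = tf.constant([[1., 3., 2.],
                 [8., 5., 6.]])
with tf.Session() as sess:
    print(sess.run(tf.argmax(v, axis=1)))  # [1 0]: index of the max within each row
    print(sess.run(tf.argmax(v, axis=0)))  # [1 1 1]: index of the max within each column
```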
4. get_collection()
Returns a list of all elements in the given collection; for the 'losses' collection, these elements are the individual parts of the loss function.
5. tf.add_n()
tf.add_n([p1, p2, p3…]) sums the elements of a list. The input is a list whose elements can be vectors, matrices, and so on.
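A minimal sketch of the two working together, the same pattern mnist_inference.py uses for its 'losses' collection:

```python
import tensorflow as tf

w1 = tf.Variable(tf.ones([2, 2]))
w2 = tf.Variable(tf.ones([2, 2]))
# Each loss term is registered in the 'losses' collection separately...
tf.add_to_collection('losses', tf.nn.l2_loss(w1))  # sum(w1**2) / 2 = 2.0
tf.add_to_collection('losses', tf.nn.l2_loss(w2))  # sum(w2**2) / 2 = 2.0
# ...get_collection returns them as a list, and add_n sums that list.
total = tf.add_n(tf.get_collection('losses'))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(total))  # 4.0
```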
6.saver.save()
```python
saver.save(sess, './model/model.ckp', global_step=global_step)
'''
@global_step appends the number of training steps to each saved model's
filename, e.g. "model.ckpt-1000" is the model obtained after 1000 steps.
'''
```
Each save produces 3 files:
model.ckpt.meta — the structure of the TensorFlow computation graph
model.ckpt — the value of every variable in the program
checkpoint — a list of all model files in the directory
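To load such a checkpoint back, a minimal sketch using all three files (the step suffix 1000 is assumed for illustration):

```python
import tensorflow as tf

with tf.Session() as sess:
    # The .meta file rebuilds the graph structure; restore() then fills in
    # the variable values from the newest entry in the 'checkpoint' file.
    saver = tf.train.import_meta_graph('./model/model.ckp-1000.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./model'))
```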
7.tf.nn.conv2d()
```python
tf.nn.conv2d(input, filter, strides, padding, use_cudnn_on_gpu=True, name=None)
```
For VALID:
new_height = new_width = ⌈(W − F + 1) / S⌉
For SAME:
new_height = new_width = ⌈W / S⌉
where W is the input size and F is the filter size; ⌈·⌉ rounds up.
Reference: TensorFlow中CNN的两种padding方式“SAME”和“VALID”
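A quick shape check of the two modes (ones-filled tensors; only the shapes matter here):

```python
import tensorflow as tf

image = tf.ones([1, 28, 28, 1])   # W = 28
kernel = tf.ones([5, 5, 1, 32])   # F = 5
same = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='SAME')
valid = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='VALID')
print(same.get_shape())   # (1, 28, 28, 32): ceil(28 / 1) = 28
print(valid.get_shape())  # (1, 24, 24, 32): ceil((28 - 5 + 1) / 1) = 24
```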
8. tf.nn.bias_add() and tf.add()

```python
tf.nn.bias_add(value, bias, name=None)
```

This function adds bias to value and can be seen as a special case of tf.add; bias must be one-dimensional.
```python
import tensorflow as tf

a = tf.constant([[1, 1], [2, 2], [3, 3]], dtype=tf.float32)
b = tf.constant([1, -1], dtype=tf.float32)  # 1-D, matches the last dimension of a
c = tf.constant([1], dtype=tf.float32)

with tf.Session() as sess:
    print('bias_add:')
    print(sess.run(tf.nn.bias_add(a, b)))   # adds b to every row of a
    print('add:')
    print(sess.run(tf.add(a, c)))           # ordinary broadcasting add
```
9. Regularization
Only the weights of the fully connected layers need a regularization term.
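A minimal sketch of the l2_regularizer that mnist_train.py passes into inference(), applied to a toy weight matrix:

```python
import tensorflow as tf

regularizer = tf.contrib.layers.l2_regularizer(0.0001)
fc_weights = tf.constant([[1., 2.], [3., 4.]])
with tf.Session() as sess:
    # 0.0001 * (1 + 4 + 9 + 16) / 2 = 0.0015
    print(sess.run(regularizer(fc_weights)))
```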
10.tf.Print()
```python
tf.Print(input_, data, message=None, first_n=None, summarize=None, name=None)
```
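tf.Print is an identity op on input_ that prints the tensors in data as a side effect whenever it is evaluated; a minimal sketch:

```python
import tensorflow as tf

a = tf.constant([1., 2., 3.])
a = tf.Print(a, [a, tf.reduce_sum(a)], message='a and its sum: ')
with tf.Session() as sess:
    sess.run(a)  # prints the message plus both tensors to stderr
```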
2.mnist_train.py
```python
'''
Input  x  - [batch, 28, 28, 1] - a 4-D tensor
Output y_ - [None, 10]
'''
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference

BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
MOVING_AVERAGE_DECAY = 0.99
TRAINING_STEPS = 8000

def train(mnist):
    '''Define the input and output placeholders.'''
    x = tf.placeholder(tf.float32, [BATCH_SIZE,
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.IMAGE_SIZE,
                                    mnist_inference.NUM_CHANNELS], name='x-input1')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    '''Forward propagation.'''
    y = mnist_inference.inference(x, True, regularizer)

    '''Track parameters with a moving average.'''
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    '''Loss function: cross entropy plus the regularization terms collected in 'losses'.'''
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    '''
    exponential_decay produces an exponentially decaying learning rate.
    Passing global_step to minimize() updates global_step automatically,
    so learning_rate is updated along with it.
    '''
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               mnist.train.num_examples / BATCH_SIZE,
                                               LEARNING_RATE_DECAY,
                                               staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    '''
    tf.control_dependencies updates the back-propagated parameters and the
    moving average of every parameter in a single training op.
    '''
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        print("------------Training started--------------")
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            reshaped_xs = np.reshape(xs, (BATCH_SIZE,
                                          mnist_inference.IMAGE_SIZE,
                                          mnist_inference.IMAGE_SIZE,
                                          mnist_inference.NUM_CHANNELS))
            train_op_renew, loss_value, step = sess.run(
                [train_op, loss, global_step],
                feed_dict={x: reshaped_xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, './model/model.ckp', global_step=global_step)
        print("------------------Training finished-----------------")

def main(argv=None):
    '''
    Main entry point.
    Instantiating the MNIST data class downloads the data automatically.
    '''
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    if mnist is not None:
        print("-------------Data loaded------------------")
        train(mnist)

if __name__ == '__main__':
    tf.app.run()
```
3.mnist_inference.py
```python
'''
1.-----------conv1-------------------------------
  input:  28*28*1
  f: (5*5*32)  s: 1  padding='same'
  output: (28*28*32)
2.-----------pool1-------------------------------
  f: (2*2)  s: 2  padding='same'
  output: (14*14*32)
3.-----------conv2-------------------------------
  f: (5*5*64)  s: 1  padding='same'
  output: (14*14*64)
4.-----------pool2-------------------------------
  f: (2*2)  s: 2  padding='same'
  output: (7*7*64) >>>> reshaped to (batch_size, 7*7*64)
5.-----------fc1---------------------------------
  input:  (batch_size, 7*7*64)
  output: (batch_size, 512)
6.-----------fc2---------------------------------
  input:  (batch_size, 512)
  output: (batch_size, 10)
'''
import tensorflow as tf

INPUT_NODE = 784
OUTPUT_NODE = 10
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
'''Size and depth of the first convolutional layer'''
CONV1_DEEP = 32
CONV1_SIZE = 5
'''Size and depth of the second convolutional layer'''
CONV2_DEEP = 64
CONV2_SIZE = 5
'''Number of nodes in the fully connected layer'''
FC_SIZE = 512

def inference(input_tensor, train, regularizer):
    '''
    Forward propagation of the convolutional network.
    @ train         distinguishes the training process from testing
    @ input_tensor  4-D input tensor
    '''
    '''
    conv1
    input:  28*28*1  [batch_size, :]
    f: (5*5*32)  s: 1  padding='same'
    output: (28*28*32)
    '''
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    '''pool1'''
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    pool_shape = pool2.get_shape().as_list()
    '''pool_shape includes the batch_size dimension'''
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    with tf.variable_scope("layer5-fc1"):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        '''add_to_collection adds a tensor to the 'losses' collection'''
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
```
4.mnist_eval.py
```python
'''
Validation set size: 5000
@ mnist.validation.images.shape -> (5000, 784)
'''
import time
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import mnist_inference
import mnist_train

'''Load the model every 10 seconds and test the latest accuracy'''
EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        '''Define the input and output format'''
        x = tf.placeholder(tf.float32, [mnist.validation.images.shape[0],
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.IMAGE_SIZE,
                                        mnist_inference.NUM_CHANNELS], name='x-input1')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
        xs = mnist.validation.images
        reshaped_xs = np.reshape(xs, (mnist.validation.images.shape[0],
                                      mnist_inference.IMAGE_SIZE,
                                      mnist_inference.IMAGE_SIZE,
                                      mnist_inference.NUM_CHANNELS))
        validate_feed = {x: reshaped_xs, y_: mnist.validation.labels}

        '''Forward pass for testing; no regularization term is needed'''
        y = mnist_inference.inference(x, None, None)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        for i in range(2):
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state("./model")
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    '''The step count is the suffix of the checkpoint filename'''
                    global_step = ckpt.model_checkpoint_path.split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                    print("After %s training steps, validation accuracy = %g"
                          % (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)

def main(argv=None):
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    evaluate(mnist)

if __name__ == '__main__':
    tf.app.run()
```
References
1. TensorFlow实战(三)——基于LeNet-5模型实现MNIST手写数字识别