'''
Author: Alone
Create Date: 2019-01-13 10:23:45
'''
import tensorflow as tf
import numpy as np
import cv2,os
from random import shuffle
abs_path = r'.\Data'  # path to the training-set folder
test_path = r'./face_test'
# Fetch a random, shuffled batch of samples; rewrite this for your own data layout
def getFaces(batch_size):
    img_list = os.listdir(abs_path)  # one subfolder per class
    img_list.pop()  # drop the last entry (apparently not a class folder)
    amount = len(img_list)  # total number of classes
    batch_x = []  # images in this batch
    batch_y = []  # one-hot labels in this batch
    for dir_name in img_list:
        path = abs_path + '\\' + dir_name
        file_names = os.listdir(path)
        # draw batch_size distinct indices; np.random.choice with
        # replace=False already returns them in random order
        random_index = np.random.choice(len(file_names), size=batch_size, replace=False)
        for i in random_index:
            # file-name format: <image id>_<class id>.jpg, e.g. 100_1.jpg
            # (1 is a person's class id; 100 merely makes the name unique)
            name = file_names[i]
            img = cv2.imread(path + '\\' + name)
            if img.shape[2] == 3:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if img.shape[0] != 96:
                img = cv2.resize(img, (96, 96))
            img = img.reshape(96, 96, 1)
            label = int(dir_name)  # the folder name is the class id
            labels = [0] * amount  # one-hot encoding
            labels[label] = 1
            batch_x.append(img)
            batch_y.append(labels)
    return np.array(batch_x), np.array(batch_y)
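# A minimal usage sketch (assuming Data\ holds one numbered subfolder per
# class, so the effective batch is batch_size images from every class):
#   xs, ys = getFaces(10)
#   xs.shape == (10 * n_class, 96, 96, 1)
#   ys.shape == (10 * n_class, n_class)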
# Unused helper: fetches a batch from the test set; rewrite it for your own data
def getTestData(batch_size):
    img_list = os.listdir(test_path)  # test-image folder
    amount = len(img_list)  # total number of test samples
    # draw batch_size distinct indices; already in random order
    random_index = np.random.choice(amount, size=batch_size, replace=False)
    batch_x = []  # images in this batch
    batch_y = []  # one-hot labels in this batch
    for i in random_index:
        # file-name format: <image id>_<class id>.jpg, e.g. 100_1.jpg
        name = img_list[i]
        img = cv2.imread(test_path + '\\' + name)
        if channel == 1:  # channel and n_class are module-level globals defined below
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(96, 96, 1)
        # os.path.splitext, unlike rstrip('.jpg'), cannot eat trailing
        # stem characters: '100_1.jpg' -> '100_1' -> class id 1
        label = int(os.path.splitext(name)[0].split('_')[1])
        labels = [0] * n_class
        labels[label] = 1
        batch_x.append(img)
        batch_y.append(labels)
    return np.array(batch_x), np.array(batch_y)
#==================================================
#================ parameter setup =================
sn = os.listdir(abs_path)
sn.pop()  # drop the last entry, as in getFaces
n_class = len(sn)  # number of output classes
print(n_class)
channel = 1  # number of image channels
n_input = 96*96*channel
input_shape = [96, 96]
# -1 lets the batch dimension be inferred; after the last pool each sample's
# feature map is 2*2*256 (see the shape-trace sketch below)
features_shape = [-1, 2*2*256]
#==================================================
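# A small sanity-check sketch (not called anywhere) that traces the feature-map
# size through the five 'VALID' convolutions and 2x2 max-pools defined below,
# confirming that a 96x96 input shrinks to 2x2 (hence features_shape above):
def _trace_feature_size(size=96, kernels=(3, 2, 2, 2, 2)):
    for k in kernels:
        size = size - k + 1      # 'VALID' convolution with a k x k kernel
        size = (size + 1) // 2   # 2x2 stride-2 'SAME' max-pool: ceil(size/2)
    return size                  # 96 -> 47 -> 23 -> 11 -> 5 -> 2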
def weight_variable(shape, name=None):
    # stddev 0.1 keeps the truncated normal within [-0.2, 0.2];
    # a plain random initializer could fall outside that range
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)
def bias_variable(shape, name=None):
    # start all biases at 0.1; random initial biases can lengthen training
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)
def convolution2d(tensor,w_kernel,name=None):
return tf.nn.conv2d(tensor,w_kernel,strides=[1,1,1,1],padding='VALID',name=name)
def matMul(x,w,name=None):
return tf.matmul(x,w,name=name)
def max_pool(tensor,name=None):
return tf.nn.max_pool(tensor,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name=name)
def average_pool(tensor,name=None):
return tf.nn.avg_pool(tensor,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name=name)
def leakyReLu(x, leak=0.2, name="LeakyRelu"):
    # algebraic form of max(x, leak*x):
    # f1*x + f2*|x| = x for x >= 0 and leak*x for x < 0
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * tf.abs(x)
def ReLu(f1_, name="ReLu"):
    # algebraic form of max(x, 0): (x + |x|) / 2
    with tf.variable_scope(name):
        a = (f1_ + tf.abs(f1_)) / 2
        return a
def DIY_Net(x,y,weights,biases,keep_prob):
    cov1=tf.nn.relu(convolution2d(x,weights['w_c1'])+biases['b_c1'])
    # for grayscale images max pooling works better than average pooling here:
    # the maximum keeps the white foreground and ignores the black background
    pool1=max_pool(cov1)
cov2=tf.nn.relu(convolution2d(pool1,weights['w_c2'])+biases['b_c2'])
pool2=max_pool(cov2)
cov3=tf.nn.relu(convolution2d(pool2,weights['w_c3'])+biases['b_c3'])
pool3=max_pool(cov3)
cov4=ReLu(convolution2d(pool3,weights['w_c4'])+biases['b_c4'])
pool4=max_pool(cov4)
cov5 = ReLu(convolution2d(pool4, weights['w_c5']) + biases['b_c5'])
pool5 = max_pool(cov5)
# cov6 = ReLu(convolution2d(pool5, weights['w_c6']) + biases['b_c6'])
# pool6 = max_pool(cov6)
features=tf.reshape(pool5,features_shape,name='features')
f1= ReLu(matMul(features, weights['w_f1']) + biases['b_f1'])
f2=tf.nn.relu(matMul(f1,weights['w_f2'])+biases['b_f2'],name='f2')
dropped=tf.nn.dropout(f2,keep_prob,name='dropped')
out=tf.add(matMul(dropped,weights['w_out']),biases['b_out'],name='out')
# out1=matMul(dropped,weights['w_out1'])+biases['b_out1']
softMax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=y)
loss = tf.reduce_mean(softMax_cross_entropy)
return out,loss
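# Shape summary for DIY_Net (for a batch of N 96x96x1 images): out holds the
# raw [N, n_class] logits (softmax is applied inside the loss term), and loss
# is a scalar, the mean softmax cross-entropy over the batch.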
Weights={  # input: 96*96; comments trace conv -> pool output sizes
    'w_c1':weight_variable([3,3,1,16],'w_c1'),    # conv 94, pool 47*47
    'w_c2':weight_variable([2,2,16,32],'w_c2'),   # conv 46, pool 23*23
    'w_c3':weight_variable([2,2,32,64],'w_c3'),   # conv 22, pool 11*11
    'w_c4':weight_variable([2,2,64,128],'w_c4'),  # conv 10, pool 5*5
    'w_c5':weight_variable([2,2,128,256],'w_c5'), # conv 4,  pool 2*2
    'w_f1':weight_variable([2*2*256,1024],'w_f1'),
    'w_f2':weight_variable([1024,1024],'w_f2'),
    'w_out':weight_variable([1024,n_class],'w_out'),
    # 'w_out1':weight_variable([1024,n_class],'w_out1')
}
Biases={
'b_c1':bias_variable([16],'b_c1'),
'b_c2':bias_variable([32],'b_c2'),
'b_c3':bias_variable([64],'b_c3'),
'b_c4': bias_variable([128],'b_c4'),
'b_c5': bias_variable([256],'b_c5'),
'b_f1':bias_variable([1024],'b_f1'),
'b_f2':bias_variable([1024],'b_f2'),
'b_out':bias_variable([n_class],'b_out'),
# 'b_out1':bias_variable([n_class],'b_out1'),
}
learning_rate=0.001
x = tf.placeholder("float", shape=[None,input_shape[0],input_shape[1],1],name='input_x')
y = tf.placeholder("float", shape=[None, n_class],name='input_y')
keep_prob = tf.placeholder("float")  # dropout keep probability
if __name__ == '__main__':
sess=tf.InteractiveSession()
predict,loss_func=DIY_Net(x,y,Weights,Biases,keep_prob)
optimizer=tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_func,name='optimizer')
correct_pred = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    batch_size = 50  # size of one training batch (drawn per class in getFaces)
    display_step = 50
    max_iter = 1000  # maximum number of training iterations
saver = tf.train.Saver()
try:
saver.restore(sess, "./model/model.ckpt-1000")
print('Load successful !')
except:
print('Load failed!')
        sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
current_count = 1
temp_img=None
    font = cv2.FONT_HERSHEY_SIMPLEX  # font for the on-screen status text
text = 'start training...'
    while current_count <= max_iter:
        batch_xs, batch_ys = getFaces(batch_size)
        # shuffle images and labels together so the classes are interleaved,
        # and feed the shuffled batch (not the original per-class ordering)
        c = list(zip(batch_xs, batch_ys))
        shuffle(c)
        batch_xs, batch_ys = zip(*c)
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.75})
if current_count%display_step==0:
saver.save(sess, './model/model.ckpt', global_step=current_count)
current_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
current_loss = sess.run(loss_func, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            text = str(current_count) + " iterations" + ", current batch loss: " + "{:.6f}".format(
                current_loss) + ", current_accuracy= " + "{:.5f}".format(current_accuracy)
print(text)
temp_img=np.zeros((100,1000,3),dtype=np.uint8)
temp_img =cv2.putText(temp_img, text, (5, 50), font, 0.5, (0, 0, 255), 1)
cv2.imshow('running',temp_img)
key_code=cv2.waitKey(1)
            if key_code in (ord('\r'), ord('q')):  # Enter or 'q' ends training
cv2.destroyAllWindows()
break
current_count+=1
print("优化完成!")
    img = cv2.imread('./7.jpg')  # sample image, assumed to already be 96x96
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).reshape(96, 96, 1)
r=sess.run(predict,{x:[img],keep_prob:1.})
result=sess.run(tf.math.softmax(r))
print(result)
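    # the predicted class id is the arg-max of the softmax distribution
    print('predicted class:', np.argmax(result))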
# saver.save(sess, './model/model.ckpt', global_step=current_count-1)
# testX,testY=getTestData(1)
# accuracy_rate=sess.run(accuracy,feed_dict={x: testX, y: testY, keep_prob: 1.})
# print("测试集准确率:",accuracy_rate )
sess.close()
cv2.destroyAllWindows()
'''
Transfer learning

To find a node's name in a saved model, call graph.get_operations() and then
locate the node in the returned list by its operation.
'''
import cv2, numpy as np
import pandas as pd
import random,os
import tensorflow as tf
from random import shuffle
abs_path = r'..\Data'  # path to the training-set folder
def getFaces(batch_size):
    img_list = os.listdir(abs_path)  # one subfolder per class
    img_list.pop()  # drop the last entry (apparently not a class folder)
    amount = len(img_list)  # total number of classes
    batch_x = []  # images in this batch
    batch_y = []  # one-hot labels in this batch
    for dir_name in img_list:
        path = abs_path + '\\' + dir_name
        file_names = os.listdir(path)
        # draw batch_size distinct indices; np.random.choice with
        # replace=False already returns them in random order
        random_index = np.random.choice(len(file_names), size=batch_size, replace=False)
        for i in random_index:
            # file-name format: <image id>_<class id>.jpg, e.g. 100_1.jpg
            # (1 is a person's class id; 100 merely makes the name unique)
            name = file_names[i]
            img = cv2.imread(path + '\\' + name)
            if img.shape[2] == 3:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if img.shape[0] != 96:
                img = cv2.resize(img, (96, 96))
            img = img.reshape(96, 96, 1)
            label = int(dir_name)  # the folder name is the class id
            labels = [0] * amount  # one-hot encoding
            labels[label] = 1
            batch_x.append(img)
            batch_y.append(labels)
    return np.array(batch_x), np.array(batch_y)
saver = tf.train.import_meta_graph('./model02/model.ckpt-1000.meta')
sess=tf.InteractiveSession()
sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
saver.restore(sess, './model02/model.ckpt-1000')
############ start defining the network ###################
sn = os.listdir(abs_path)
sn.pop()  # drop the last entry, as in getFaces
n_class = len(sn)  # number of output classes
current_count = 1
temp_img=None
font = cv2.FONT_HERSHEY_SIMPLEX  # font for the on-screen status text
text = 'start training...'
graph=tf.get_default_graph()
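# As the header docstring notes, the tensor/op names used below can be
# recovered by listing the restored graph's operations, e.g.:
#   for op in graph.get_operations():
#       print(op.name, op.type)
# These names are specific to the ./model02 checkpoint and will differ
# for other saved models.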
x_old=graph.get_tensor_by_name('Placeholder:0')
keep_prob_old=graph.get_tensor_by_name('Placeholder_2:0')
features=graph.get_tensor_by_name('Reshape:0')
x = graph.get_tensor_by_name('x:0')
y = graph.get_tensor_by_name('y:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')
predict=graph.get_tensor_by_name('predict:0')
loss_func=graph.get_tensor_by_name('loss:0')
# the training op, recovered by index from graph.get_operations(); index 880
# is specific to this checkpoint's graph and must be re-checked elsewhere
optimizer = graph.get_operations()[880]
correct_pred = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
batch_size = 50  # size of one training batch (drawn per class in getFaces)
display_step = 50
max_iter = 1000  # maximum number of training iterations
while current_count <= max_iter:
    batch_xs, batch_ys = getFaces(batch_size)
    # shuffle images and labels together so the classes are interleaved
    c = list(zip(batch_xs, batch_ys))
    shuffle(c)
    batch_xs, batch_ys = zip(*c)
    # run the restored network up to the 'features' tensor, then feed the
    # extracted features into the new head through x
    batch_xs = sess.run(features, {x_old: batch_xs, keep_prob_old: 1.0})
    sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.75})
if current_count%display_step==0:
current_loss = sess.run(loss_func, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
saver.save(sess, './model02/model.ckpt', global_step=current_count)
batch_xs, batch_ys = getFaces(50)
batch_xs = sess.run(features, {x_old: batch_xs, keep_prob_old: 1.0})
current_accuracy = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
        text = str(current_count) + " iterations" + ", current batch loss: " + "{:.6f}".format(
current_loss) + ", current_accuracy= " + "{:.5f}".format(current_accuracy)
print(text)
temp_img=np.zeros((100,1000,3),dtype=np.uint8)
temp_img =cv2.putText(temp_img, text, (5, 50), font, 0.5, (0, 0, 255), 1)
cv2.imshow('running',temp_img)
key_code=cv2.waitKey(1)
        if key_code in (ord('\r'), ord('q')):  # Enter or 'q' ends training
cv2.destroyAllWindows()
break
current_count+=1
print("优化完成!")
cv2.destroyAllWindows()
import tensorflow as tf
import numpy as np
import math
def weight_variable(shape):
    # stddev 0.1 keeps the truncated normal within [-0.2, 0.2];
    # a plain random initializer could fall outside that range
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    # start all biases at 0.1; random initial biases can lengthen training
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
def matMul(x,w):
return tf.matmul(x,w)
def reLu(f1_, name="ReLu"):
    # algebraic form of max(x, 0): (x + |x|) / 2
    with tf.variable_scope(name):
        a = (f1_ + tf.abs(f1_)) / 2
        return a
def getData():
x=[i*0.1 for i in range(0,100,1)]
y=[math.sin(i*0.1) for i in range(0,100,1)]
return x,y
# define the training data set: 100 samples of y = sin(x)
x_data, y_data = getData()
# use constants rather than Variables so the optimizer cannot "train" the
# inputs, and reshape both to [100, 1] so that out - y_data subtracts
# element-wise instead of broadcasting to [100, 100]
x_data = tf.reshape(tf.constant(x_data, dtype=tf.float32), [100, 1])
y_data = tf.reshape(tf.constant(y_data, dtype=tf.float32), [100, 1])
# define the TensorFlow model
n_class=1
n_input=1
Weights={
'w_f1':weight_variable([1,1024]),
'w_f2':weight_variable([1024,1024]),
'w_out':weight_variable([1024,n_class])
}
Biases={
'b_f1':bias_variable([1024]),
'b_f2':bias_variable([1024]),
'b_out':bias_variable([n_class])
}
f1 = reLu(matMul(x_data, Weights['w_f1']) + Biases['b_f1'])
f2 = tf.nn.relu(matMul(f1, Weights['w_f2']) + Biases['b_f2'])
out = matMul(f2, Weights['w_out']) + Biases['b_out']
# Weights=tf.Variable(tf.random_uniform([1],-1.0,1.0))
# biases=tf.Variable(tf.zeros([1]))
# train_y=Weights*x_data+biases
# define the loss: mean squared error against the sin targets
loss = tf.reduce_mean(tf.square(out - y_data))
# define the back-propagation (training) step
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
init=tf.global_variables_initializer()
sess=tf.Session()
sess.run(init)
for step in range(0, 350):
    sess.run(train)
    print(step, sess.run(loss))  # report the loss alongside the step number
# curWeight = sess.run(Weights[0])
# curBias = sess.run(biases[0])
# print('current_weight:{0}\tcurrent_bias:{1}'.format(curWeight,curBias))
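# A minimal check of the fit (a sketch): the mean absolute error between the
# trained network's predictions and the sin targets defined above.
fitted, target = sess.run([out, y_data])  # both have shape [100, 1]
print('mean abs error:', np.mean(np.abs(fitted - target)))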
sess.close()