My model-building code is below; any pointers from the experts here would be much appreciated:
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util

"""Weights: return an initial-value tensor (the tf.Variable wrapper is
applied at the call site, so Variables don't get nested)."""
def weight_variable(self, shape):
    tf.set_random_seed(1)
    return tf.truncated_normal(shape, stddev=0.05)

"""Bias."""
def bias_variable(self, shape):
    return tf.constant(0.1, shape=shape)
"""卷积"""
def conv2d(self, inputs, weight):
# stride = [1,水平移动步长,竖直移动步长,1]
return tf.nn.conv2d(inputs, weight, strides=[1, 1, 1, 1], padding='SAME')
"""池化"""
def pool(self, image):
# stride = [1,水平移动步长,竖直移动步长,1]
return tf.nn.max_pool(image, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def training(self):
    # Reset the default graph first so stale variables aren't reused by mistake
    tf.reset_default_graph()
    # Placeholders so inputs and labels can be fed in later
    with tf.name_scope("Input"):
        inputs = tf.placeholder(tf.float32, name='inputs')
        inputs_reshape = tf.reshape(inputs, [-1, 128, 128, 1])
        labels = tf.placeholder(tf.float32, name='labels')
with tf.name_scope("Layer1"):
c1_weight = tf.Variable(self.weight_variable([3, 3, 1, 4]),name='c1_weight')
c1_bias = tf.Variable(self.bias_variable([4]),name='c1_bias')
c1_relu = tf.nn.relu(self.conv2d(inputs_reshape, c1_weight) + c1_bias)
c1_pool = self.pool(c1_relu)
with tf.name_scope("Layer2"):
c2_weight = tf.Variable(self.weight_variable([3, 3, 4, 16]), name='c2_weight')
c2_bias = tf.Variable(self.bias_variable([16]),name='c2_bias')
c2_relu = tf.nn.relu(self.conv2d(c1_pool, c2_weight) + c2_bias)
c2_pool = self.pool(c2_relu)
with tf.name_scope("Layer4"):
c4_weight = tf.Variable(self.weight_variable([3, 3, 16, 32]),name='c4_weight')
c4_bias = tf.Variable(self.bias_variable([32]),name='c4_bias')
c4_relu = tf.nn.relu(self.conv2d(c2_pool, c4_weight) + c4_bias)
c4_pool = self.pool(c4_relu)
with tf.name_scope("Layer5"):
c5_weight = tf.Variable(self.weight_variable([3, 3, 32, 64]),name='c5_weight')
c5_bias = tf.Variable(self.bias_variable([64]),name='c5_bias')
c5_relu = tf.nn.relu(self.conv2d(c4_pool, c5_weight) + c5_bias)
c5_pool = self.pool(c5_relu)
with tf.name_scope("Layer6"):
c6_weight = tf.Variable(self.weight_variable([3, 3, 64, 128]), name='c5_weight')
c6_bias = tf.Variable(self.bias_variable([128]), name='c5_bias')
c6_relu = tf.nn.relu(self.conv2d(c5_pool, c6_weight) + c6_bias)
c6_pool = self.pool(c6_relu) # 最大池化后 8*8*64=4096
c6_pool_reshape = tf.reshape(c6_pool, [-1, 4* 4 * 128])
with tf.name_scope("Layer7"):
f4_weight = tf.Variable(self.weight_variable([8 * 8 * 32, 8*32]),name='f4_weight')
fn4_bias = tf.Variable(self.bias_variable([8*32]),name='fn4_bias')
fn4_relu = tf.nn.relu(tf.matmul(c6_pool_reshape, f4_weight) + fn4_bias)
fn4_drop = tf.nn.dropout(fn4_relu, keep_prob=self.drop_rate)
with tf.name_scope("Output"):
f7_weight = tf.Variable(self.weight_variable([8*32, self.output_size]), name='f7_weight')
f7_bias = tf.Variable(self.bias_variable([self.output_size]),name='f7_bias')
prediction = tf.add(tf.matmul(fn4_drop, f7_weight), f7_bias,name="prediction")
    '''Loss function.'''
    with tf.name_scope("Loss"):
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=labels))
    # Optimizer: AdamOptimizer
    with tf.name_scope("Train_Step"):
        train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
    with tf.name_scope("Accuracy"):
        # fraction of samples whose argmax matches the one-hot label
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1)), tf.float32))
    self.data = np.asarray(self.data)
    self.data1 = np.asarray(self.data1)
    images1, label1 = self.get_Batch(self.data, self.data1, self.batch_size)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        '''Initialize the variables defined in the graph (running each
        initializer once is enough).'''
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        # Log directory for TensorBoard summaries
        # summary_writer = tf.summary.FileWriter(self.LOG_DIR, sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        epoch = 0
        try:
            while not coord.should_stop():
                # Fetch the next batch as numpy arrays
                data, label = sess.run([images1, label1])
                epoch = epoch + 1
                # Run one optimization step, then evaluate loss and accuracy
                sess.run(train_step, feed_dict={inputs: data, labels: label})
                loss1 = sess.run(loss, feed_dict={inputs: data, labels: label})
                accuracy1 = sess.run(accuracy, feed_dict={inputs: data, labels: label})
                print('loss: ' + str(loss1))
                # Print the accuracy every 50 steps
                if epoch % 50 == 0:
                    print('accuracy: ' + str(accuracy1))
        except tf.errors.OutOfRangeError:
            print('Done training')
            '''Freezing bakes the current variable values into the graph as
            constants: only the GraphDef (the computation from the input
            layer to the output layer) is exported.
            convert_variables_to_constants takes node names, not tensor
            names: "add:0" would be a tensor name, "add" the node name.'''
            # Keep inputs/labels under Input and prediction under Output as the exported nodes
            constant_graph = graph_util.convert_variables_to_constants(
                sess, sess.graph_def,
                ["Input/inputs", "Input/labels", "Output/prediction"])
            # Save the final network parameters once the input queue is exhausted
            with tf.gfile.FastGFile(os.path.join(self.MODEL_SAVE_PATH, self.MODEL_NAME_pb), mode="wb") as f:
                f.write(constant_graph.SerializeToString())
        finally:
            coord.request_stop()
            coord.join(threads)
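To sanity-check the exported .pb I load it back like this. Just a sketch: load_and_predict is a made-up helper, pb_path stands in for whatever MODEL_SAVE_PATH/MODEL_NAME_pb resolves to, and image_batch is any float32 array that reshapes to [-1, 128, 128, 1]; the tensor names come from the Input/Output scopes above.

def load_and_predict(pb_path, image_batch):
    # Read the frozen GraphDef back from disk
    with tf.gfile.FastGFile(pb_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Import it into a fresh graph; name='' keeps the original node names
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        inputs = graph.get_tensor_by_name('Input/inputs:0')
        prediction = graph.get_tensor_by_name('Output/prediction:0')
        with tf.Session(graph=graph) as sess:
            logits = sess.run(prediction, feed_dict={inputs: image_batch})
    # prediction holds raw logits, so take the argmax for the class index
    return np.argmax(logits, axis=1)

One thing I'm not sure about: since keep_prob is hard-coded into the graph, the dropout layer stays active in the frozen model at inference time; feeding keep_prob through a placeholder that defaults to 1.0 would probably be cleaner.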