from rknn.api import RKNN
The conversion code is as above. I went through the toolkit manual; since the input is 4096 spatial points, each with x, y, z coordinates, I did not configure any normalization in config(). I also have a question: the PB model was frozen from a checkpoint I trained myself with the open-source code, and when freezing I only specified the output node. I have never been sure about the input node, i.e. whether it is actually Placeholder.
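A minimal sketch of the conversion flow with the RKNN-Toolkit 1.x API (the input node 'Placeholder', the output node 'Squeeze', and the file names are assumptions that must match the frozen graph; note that the model's input placeholder is defined as batch x 4096 x 9, i.e. 9 values per point, not just x, y, z):

from rknn.api import RKNN

rknn = RKNN(verbose=True)

# Point data is fed in as-is, so no mean/scale normalization is configured.
rknn.config()

# Node names and input size are assumptions; check them against the frozen PB.
ret = rknn.load_tensorflow(tf_pb='pointnet_model_final.pb',
                           inputs=['Placeholder'],    # assumed input node
                           outputs=['Squeeze'],       # assumed output node (tf.squeeze at the end of get_model)
                           input_size_list=[[4096, 9]])
print('load_tensorflow:', ret)

ret = rknn.build(do_quantization=False)
print('build:', ret)

ret = rknn.export_rknn('pointnet.rknn')
print('export_rknn:', ret)

rknn.release()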
The network structure is as follows.
The source model and train files are as follows.
Model
import tensorflow as tf
import math
import time
import numpy as np
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util

def placeholder_inputs(batch_size, num_point):
    pointclouds_pl = tf.placeholder(tf.float32,
                                    shape=(batch_size, num_point, 9))
    labels_pl = tf.placeholder(tf.int32,
                               shape=(batch_size, num_point))
    return pointclouds_pl, labels_pl

def get_model(point_cloud, is_training, bn_decay=None):
    """ ConvNet baseline, input is BxNx9 point cloud """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    input_image = tf.expand_dims(point_cloud, -1)
    # CONV
    net = tf_util.conv2d(input_image, 64, [1,9], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv2', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 64, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv4', bn_decay=bn_decay)
    points_feat1 = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                                  bn=True, is_training=is_training, scope='conv5', bn_decay=bn_decay)
    # MAX
    pc_feat1 = tf_util.max_pool2d(points_feat1, [num_point,1], padding='VALID', scope='maxpool1')
    # FC
    pc_feat1 = tf.reshape(pc_feat1, [batch_size, -1])
    pc_feat1 = tf_util.fully_connected(pc_feat1, 256, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
    pc_feat1 = tf_util.fully_connected(pc_feat1, 128, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
    print(pc_feat1)

    # CONCAT
    pc_feat1_expand = tf.tile(tf.reshape(pc_feat1, [batch_size, 1, 1, -1]), [1, num_point, 1, 1])
    points_feat1_concat = tf.concat(axis=3, values=[points_feat1, pc_feat1_expand])

    # CONV
    net = tf_util.conv2d(points_feat1_concat, 512, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv6')
    net = tf_util.conv2d(net, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='conv7')
    net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
    net = tf_util.conv2d(net, 13, [1,1], padding='VALID', stride=[1,1],
                         activation_fn=None, scope='conv8')
    net = tf.squeeze(net, [2])
    return net

def get_loss(pred, label):
    """ pred: B,N,13
        label: B,N """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    return tf.reduce_mean(loss)

if __name__ == "__main__":
    with tf.Graph().as_default():
        a = tf.placeholder(tf.float32, shape=(32,4096,9))
        net = get_model(a, tf.constant(True))
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            start = time.time()
            for i in range(100):
                print(i)
                sess.run(net, feed_dict={a:np.random.rand(32,4096,9)})
            print(time.time() - start)
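One note on the input-node question: placeholder_inputs() above creates both placeholders without a name argument, so TensorFlow assigns default names, which would make the point-cloud input 'Placeholder' and the labels 'Placeholder_1'. A rough sketch for listing what actually ended up in the frozen PB (the file name is taken from the error log below):

import tensorflow as tf

# Load the frozen graph and print the placeholder nodes plus the last few ops,
# to confirm the real input/output node names before converting.
graph_def = tf.compat.v1.GraphDef()
with open('pointnet_model_final.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

for node in graph_def.node:
    if node.op == 'Placeholder':
        print('placeholder:', node.name)

for node in graph_def.node[-5:]:
    print('tail op:', node.op, node.name)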
Train
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import tf_util
from model import *

parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
parser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')
parser.add_argument('--batch_size', type=int, default=24, help='Batch Size during training [default: 24]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')
parser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
parser.add_argument('--test_area', type=int, default=6, help='Which area to use for test, option: 1-6 [default: 6]')
FLAGS = parser.parse_args()

BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate

LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp model.py %s' % (LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')

MAX_NUM_POINT = 4096
NUM_CLASSES = 13

BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
#BN_DECAY_DECAY_STEP = float(DECAY_STEP * 2)
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99

HOSTNAME = socket.gethostname()

ALL_FILES = provider.getDataFiles('indoor3d_sem_seg_hdf5_data/all_files.txt')
room_filelist = [line.rstrip() for line in open('indoor3d_sem_seg_hdf5_data/room_filelist.txt')]

# Load ALL data
data_batch_list = []
label_batch_list = []
for h5_filename in ALL_FILES:
    data_batch, label_batch = provider.loadDataFile(h5_filename)
    data_batch_list.append(data_batch)
    label_batch_list.append(label_batch)
data_batches = np.concatenate(data_batch_list, 0)
label_batches = np.concatenate(label_batch_list, 0)
print(data_batches.shape)
print(label_batches.shape)

test_area = 'Area_'+str(FLAGS.test_area)
train_idxs = []
test_idxs = []
for i,room_name in enumerate(room_filelist):
    if test_area in room_name:
        test_idxs.append(i)
    else:
        train_idxs.append(i)

train_data = data_batches[train_idxs,...]
train_label = label_batches[train_idxs]
test_data = data_batches[test_idxs,...]
test_label = label_batches[test_idxs]
print(train_data.shape, train_label.shape)
print(test_data.shape, test_label.shape)

def log_string(out_str):
    LOG_FOUT.write(out_str+'\n')
    LOG_FOUT.flush()
    print(out_str)

def get_learning_rate(batch):
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
    return learning_rate

def get_bn_decay(batch):
    bn_momentum = tf.train.exponential_decay(
                      BN_INIT_DECAY,
                      batch*BATCH_SIZE,
                      BN_DECAY_DECAY_STEP,
                      BN_DECAY_DECAY_RATE,
                      staircase=True)
    bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
    return bn_decay

def train():
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl = placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())

            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)

            # Get model and loss
            pred = get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = get_loss(pred, labels_pl)
            tf.summary.scalar('loss', loss)

            correct = tf.equal(tf.argmax(pred, 2), tf.to_int64(labels_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE*NUM_POINT)
            tf.summary.scalar('accuracy', accuracy)

            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)

            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()

        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = True
        sess = tf.Session(config=config)

        # Add summary writers
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                             sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))

        # Init variables
        init = tf.global_variables_initializer()
        sess.run(init, {is_training_pl: True})

        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch}

        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()

            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)

            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)

def train_one_epoch(sess, ops, train_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = True

    log_string('----')
    current_data, current_label, _ = provider.shuffle_data(train_data[:,0:NUM_POINT,:], train_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    total_correct = 0
    total_seen = 0
    loss_sum = 0

    for batch_idx in range(num_batches):
        if batch_idx % 100 == 0:
            print('Current batch/total batch num: %d/%d'%(batch_idx,num_batches))
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE

        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training,}
        summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred']],
                                                        feed_dict=feed_dict)
        train_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += loss_val

    log_string('mean loss: %f' % (loss_sum / float(num_batches)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))

def eval_one_epoch(sess, ops, test_writer):
    """ ops: dict mapping from string to tf ops """
    is_training = False
    total_correct = 0
    total_seen = 0
    loss_sum = 0
    total_seen_class = [0 for _ in range(NUM_CLASSES)]
    total_correct_class = [0 for _ in range(NUM_CLASSES)]

    log_string('----')
    current_data = test_data[:,0:NUM_POINT,:]
    current_label = np.squeeze(test_label)

    file_size = current_data.shape[0]
    num_batches = file_size // BATCH_SIZE

    for batch_idx in range(num_batches):
        start_idx = batch_idx * BATCH_SIZE
        end_idx = (batch_idx+1) * BATCH_SIZE
        feed_dict = {ops['pointclouds_pl']: current_data[start_idx:end_idx, :, :],
                     ops['labels_pl']: current_label[start_idx:end_idx],
                     ops['is_training_pl']: is_training}
        summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], ops['loss'], ops['pred']],
                                                     feed_dict=feed_dict)
        test_writer.add_summary(summary, step)
        pred_val = np.argmax(pred_val, 2)
        correct = np.sum(pred_val == current_label[start_idx:end_idx])
        total_correct += correct
        total_seen += (BATCH_SIZE*NUM_POINT)
        loss_sum += (loss_val*BATCH_SIZE)
        for i in range(start_idx, end_idx):
            for j in range(NUM_POINT):
                l = current_label[i, j]
                total_seen_class[l] += 1
                total_correct_class[l] += (pred_val[i-start_idx, j] == l)

    log_string('eval mean loss: %f' % (loss_sum / float(total_seen/NUM_POINT)))
    log_string('eval accuracy: %f'% (total_correct / float(total_seen)))
    log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float))))

if __name__ == "__main__":
    train()
    LOG_FOUT.close()
The error output when running the PB-to-RKNN conversion is as follows:
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/ceil.py:10: The name tf.ceil is deprecated. Please use tf.math.ceil instead.
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/depth_to_space.py:12: The name tf.depth_to_space is deprecated. Please use tf.compat.v1.depth_to_space instead.
W:tensorflow:
The TensorFlow contrib module will not be included in TensorFlow 2.0.
For more information, please see:
  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md
  * https://github.com/tensorflow/addons
  * https://github.com/tensorflow/io (for I/O related ops)
If you depend on functionality not listed there, please file an issue.
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/log.py:10: The name tf.log is deprecated. Please use tf.math.log instead.
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/random_normal.py:9: The name tf.random_normal is deprecated. Please use tf.random.normal instead.
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/random_uniform.py:9: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/handlers/backend/upsample.py:13: The name tf.image.resize_images is deprecated. Please use tf.image.resize instead.
/home/toybrick/.local/lib/python3.7/site-packages/onnx_tf/common/__init__.py:87: UserWarning: FrontendHandler.get_outputs_names is deprecated. It will be removed in future release.. Use node.outputs instead.
  warnings.warn(message)
W:tensorflow:From /home/toybrick/.local/lib/python3.7/site-packages/rknn/api/rknn.py:67: extract_sub_graph (from tensorflow.python.framework.graph_util_impl) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.compat.v1.graph_util.extract_sub_graph`
E Catch exception when loading tensorflow model: pointnet_model_final.pb!
E Traceback (most recent call last):
E   File "rknn/api/rknn_base.py", line 215, in rknn.api.rknn_base.RKNNBase.load_tensorflow
E   File "rknn/base/RKNNlib/converter/convert_tf.py", line 527, in rknn.base.RKNNlib.converter.convert_tf.convert_tf.pre_process
E   File "rknn/base/RKNNlib/converter/tensorflowloader.py", line 77, in rknn.base.RKNNlib.converter.tensorflowloader.TF_Graph_Preprocess.pre_proces
E   File "rknn/base/RKNNlib/converter/tensorflowloader.py", line 520, in rknn.base.RKNNlib.converter.tensorflowloader.TF_Graph_Preprocess.freeze_switch_path_v3
E   File "rknn/base/RKNNlib/converter/tensorflowloader.py", line 436, in rknn.base.RKNNlib.converter.tensorflowloader.TF_Graph_Preprocess.freeze_switch_path_v3.fix_select_branch
E   File "rknn/base/RKNNlib/converter/tf_util.py", line 198, in rknn.base.RKNNlib.converter.tf_util.TFProto_Util.change_input
E   File "/home/toybrick/.local/lib/python3.7/site-packages/google/protobuf/internal/containers.py", line 204, in __getitem__
E     return self._values[key]
E IndexError: list index out of range
done
--> Building model
Traceback (most recent call last):
  File "rknn_transfer.py", line 26, in <module>
    rknn.build()
  File "/home/toybrick/.local/lib/python3.7/site-packages/rknn/api/rknn.py", line 222, in build
    inputs = self.rknn_base.net.get_input_layers()
AttributeError: 'NoneType' object has no attribute 'get_input_layers'
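The load step dies inside freeze_switch_path_v3, which appears to rewrite Switch/Merge control-flow branches; in this network those most likely come from the tf.cond calls inside tf_util's batch-norm and dropout wrappers, driven by the is_training input. A rough sketch for checking whether such control-flow ops are still present in the frozen PB (same graph-loading assumption as above):

import tensorflow as tf

# Count the control-flow ops left over from tf.cond (batch norm / dropout).
graph_def = tf.compat.v1.GraphDef()
with open('pointnet_model_final.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

cond_ops = [n.name for n in graph_def.node if n.op in ('Switch', 'Merge')]
print('Switch/Merge nodes in the graph:', len(cond_ops))
for name in cond_ops[:20]:
    print(' ', name)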