From 75a096826061234e97a03bad7789712ca04e57cc Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:15:29 +0000 Subject: [PATCH 01/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20=E5=B4=94=E9=A9=BF?= =?UTF-8?q?=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91=E6=B7=B1?= =?UTF-8?q?=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA?= =?UTF-8?q?=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From be13659b7e2aa8f268a0b74e03c51265572a5dfb Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:30:41 +0000 Subject: [PATCH 02/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20data?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 525196f8f6cdfaf47b960bd0863a047327ea5ada Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:32:24 +0000 Subject: [PATCH 03/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20modelnet40=5Fply=5Fh?= =?UTF-8?q?df5=5F2048?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data/modelnet40_ply_hdf5_2048/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 4a7a8bf9cf440032afe288635887f593307f54bf Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:37:29 +0000 Subject: [PATCH 04/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20models?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 70657f744dc0d197b4b8db6c28a3d91e650160c7 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:40:40 +0000 Subject: [PATCH 05/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20pointnet2=5Fcls=5Fms?= =?UTF-8?q?g.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../models/pointnet2_cls_msg.py/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 3ae0bbf0b34c3158c7b52c0f14fde990707dd766 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:41:20 +0000 Subject: [PATCH 06/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/models/pointnet2?= =?UTF-8?q?=5Fcls=5Fmsg.py/.keep.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../models/pointnet2_cls_msg.py/.keep" | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" index e69de29..c750c42 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" @@ -0,0 +1,61 @@ +""" + PointNet++ Model for point clouds classification +""" + +import os +import sys +BASE_DIR = os.path.dirname(__file__) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, '../utils')) +import tensorflow as tf +import numpy as np +import tf_util +from pointnet_util import pointnet_sa_module + +def placeholder_inputs(batch_size, num_point): + pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3)) + labels_pl = tf.placeholder(tf.int32, shape=(batch_size)) + return pointclouds_pl, labels_pl + +def get_model(point_cloud, is_training, bn_decay=None): + """ Classification PointNet, input is BxNx3, output Bx40 """ + batch_size = point_cloud.get_shape()[0].value + num_point = point_cloud.get_shape()[1].value + end_points = {} + l0_xyz = point_cloud + 
l0_points = None + end_points['l0_xyz'] = l0_xyz + + # Set abstraction layers + # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4). + # So we only use NCHW for layer 1 until this issue can be resolved. + l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True) + l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2') + l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3') + + # Fully connected layers + net = tf.reshape(l3_points, [batch_size, -1]) + net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay) + net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1') + net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay) + net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2') + net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3') + + return net, end_points + + +def get_loss(pred, label, end_points): + """ pred: B*NUM_CLASSES, + label: B, """ + loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label) + classify_loss = tf.reduce_mean(loss) + tf.summary.scalar('classify loss', classify_loss) + tf.add_to_collection('losses', classify_loss) + return classify_loss + + +if __name__=='__main__': + with tf.Graph().as_default(): + inputs = tf.zeros((32,1024,3)) + output, _ = get_model(inputs, tf.constant(True)) + print(output) -- Gitee From 
daf68344f40880208e4725da0761949b9303ae8c Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:42:12 +0000 Subject: [PATCH 07/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20tf=5Fops?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 44fcc51d924a1c139672775a0503e8824069da0b Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:43:25 +0000 Subject: [PATCH 08/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/tf=5Fops.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/3d_interpolation" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" new file mode 100644 index 0000000..e69de29 -- Gitee From d11441b8fd5eee69eba1969387b37c7b9ce5053b Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:43:46 +0000 Subject: [PATCH 09/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=203d=5Finterpolation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/3d_interpolation/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" => "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" (100%) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" -- Gitee From 28ac7259f162b0e55186e89bf5209989cb5d0319 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:45:28 +0000 Subject: [PATCH 10/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/tf=5Fops/3d=5Fint?= =?UTF-8?q?erpolation/interpolate.cpp.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/3d_interpolation/interpolate.cpp" | 169 ++++++++++++++++++ 1 file changed, 169 insertions(+) create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" new file mode 100644 index 0000000..c5ac240 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" @@ -0,0 +1,169 @@ +#include +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include +#include +using namespace std; +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { + for (int i=0;i Date: Thu, 27 Oct 2022 01:46:05 +0000 Subject: [PATCH 11/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= 
=?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/tf=5Fops/3d=5Fint?= =?UTF-8?q?erpolation/tf=5Finterpolate.cpp.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../3d_interpolation/tf_interpolate.cpp" | 267 ++++++++++++++++++ 1 file changed, 267 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" new file mode 100644 index 0000000..d01aeff --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" @@ -0,0 +1,267 @@ +#include +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" +#include 
"tensorflow/core/framework/common_shape_fns.h" + +#pragma GCC diagnostic ignored "-Wunused-result" + +using namespace tensorflow; + +REGISTER_OP("ThreeNN") + .Input("xyz1: float32") + .Input("xyz2: float32") + .Output("dist: float32") + .Output("idx: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + c->set_output(1, c->input(0)); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolate") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c) + c->WithRank(c->input(0), 3, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3) + c->WithRank(c->input(1), 3, &dims2); + // (b,n,c) + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolateGrad") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Input("grad_out: float32") + .Output("grad_points: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); + }); + +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { + for (int i=0;iinput(0); + OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape.")); + int b = xyz1_tensor.shape().dim_size(0); + int n = 
xyz1_tensor.shape().dim_size(1); + + const Tensor& xyz2_tensor = context->input(1); + OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape.")); + int m = xyz2_tensor.shape().dim_size(1); + + Tensor *dist_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor)); + Tensor *idx_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor)); + + auto xyz1_flat = xyz1_tensor.flat(); + const float *xyz1 = &(xyz1_flat(0)); + auto xyz2_flat = xyz2_tensor.flat(); + const float *xyz2 = &(xyz2_flat(0)); + auto dist_flat = dist_tensor->flat(); + float *dist = &(dist_flat(0)); + auto idx_flat = idx_tensor->flat(); + int *idx = &(idx_flat(0)); + threenn_cpu(b,n,m,xyz1,xyz2,dist,idx); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp); + + + +class ThreeInterpolateOp: public OpKernel{ + public: + explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape")); + int b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape")); + int n = idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) 
weight shape")); + + Tensor * out_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto out_flat = out_tensor->flat(); + float *out = &(out_flat(0)); + threeinterpolate_cpu(b,m,c,n,points,idx,weight,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp); + + +class ThreeInterpolateGradOp: public OpKernel{ + public: + explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape")); + int b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx shape")); + int n = idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape")); + + const Tensor& grad_out_tensor=context->input(3); + OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape")); + + Tensor * 
grad_points_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto grad_out_flat = grad_out_tensor.flat(); + const float *grad_out = &(grad_out_flat(0)); + auto grad_points_flat = grad_points_tensor->flat(); + float *grad_points = &(grad_points_flat(0)); + memset(grad_points, 0, sizeof(float)*b*m*c); + threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp); + + -- Gitee From 3c4d19a5d161068bf36a4503b45335d133218f58 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:48:24 +0000 Subject: [PATCH 12/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/3d=5Finterpolation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/3d_interpolation/.keep" | 0 .../tf_ops/3d_interpolation/interpolate.cpp" | 169 ----------- .../3d_interpolation/tf_interpolate.cpp" | 267 ------------------ 3 files changed, 436 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" delete mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" deleted file mode 100644 index c5ac240..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" +++ /dev/null @@ -1,169 +0,0 @@ -#include -#include -#include // memset -#include // rand, RAND_MAX -#include // sqrtf -#include -#include -using namespace std; -float randomf(){ - return (rand()+0.5)/(RAND_MAX+1.0); -} -static double get_time(){ - timespec tp; - clock_gettime(CLOCK_MONOTONIC,&tp); - return tp.tv_sec+tp.tv_nsec*1e-9; -} - -// Find three nearest neigbors with square distance -// input: xyz1 (b,n,3), xyz2(b,m,3) -// output: dist (b,n,3), idx (b,n,3) -void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { - for (int i=0;i -#include -#include // memset -#include // rand, RAND_MAX -#include // sqrtf -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/common_shape_fns.h" - -#pragma GCC diagnostic ignored "-Wunused-result" - -using namespace tensorflow; - -REGISTER_OP("ThreeNN") - .Input("xyz1: float32") - .Input("xyz2: float32") - .Output("dist: float32") - .Output("idx: int32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - c->set_output(0, c->input(0)); - c->set_output(1, c->input(0)); - return Status::OK(); - }); -REGISTER_OP("ThreeInterpolate") - .Input("points: float32") - .Input("idx: int32") - .Input("weight: float32") - .Output("out: float32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c) - c->WithRank(c->input(0), 3, &dims1); - ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3) - c->WithRank(c->input(1), 3, &dims2); - // (b,n,c) - 
::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); - c->set_output(0, output); - return Status::OK(); - }); -REGISTER_OP("ThreeInterpolateGrad") - .Input("points: float32") - .Input("idx: int32") - .Input("weight: float32") - .Input("grad_out: float32") - .Output("grad_points: float32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - c->set_output(0, c->input(0)); - return Status::OK(); - }); - -float randomf(){ - return (rand()+0.5)/(RAND_MAX+1.0); -} -static double get_time(){ - timespec tp; - clock_gettime(CLOCK_MONOTONIC,&tp); - return tp.tv_sec+tp.tv_nsec*1e-9; -} - -// Find three nearest neigbors with square distance -// input: xyz1 (b,n,3), xyz2(b,m,3) -// output: dist (b,n,3), idx (b,n,3) -void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { - for (int i=0;iinput(0); - OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape.")); - int b = xyz1_tensor.shape().dim_size(0); - int n = xyz1_tensor.shape().dim_size(1); - - const Tensor& xyz2_tensor = context->input(1); - OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape.")); - int m = xyz2_tensor.shape().dim_size(1); - - Tensor *dist_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor)); - Tensor *idx_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor)); - - auto xyz1_flat = xyz1_tensor.flat(); - const float *xyz1 = &(xyz1_flat(0)); - auto xyz2_flat = xyz2_tensor.flat(); - const float *xyz2 = &(xyz2_flat(0)); - auto dist_flat = dist_tensor->flat(); - float *dist = &(dist_flat(0)); - auto idx_flat = idx_tensor->flat(); - int *idx = &(idx_flat(0)); - threenn_cpu(b,n,m,xyz1,xyz2,dist,idx); - } 
-}; -REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp); - - - -class ThreeInterpolateOp: public OpKernel{ - public: - explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){} - - void Compute(OpKernelContext * context) override { - const Tensor& points_tensor=context->input(0); - OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape")); - int b = points_tensor.shape().dim_size(0); - int m = points_tensor.shape().dim_size(1); - int c = points_tensor.shape().dim_size(2); - - const Tensor& idx_tensor=context->input(1); - OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape")); - int n = idx_tensor.shape().dim_size(1); - const Tensor& weight_tensor=context->input(2); - OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) weight shape")); - - Tensor * out_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor)); - - auto points_flat = points_tensor.flat(); - const float *points = &(points_flat(0)); - auto idx_flat = idx_tensor.flat(); - const int *idx = &(idx_flat(0)); - auto weight_flat = weight_tensor.flat(); - const float *weight = &(weight_flat(0)); - auto out_flat = out_tensor->flat(); - float *out = &(out_flat(0)); - threeinterpolate_cpu(b,m,c,n,points,idx,weight,out); - } -}; -REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp); - - -class ThreeInterpolateGradOp: public OpKernel{ - public: - explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){} - - void Compute(OpKernelContext * context) override { - const Tensor& points_tensor=context->input(0); - 
OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape")); - int b = points_tensor.shape().dim_size(0); - int m = points_tensor.shape().dim_size(1); - int c = points_tensor.shape().dim_size(2); - - const Tensor& idx_tensor=context->input(1); - OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx shape")); - int n = idx_tensor.shape().dim_size(1); - const Tensor& weight_tensor=context->input(2); - OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape")); - - const Tensor& grad_out_tensor=context->input(3); - OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape")); - - Tensor * grad_points_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor)); - - auto points_flat = points_tensor.flat(); - const float *points = &(points_flat(0)); - auto idx_flat = idx_tensor.flat(); - const int *idx = &(idx_flat(0)); - auto weight_flat = weight_tensor.flat(); - const float *weight = &(weight_flat(0)); - auto grad_out_flat = grad_out_tensor.flat(); - const float *grad_out = &(grad_out_flat(0)); - auto grad_points_flat = grad_points_tensor->flat(); - float *grad_points = &(grad_points_flat(0)); - memset(grad_points, 0, sizeof(float)*b*m*c); - threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points); - } -}; -REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp); - - -- Gitee From 5902994456d397c684465a19ddff95b467b7b7e6 Mon Sep 17 00:00:00 2001 
From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:48:34 +0000 Subject: [PATCH 13/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" deleted file mode 100644 index e69de29..0000000 -- Gitee From 74031f928e40ea998ca0b034fd3b29654c2036df Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:54:16 +0000 Subject: [PATCH 14/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/models/pointnet2=5Fcls=5Fmsg.py?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models/pointnet2_cls_msg.py/.keep" | 61 ------------------- 1 file changed, 61 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" deleted file mode 100644 index c750c42..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_msg.py/.keep" +++ /dev/null @@ -1,61 +0,0 @@ -""" - PointNet++ Model for point clouds classification -""" - -import os -import sys -BASE_DIR = os.path.dirname(__file__) -sys.path.append(BASE_DIR) -sys.path.append(os.path.join(BASE_DIR, '../utils')) -import tensorflow as tf -import numpy as np -import tf_util -from pointnet_util import pointnet_sa_module - -def placeholder_inputs(batch_size, num_point): - pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3)) - labels_pl = tf.placeholder(tf.int32, shape=(batch_size)) - return pointclouds_pl, labels_pl - -def 
get_model(point_cloud, is_training, bn_decay=None): - """ Classification PointNet, input is BxNx3, output Bx40 """ - batch_size = point_cloud.get_shape()[0].value - num_point = point_cloud.get_shape()[1].value - end_points = {} - l0_xyz = point_cloud - l0_points = None - end_points['l0_xyz'] = l0_xyz - - # Set abstraction layers - # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4). - # So we only use NCHW for layer 1 until this issue can be resolved. - l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True) - l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2') - l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3') - - # Fully connected layers - net = tf.reshape(l3_points, [batch_size, -1]) - net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay) - net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1') - net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay) - net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2') - net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3') - - return net, end_points - - -def get_loss(pred, label, end_points): - """ pred: B*NUM_CLASSES, - label: B, """ - loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label) - classify_loss = tf.reduce_mean(loss) - tf.summary.scalar('classify loss', classify_loss) - tf.add_to_collection('losses', 
classify_loss) - return classify_loss - - -if __name__=='__main__': - with tf.Graph().as_default(): - inputs = tf.zeros((32,1024,3)) - output, _ = get_model(inputs, tf.constant(True)) - print(output) -- Gitee From 9b6df4fa03e7ea1698136b47806586903986aff7 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:54:43 +0000 Subject: [PATCH 15/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/models/pointnet.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../models/pointnet_cls.py" | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" new file mode 100644 index 0000000..520e8f2 --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" @@ -0,0 +1,40 @@ +import torch.nn as nn +import torch.utils.data +import torch.nn.functional as F +from pointnet import PointNetEncoder, feature_transform_reguliarzer + +class get_model(nn.Module): + def __init__(self, k=40, normal_channel=True): + super(get_model, self).__init__() + if normal_channel: + channel = 6 + else: + channel = 3 + self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel) + self.fc1 = nn.Linear(1024, 512) + self.fc2 = nn.Linear(512, 256) + self.fc3 = nn.Linear(256, k) + self.dropout = nn.Dropout(p=0.4) + self.bn1 = nn.BatchNorm1d(512) + self.bn2 = nn.BatchNorm1d(256) + self.relu = nn.ReLU() + + def forward(self, x): + x, trans, trans_feat = self.feat(x) + x = F.relu(self.bn1(self.fc1(x))) + x = F.relu(self.bn2(self.dropout(self.fc2(x)))) + x = self.fc3(x) + x = F.log_softmax(x, dim=1) + return x, trans_feat + +class get_loss(torch.nn.Module): + def __init__(self, mat_diff_loss_scale=0.001): + super(get_loss, self).__init__() + self.mat_diff_loss_scale = mat_diff_loss_scale + + def forward(self, pred, target, trans_feat): + loss = F.nll_loss(pred, target) + mat_diff_loss = feature_transform_reguliarzer(trans_feat) + + total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale + return total_loss -- Gitee From 8be694b8c145aeafacda5bedd6c282760906f8df Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:55:14 +0000 Subject: [PATCH 16/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20data=5Futils?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data_utils/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 808d02b5aa64dd48010e5e5b86c5199039bdfe52 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:58:01 +0000 Subject: [PATCH 17/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils/coll?= =?UTF-8?q?ect=5Findoor3d=5Fdata.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/collect_indoor3d_data.py" | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" new file mode 100644 index 0000000..f1bdeeb --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" @@ -0,0 +1,24 @@ +import os +import sys +from indoor3d_util import DATA_PATH, collect_point_label + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) + +anno_paths = [line.rstrip() for line in open(os.path.join(BASE_DIR, 'meta/anno_paths.txt'))] +anno_paths = [os.path.join(DATA_PATH, p) for p in anno_paths] + +output_folder = os.path.join(ROOT_DIR, 'data/stanford_indoor3d') +if not os.path.exists(output_folder): + os.mkdir(output_folder) + +# Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually. 
+for anno_path in anno_paths: + print(anno_path) + try: + elements = anno_path.split('/') + out_filename = elements[-3]+'_'+elements[-2]+'.npy' # Area_1_hallway_1.npy + collect_point_label(anno_path, os.path.join(output_folder, out_filename), 'numpy') + except: + print(anno_path, 'ERROR!!') -- Gitee From bdc411069fce3e4e2b07bcee53dcec063d530af0 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:58:35 +0000 Subject: [PATCH 18/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils/indo?= =?UTF-8?q?or3d=5Futil.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/indoor3d_util.py" | 598 ++++++++++++++++++ 1 file changed, 598 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" new file mode 100644 index 0000000..42a7d97 --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" @@ -0,0 +1,598 @@ +import numpy as np +import glob +import os +import sys + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(BASE_DIR) + +DATA_PATH = os.path.join(ROOT_DIR, 'data','s3dis', 'Stanford3dDataset_v1.2_Aligned_Version') +g_classes = [x.rstrip() for x in open(os.path.join(BASE_DIR, 'meta/class_names.txt'))] +g_class2label = {cls: i for i,cls in enumerate(g_classes)} +g_class2color = {'ceiling': [0,255,0], + 'floor': [0,0,255], + 'wall': [0,255,255], + 'beam': [255,255,0], + 'column': [255,0,255], + 'window': [100,100,255], + 'door': [200,200,100], + 'table': [170,120,200], + 'chair': [255,0,0], + 'sofa': [200,100,100], + 'bookcase': [10,200,100], + 'board': [200,200,200], + 'clutter': [50,50,50]} +g_easy_view_labels = [7,8,9,10,11,1] +g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes} + + +# ----------------------------------------------------------------------------- +# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES +# ----------------------------------------------------------------------------- + +def collect_point_label(anno_path, out_filename, file_format='txt'): + """ Convert original dataset files to data_label file (each line is XYZRGBL). + We aggregated all the points from each instance in the room. + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save collected points and labels (each line is XYZRGBL) + file_format: txt or numpy, determines what file format to save. + Returns: + None + Note: + the points are shifted before save, the most negative point is now at origin. 
+ """ + points_list = [] + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + print(f) + if cls not in g_classes: # note: in some room there is 'staris' class.. + cls = 'clutter' + + points = np.loadtxt(f) + labels = np.ones((points.shape[0],1)) * g_class2label[cls] + points_list.append(np.concatenate([points, labels], 1)) # Nx7 + + data_label = np.concatenate(points_list, 0) + xyz_min = np.amin(data_label, axis=0)[0:3] + data_label[:, 0:3] -= xyz_min + + if file_format=='txt': + fout = open(out_filename, 'w') + for i in range(data_label.shape[0]): + fout.write('%f %f %f %d %d %d %d\n' % \ + (data_label[i,0], data_label[i,1], data_label[i,2], + data_label[i,3], data_label[i,4], data_label[i,5], + data_label[i,6])) + fout.close() + elif file_format=='numpy': + np.save(out_filename, data_label) + else: + print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \ + (file_format)) + exit() + +def data_to_obj(data,name='example.obj',no_wall=True): + fout = open(name, 'w') + label = data[:, -1].astype(int) + for i in range(data.shape[0]): + if no_wall and ((label[i] == 2) or (label[i]==0)): + continue + fout.write('v %f %f %f %d %d %d\n' % \ + (data[i, 0], data[i, 1], data[i, 2], data[i, 3], data[i, 4], data[i, 5])) + fout.close() + +def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False): + """ For visualization of a room from data_label file, + input_filename: each line is X Y Z R G B L + out_filename: OBJ filename, + visualize input file by coloring point with label color + easy_view: only visualize furnitures and floor + """ + data_label = np.loadtxt(input_filename) + data = data_label[:, 0:6] + label = data_label[:, -1].astype(int) + fout = open(out_filename, 'w') + for i in range(data.shape[0]): + color = g_label2color[label[i]] + if easy_view and (label[i] not in g_easy_view_labels): + continue + if no_wall and ((label[i] == 2) or (label[i]==0)): + continue + 
if label_color: + fout.write('v %f %f %f %d %d %d\n' % \ + (data[i,0], data[i,1], data[i,2], color[0], color[1], color[2])) + else: + fout.write('v %f %f %f %d %d %d\n' % \ + (data[i,0], data[i,1], data[i,2], data[i,3], data[i,4], data[i,5])) + fout.close() + + + +# ----------------------------------------------------------------------------- +# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING +# ----------------------------------------------------------------------------- + +def sample_data(data, num_sample): + """ data is in N x ... + we want to keep num_samplexC of them. + if N > num_sample, we will randomly keep num_sample of them. + if N < num_sample, we will randomly duplicate samples. + """ + N = data.shape[0] + if (N == num_sample): + return data, range(N) + elif (N > num_sample): + sample = np.random.choice(N, num_sample) + return data[sample, ...], sample + else: + sample = np.random.choice(N, num_sample-N) + dup_data = data[sample, ...] + return np.concatenate([data, dup_data], 0), list(range(N))+list(sample) + +def sample_data_label(data, label, num_sample): + new_data, sample_indices = sample_data(data, num_sample) + new_label = label[sample_indices] + return new_data, new_label + +def room2blocks(data, label, num_point, block_size=1.0, stride=1.0, + random_sample=False, sample_num=None, sample_aug=1): + """ Prepare block training data. 
+ Args: + data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1] + assumes the data is shifted (min point is origin) and aligned + (aligned with XYZ axis) + label: N size uint8 numpy array from 0-12 + num_point: int, how many points to sample in each block + block_size: float, physical size of the block in meters + stride: float, stride for block sweeping + random_sample: bool, if True, we will randomly sample blocks in the room + sample_num: int, if random sample, how many blocks to sample + [default: room area] + sample_aug: if random sample, how much aug + Returns: + block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1] + block_labels: K x num_point x 1 np array of uint8 labels + + TODO: for this version, blocking is in fixed, non-overlapping pattern. + """ + assert(stride<=block_size) + + limit = np.amax(data, 0)[0:3] + + # Get the corner location for our sampling blocks + xbeg_list = [] + ybeg_list = [] + if not random_sample: + num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1 + num_block_y = int(np.ceil(collect_point_label(limit[1] - block_size) / stride)) + 1 + for i in range(num_block_x): + for j in range(num_block_y): + xbeg_list.append(i*stride) + ybeg_list.append(j*stride) + else: + num_block_x = int(np.ceil(limit[0] / block_size)) + num_block_y = int(np.ceil(limit[1] / block_size)) + if sample_num is None: + sample_num = num_block_x * num_block_y * sample_aug + for _ in range(sample_num): + xbeg = np.random.uniform(-block_size, limit[0]) + ybeg = np.random.uniform(-block_size, limit[1]) + xbeg_list.append(xbeg) + ybeg_list.append(ybeg) + + # Collect blocks + block_data_list = [] + block_label_list = [] + idx = 0 + for idx in range(len(xbeg_list)): + xbeg = xbeg_list[idx] + ybeg = ybeg_list[idx] + xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg) + ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg) + cond = xcond & ycond + if np.sum(cond) < 100: # discard block if there are less than 100 pts. 
+ continue + + block_data = data[cond, :] + block_label = label[cond] + + # randomly subsample data + block_data_sampled, block_label_sampled = \ + sample_data_label(block_data, block_label, num_point) + block_data_list.append(np.expand_dims(block_data_sampled, 0)) + block_label_list.append(np.expand_dims(block_label_sampled, 0)) + + return np.concatenate(block_data_list, 0), \ + np.concatenate(block_label_list, 0) + + +def room2blocks_plus(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug): + """ room2block with input filename and RGB preprocessing. + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + + return room2blocks(data, label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0, + random_sample=False, sample_num=None, sample_aug=1): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2blocks_plus(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2blocks_plus_normalized(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug): + """ room2block, with input filename and RGB preprocessing. 
+ for each block centralize XYZ, add normalized XYZ as 678 channels + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + max_room_x = max(data[:,0]) + max_room_y = max(data[:,1]) + max_room_z = max(data[:,2]) + + data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) + for b in range(data_batch.shape[0]): + new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x + new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y + new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z + minx = min(data_batch[b, :, 0]) + miny = min(data_batch[b, :, 1]) + data_batch[b, :, 0] -= (minx+block_size/2) + data_batch[b, :, 1] -= (miny+block_size/2) + new_data_batch[:, :, 0:6] = data_batch + return new_data_batch, label_batch + + +def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0, + random_sample=False, sample_num=None, sample_aug=1): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2blocks_plus_normalized(data_label, num_point, block_size, stride, + random_sample, sample_num, sample_aug) + +def room2samples(data, label, sample_num_point): + """ Prepare whole room samples. 
+ + Args: + data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1] + assumes the data is shifted (min point is origin) and + aligned (aligned with XYZ axis) + label: N size uint8 numpy array from 0-12 + sample_num_point: int, how many points to sample in each sample + Returns: + sample_datas: K x sample_num_point x 9 + numpy array of XYZRGBX'Y'Z', RGB is in [0,1] + sample_labels: K x sample_num_point x 1 np array of uint8 labels + """ + N = data.shape[0] + order = np.arange(N) + np.random.shuffle(order) + data = data[order, :] + label = label[order] + + batch_num = int(np.ceil(N / float(sample_num_point))) + sample_datas = np.zeros((batch_num, sample_num_point, 6)) + sample_labels = np.zeros((batch_num, sample_num_point, 1)) + + for i in range(batch_num): + beg_idx = i*sample_num_point + end_idx = min((i+1)*sample_num_point, N) + num = end_idx - beg_idx + sample_datas[i,0:num,:] = data[beg_idx:end_idx, :] + sample_labels[i,0:num,0] = label[beg_idx:end_idx] + if num < sample_num_point: + makeup_indices = np.random.choice(N, sample_num_point - num) + sample_datas[i,num:,:] = data[makeup_indices, :] + sample_labels[i,num:,0] = label[makeup_indices] + return sample_datas, sample_labels + +def room2samples_plus_normalized(data_label, num_point): + """ room2sample, with input filename and RGB preprocessing. 
+ for each block centralize XYZ, add normalized XYZ as 678 channels + """ + data = data_label[:,0:6] + data[:,3:6] /= 255.0 + label = data_label[:,-1].astype(np.uint8) + max_room_x = max(data[:,0]) + max_room_y = max(data[:,1]) + max_room_z = max(data[:,2]) + #print(max_room_x, max_room_y, max_room_z) + + data_batch, label_batch = room2samples(data, label, num_point) + new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) + for b in range(data_batch.shape[0]): + new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x + new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y + new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z + #minx = min(data_batch[b, :, 0]) + #miny = min(data_batch[b, :, 1]) + #data_batch[b, :, 0] -= (minx+block_size/2) + #data_batch[b, :, 1] -= (miny+block_size/2) + new_data_batch[:, :, 0:6] = data_batch + return new_data_batch, label_batch + + +def room2samples_wrapper_normalized(data_label_filename, num_point): + if data_label_filename[-3:] == 'txt': + data_label = np.loadtxt(data_label_filename) + elif data_label_filename[-3:] == 'npy': + data_label = np.load(data_label_filename) + else: + print('Unknown file type! exiting.') + exit() + return room2samples_plus_normalized(data_label, num_point) + + +# ----------------------------------------------------------------------------- +# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation) +# ----------------------------------------------------------------------------- + +def collect_bounding_box(anno_path, out_filename): + """ Compute bounding boxes from each instance in original dataset files on + one room. **We assume the bbox is aligned with XYZ coordinate.** + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save instance bounding boxes for that room. 
+ each line is x1 y1 z1 x2 y2 z2 label, + where (x1,y1,z1) is the point on the diagonal closer to origin + Returns: + None + Note: + room points are shifted, the most negative point is now at origin. + """ + bbox_label_list = [] + + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + if cls not in g_classes: # note: in some room there is 'staris' class.. + cls = 'clutter' + points = np.loadtxt(f) + label = g_class2label[cls] + # Compute tightest axis aligned bounding box + xyz_min = np.amin(points[:, 0:3], axis=0) + xyz_max = np.amax(points[:, 0:3], axis=0) + ins_bbox_label = np.expand_dims( + np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0) + bbox_label_list.append(ins_bbox_label) + + bbox_label = np.concatenate(bbox_label_list, 0) + room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0) + bbox_label[:, 0:3] -= room_xyz_min + bbox_label[:, 3:6] -= room_xyz_min + + fout = open(out_filename, 'w') + for i in range(bbox_label.shape[0]): + fout.write('%f %f %f %f %f %f %d\n' % \ + (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2], + bbox_label[i,3], bbox_label[i,4], bbox_label[i,5], + bbox_label[i,6])) + fout.close() + +def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False): + """ Visualization of bounding boxes. 
+ + Args: + input_filename: each line is x1 y1 z1 x2 y2 z2 label + out_filename_prefix: OBJ filename prefix, + visualize object by g_label2color + easy_view: if True, only visualize furniture and floor + Returns: + output a list of OBJ file and MTL files with the same prefix + """ + bbox_label = np.loadtxt(input_filename) + bbox = bbox_label[:, 0:6] + label = bbox_label[:, -1].astype(int) + v_cnt = 0 # count vertex + ins_cnt = 0 # count instance + for i in range(bbox.shape[0]): + if easy_view and (label[i] not in g_easy_view_labels): + continue + obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj' + mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl' + fout_obj = open(obj_filename, 'w') + fout_mtl = open(mtl_filename, 'w') + fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) + + length = bbox[i, 3:6] - bbox[i, 0:3] + a = length[0] + b = length[1] + c = length[2] + x = bbox[i, 0] + y = bbox[i, 1] + z = bbox[i, 2] + color = np.array(g_label2color[label[i]], dtype=float) / 255.0 + + material = 'material%d' % (ins_cnt) + fout_obj.write('usemtl %s\n' % (material)) + fout_obj.write('v %f %f %f\n' % (x,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y,z)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z)) + fout_obj.write('g default\n') + v_cnt = 0 # for individual box + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % 
(5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) + fout_obj.write('\n') + + fout_mtl.write('newmtl %s\n' % (material)) + fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) + fout_mtl.write('\n') + fout_obj.close() + fout_mtl.close() + + v_cnt += 8 + ins_cnt += 1 + +def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False): + """ Visualization of bounding boxes. + + Args: + input_filename: each line is x1 y1 z1 x2 y2 z2 label + out_filename_prefix: OBJ filename prefix, + visualize object by g_label2color + easy_view: if True, only visualize furniture and floor + permute: if not None, permute XYZ for rendering, e.g. [0 2 1] + center: if True, move obj to have zero origin + Returns: + output a list of OBJ file and MTL files with the same prefix + """ + bbox_label = np.loadtxt(input_filename) + bbox = bbox_label[:, 0:6] + if permute is not None: + assert(len(permute)==3) + permute = np.array(permute) + bbox[:,0:3] = bbox[:,permute] + bbox[:,3:6] = bbox[:,permute+3] + if center: + xyz_max = np.amax(bbox[:,3:6], 0) + bbox[:,0:3] -= (xyz_max/2.0) + bbox[:,3:6] -= (xyz_max/2.0) + bbox /= np.max(xyz_max/2.0) + label = bbox_label[:, -1].astype(int) + obj_filename = out_filename_prefix+'.obj' + mtl_filename = out_filename_prefix+'.mtl' + + fout_obj = open(obj_filename, 'w') + fout_mtl = open(mtl_filename, 'w') + fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) + v_cnt = 0 # count vertex + ins_cnt = 0 # count instance + for i in range(bbox.shape[0]): + if easy_view and (label[i] not in g_easy_view_labels): + continue + if exclude_table and label[i] == g_classes.index('table'): + continue + + length = bbox[i, 3:6] - bbox[i, 0:3] + a = length[0] + b = length[1] + c = length[2] + x = bbox[i, 0] + y = bbox[i, 1] + z = bbox[i, 2] + color = np.array(g_label2color[label[i]], dtype=float) / 255.0 + + material = 'material%d' % (ins_cnt) + fout_obj.write('usemtl %s\n' % (material)) + 
fout_obj.write('v %f %f %f\n' % (x,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) + fout_obj.write('v %f %f %f\n' % (x,y,z)) + fout_obj.write('v %f %f %f\n' % (x,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) + fout_obj.write('v %f %f %f\n' % (x+a,y,z)) + fout_obj.write('g default\n') + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) + fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) + fout_obj.write('\n') + + fout_mtl.write('newmtl %s\n' % (material)) + fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) + fout_mtl.write('\n') + + v_cnt += 8 + ins_cnt += 1 + + fout_obj.close() + fout_mtl.close() + + +def collect_point_bounding_box(anno_path, out_filename, file_format): + """ Compute bounding boxes from each instance in original dataset files on + one room. **We assume the bbox is aligned with XYZ coordinate.** + Save both the point XYZRGB and the bounding box for the point's + parent element. + + Args: + anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ + out_filename: path to save instance bounding boxes for each point, + plus the point's XYZRGBL + each line is XYZRGBL offsetX offsetY offsetZ a b c, + where cx = X+offsetX, cy=X+offsetY, cz=Z+offsetZ + where (cx,cy,cz) is center of the box, a,b,c are distances from center + to the surfaces of the box, i.e. x1 = cx-a, x2 = cx+a, y1=cy-b etc. + file_format: output file format, txt or numpy + Returns: + None + + Note: + room points are shifted, the most negative point is now at origin. 
+ """ + point_bbox_list = [] + + for f in glob.glob(os.path.join(anno_path, '*.txt')): + cls = os.path.basename(f).split('_')[0] + if cls not in g_classes: # note: in some room there is 'staris' class.. + cls = 'clutter' + points = np.loadtxt(f) # Nx6 + label = g_class2label[cls] # N, + # Compute tightest axis aligned bounding box + xyz_min = np.amin(points[:, 0:3], axis=0) # 3, + xyz_max = np.amax(points[:, 0:3], axis=0) # 3, + xyz_center = (xyz_min + xyz_max) / 2 + dimension = (xyz_max - xyz_min) / 2 + + xyz_offsets = xyz_center - points[:,0:3] # Nx3 + dimensions = np.ones((points.shape[0],3)) * dimension # Nx3 + labels = np.ones((points.shape[0],1)) * label # N + point_bbox_list.append(np.concatenate([points, labels, + xyz_offsets, dimensions], 1)) # Nx13 + + point_bbox = np.concatenate(point_bbox_list, 0) # KxNx13 + room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0) + point_bbox[:, 0:3] -= room_xyz_min + + if file_format == 'txt': + fout = open(out_filename, 'w') + for i in range(point_bbox.shape[0]): + fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' % \ + (point_bbox[i,0], point_bbox[i,1], point_bbox[i,2], + point_bbox[i,3], point_bbox[i,4], point_bbox[i,5], + point_bbox[i,6], + point_bbox[i,7], point_bbox[i,8], point_bbox[i,9], + point_bbox[i,10], point_bbox[i,11], point_bbox[i,12])) + + fout.close() + elif file_format == 'numpy': + np.save(out_filename, point_bbox) + else: + print('ERROR!! Unknown file format: %s, please use txt or numpy.' 
% \ + (file_format)) + exit() + + -- Gitee From 1c4df1d5f4d4f22d67d61dc9cb74d6fc884b7328 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 01:59:05 +0000 Subject: [PATCH 19/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils/Mode?= =?UTF-8?q?lNetDataLoader.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/ModelNetDataLoader.py" | 103 ++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" new file mode 100644 index 0000000..3e343ef --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" @@ -0,0 +1,103 @@ +import numpy as np +import warnings +import os +from torch.utils.data import Dataset +warnings.filterwarnings('ignore') + + + +def pc_normalize(pc): + centroid = np.mean(pc, axis=0) + pc = pc - centroid + m = np.max(np.sqrt(np.sum(pc**2, axis=1))) + pc = pc / m + return pc + +def farthest_point_sample(point, npoint): + """ + Input: + xyz: pointcloud data, [N, D] + npoint: number of samples + Return: + centroids: sampled pointcloud index, [npoint, D] + """ + N, D = point.shape + xyz = point[:,:3] + centroids = np.zeros((npoint,)) + distance = np.ones((N,)) * 1e10 + farthest = np.random.randint(0, N) + for i in range(npoint): + centroids[i] = farthest + centroid = xyz[farthest, :] + dist = np.sum((xyz - centroid) ** 2, -1) + mask = dist < distance + distance[mask] = dist[mask] + farthest = np.argmax(distance, -1) + point = point[centroids.astype(np.int32)] + return point + +class ModelNetDataLoader(Dataset): + def __init__(self, root, npoint=1024, split='train', uniform=False, normal_channel=True, cache_size=15000): + self.root = root + self.npoints = npoint + self.uniform = uniform + self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt') + + self.cat = [line.rstrip() for line in open(self.catfile)] + self.classes = dict(zip(self.cat, range(len(self.cat)))) + self.normal_channel = normal_channel + + shape_ids = {} + shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))] + shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))] + + assert (split == 'train' or split == 'test') + shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]] + # list of 
(shape_name, shape_txt_file_path) tuple + self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i + in range(len(shape_ids[split]))] + print('The size of %s data is %d'%(split,len(self.datapath))) + + self.cache_size = cache_size # how many data points to cache in memory + self.cache = {} # from index to (point_set, cls) tuple + + def __len__(self): + return len(self.datapath) + + def _get_item(self, index): + if index in self.cache: + point_set, cls = self.cache[index] + else: + fn = self.datapath[index] + cls = self.classes[self.datapath[index][0]] + cls = np.array([cls]).astype(np.int32) + point_set = np.loadtxt(fn[1], delimiter=',').astype(np.float32) + if self.uniform: + point_set = farthest_point_sample(point_set, self.npoints) + else: + point_set = point_set[0:self.npoints,:] + + point_set[:, 0:3] = pc_normalize(point_set[:, 0:3]) + + if not self.normal_channel: + point_set = point_set[:, 0:3] + + if len(self.cache) < self.cache_size: + self.cache[index] = (point_set, cls) + + return point_set, cls + + def __getitem__(self, index): + return self._get_item(index) + + + + +if __name__ == '__main__': + import torch + + data = ModelNetDataLoader('/data/modelnet40_normal_resampled/',split='train', uniform=False, normal_channel=True,) + DataLoader = torch.utils.data.DataLoader(data, batch_size=12, shuffle=True) + for point,label in DataLoader: + print(point.shape) + print(label.shape) \ No newline at end of file -- Gitee From 50f2d3418174cc1609b443476dd02d26f6b647c9 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:00:47 +0000 Subject: [PATCH 20/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils.?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/extract and save point cloud .py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" new file mode 100644 index 0000000..e69de29 -- Gitee From fb4f22317adbcf6cb34110a16cb440b686571e61 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:01:10 +0000 Subject: [PATCH 21/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils/extr?= =?UTF-8?q?act=20and=20save=20point=20cloud=20.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../extract and save point cloud .py" | 157 ++++++++++++++++++ 1 file changed, 157 insertions(+) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" index e69de29..c761dfd 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- +# 注意此时的laspy,有版本问题,2.0以上的版本就会出现问题,此时的版本是1.7.0 +from laspy.file import File +import os +import arcpy +import shutil +import numpy as np +np.set_printoptions(suppress=True) + +# 获取txt文件中的各种文件 +def GetData_fromFile(list_filename): + return [line.rstrip() for line in open(list_filename,'r')] + +# 只获取文件里面的shpfile中的shp文件 +def Get_specific_File(dir, ext): + need_file = [] + allfiles = os.listdir(dir) + for i in range(len(allfiles)): + name,extend = os.path.splitext(allfiles[i]) + if extend[1:] == ext: + root_shp_path = os.path.join(dir,allfiles[i]) + need_file.append(root_shp_path) + return need_file + +#分割对应点云块的shp文件,以便于下一步提取 +def split_move_shp(shp_path, out_path, label,splitfield): + arcpy.Split_analysis(in_features = 
shp_path, split_features = shp_path, + split_field = splitfield, out_workspace = out_path) + all_files = os.listdir(out_path) + for k in range(len(label)): + move_path_dir = os.path.join(out_path, label[k]) + for file in all_files: + name_first, ext_first = os.path.splitext(file) # 分离文件名和后缀 + f_name, ext_second = os.path.splitext(name_first) # 再次分离文件名和后缀,因为出现了.shp.xml的文件,对于.shp的不影响 + if f_name[:-4] == label[k]: + shutil.move( os.path.join(out_path,file), move_path_dir) + # 点云规范化 +def PC_NORMLIZE(pc): + centroid = np.mean(pc, axis=0) + pc = pc - centroid + m = np.max(np.sqrt(np.sum(pc**2, axis=1))) + pc = pc / m + return pc + +# 采样到2048 +def sample_data(data, num_sample): + """ data is in N x ... + we want to keep num_samplexC of them. + if N > num_sample, we will randomly keep num_sample of them. + if N < num_sample, we will randomly duplicate samples. + """ + N = data.shape[0] + if (N == num_sample): + return data + elif (N > num_sample): + sample = np.random.choice(N, num_sample) + return data[sample, ...] + else: + # 相当于从N中抽取 num_sample - N个随机数组成一维数组array,成为data的下标索引值 + sample = np.random.choice(N, num_sample - N) + dup_data = data[sample, ...] 
# 取数据 + # 按行拼接 + return np.concatenate([data, dup_data], axis=0) + # return np.concatenate([data, dup_data], 0), list(range(N)) + list(sample) + + +if __name__ == "__main__": + + las_name = GetData_fromFile('E:\CITYtest\FilelistAdd/lasname.txt') + label_name = GetData_fromFile('E:\CITYtest\FilelistAdd\shapelable.txt') + #自定义需要得到什么类型的文件数据 + file_class = ['Classshpfile', 'Classlasfile','Classtxtfile'] + #输入文件·路径 + input_shp_dir = 'E:\CITYtest\PCinput//' + input_las_dir = 'E:\CITYtest/NDSMlasdata//' + save_dir = 'E:\CITYtest\savepath/' + + + net_num = 2048 + for m in range(len(las_name)): + # 每个类别shp文件的路径 + shp_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[0] + las_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[1] + txt_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[2] + # 获取每整个点云块的shp文件 + # 注意shp_full 是一个列表文件,不是直接变量,引用时需要用shp_full[0,1,2] + shp_full = Get_specific_File(os.path.join(input_shp_dir, las_name[m]), 'shp') + # # # 开始对整个shp文件分割和移动 + if len(shp_full) != 0: + split_move_shp( shp_full[0], shp_save_path, label_name,'newname') + # 数据分割完毕,开始进行las点云提取 + # 得到每个label文件下的shp文件 + # 定义要提取的las路径,开始提取 + all_las_list = os.listdir(input_las_dir) + + las_ndsm_name, extend = os.path.splitext(all_las_list[m]) + + origin_las = os.path.join(input_las_dir, all_las_list[m]) + + for i in range(len(label_name)): + # 获取每个类别的单个样本shp + single_shp_path = os.path.join(shp_save_path, label_name[i]) + shp_list = Get_specific_File(single_shp_path, 'shp') + # 提取单个las的路径 + single_las_save = os.path.join(las_save_path, label_name[i]) + + if len(shp_list) != 0: + for j in range(len(shp_list)): + stir = str(j + 1) + st = stir.zfill(4) # 补零补够四位数 + arcpy.ddd.ExtractLas(in_las_dataset = origin_las, + target_folder = single_las_save, + boundary= shp_list[j], + name_suffix=label_name[i] + st, remove_vlr=True, + rearrange_points='REARRANGE_POINTS', ) + + las_list_path = os.path.join(single_las_save, las_ndsm_name + label_name[i] + st + '.las') + + # 
只提取样本las中的X,Y,Z和intensity,raw_classification信息 + f = File(las_list_path, mode='rw') + # 改变样本标签为 0,1,2 + # print(f.x.shape) + point_cloud = np.vstack((f.x, f.y, f.z, f.intensity, f.raw_classification)).T + + point_cloud[:,4] = i + # 给点云加上底面,获取行数 + row_num = point_cloud.shape[0] + # 生成和行数一 样的一维0数组 + z_back = np.zeros(row_num) + # 底面 + point_back = np.vstack((f.x, f.y, z_back, f.intensity, f.raw_classification)).T + # 屋顶和底面放一起 + # print(point_back.shape) + point_cloud_add = np.vstack((point_cloud, point_back)) + # point_cloud_add = np.concatenate((point_cloud, point_back),axis=0) + # 采样到2048 + sample_pc = sample_data(point_cloud_add, net_num) + # 只取数据坐标信息归一化,中心化 + pc_norm = PC_NORMLIZE(sample_pc[:, 0:3]) + # 规范后数据代替原来的坐标 + sample_pc[:, 0:3] = pc_norm + # 存储为txt格式 + single_txt_save = os.path.join(txt_save_path,label_name[i]) + + np.savetxt( os.path.join(single_txt_save , las_name[m] + label_name[i] + st + '.txt'), + sample_pc, fmt="%.6f", delimiter=" ") + else: + print ("该类别无样本数据") + + else: + print('该块las的样本shp为空') + + print('successful') +print('over') + + + -- Gitee From 773bfa0aa6e9830cf87f9588d37ca8c7a26eda05 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:01:32 +0000 Subject: [PATCH 22/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/make hdf5_file.py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" new file mode 100644 index 0000000..e69de29 -- Gitee From 53230781023359bde51ecb58feaeb673cb08a964 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:01:49 +0000 Subject: [PATCH 23/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data=5Futils/make?= =?UTF-8?q?=20hdf5=5Ffile.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data_utils/make hdf5_file.py" | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" index e69de29..8fad1d8 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" @@ -0,0 +1,58 @@ +# -*- coding: UTF-8 -*- +import os +import numpy as np +import h5py +np.set_printoptions(suppress=True) + +def getDataFiles(path_root): + filelist = os.listdir(path_root) + for i in range(len(filelist)): + filelist[i] = os.path.join(path_root,filelist[i]) + return filelist + +#得到的返回值是一维字符串数组 +def loadDataFile(path): + data = np.loadtxt(path) + point_xyz = data[:, 0:3] + label=data[:,4] + label_int = label.astype(int) + return point_xyz, label_int + +if __name__ == "__main__": + + train_file = ['train0', 'test0'] + net_num = 2048 + train_dir = 'E:\CITYtest\Trainfile//' + + for i in range(len(train_file)): + root_file = os.path.join(train_dir,train_file[i]) + DATA_FILES = getDataFiles(root_file) + + DATA_ALL = [] + LABEL_ALL = [] + for fn in range(len(DATA_FILES)): + pre_data, his_label = loadDataFile(DATA_FILES[fn]) + pre_label = his_label.reshape(net_num, 1) # 重塑为num行1列的数据 得到num个点云的标签 + + # data_label = np.hstack((pre_data, pre_label)) + DATA_ALL.append(pre_data) #列表元素 + # label一个样本中的类别是一样的,取一个数字就可以了 + LABEL_ALL.append(pre_label[0]) + + # 把DATA_ALL和LABEL_ALL的列表转换成数组格式, + out_data 
= np.vstack(DATA_ALL) + out_label = np.vstack(LABEL_ALL) + + # 重塑为三维数组,2048 个 2048*4 的三个数组 + data_reshape = out_data.reshape(net_num, net_num, 3) + # 写入训练数据 + filename= train_dir +'/'+ 'point_data_' + train_file[i] + '.h5' + if not os.path.exists(filename): + with h5py.File(filename, 'w') as f: + f['data'] = data_reshape + f['label'] = out_label + f.close() + else: + print('hdf5文件已存在') + +print("over") -- Gitee From cd7c404ea590c2a3274a3f423f3fbacec576354c Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:02:41 +0000 Subject: [PATCH 24/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../provider.py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" new file mode 100644 index 0000000..e69de29 -- Gitee From 
649877a61b29450d9a2c8c33c65fef3bd4ec1aa4 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:02:50 +0000 Subject: [PATCH 25/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/provider.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../provider.py" | 251 ++++++++++++++++++ 1 file changed, 251 insertions(+) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" index e69de29..5604691 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" @@ -0,0 +1,251 @@ +import numpy as np + +def normalize_data(batch_data): + """ Normalize the batch data, use coordinates of the block centered at origin, + Input: + BxNxC array + Output: + 
BxNxC array + """ + B, N, C = batch_data.shape + normal_data = np.zeros((B, N, C)) + for b in range(B): + pc = batch_data[b] + centroid = np.mean(pc, axis=0) + pc = pc - centroid + m = np.max(np.sqrt(np.sum(pc ** 2, axis=1))) + pc = pc / m + normal_data[b] = pc + return normal_data + + +def shuffle_data(data, labels): + """ Shuffle data and labels. + Input: + data: B,N,... numpy array + label: B,... numpy array + Return: + shuffled data, label and shuffle indices + """ + idx = np.arange(len(labels)) + np.random.shuffle(idx) + return data[idx, ...], labels[idx], idx + +def shuffle_points(batch_data): + """ Shuffle orders of points in each point cloud -- changes FPS behavior. + Use the same shuffling idx for the entire batch. + Input: + BxNxC array + Output: + BxNxC array + """ + idx = np.arange(batch_data.shape[1]) + np.random.shuffle(idx) + return batch_data[:,idx,:] + +def rotate_point_cloud(batch_data): + """ Randomly rotate the point clouds to augument the dataset + rotation is per shape based along up direction + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_data[k, ...] + rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + return rotated_data + +def rotate_point_cloud_z(batch_data): + """ Randomly rotate the point clouds to augument the dataset + rotation is per shape based along up direction + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, sinval, 0], + [-sinval, cosval, 0], + [0, 0, 1]]) + shape_pc = batch_data[k, ...] + rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + return rotated_data + +def rotate_point_cloud_with_normal(batch_xyz_normal): + ''' Randomly rotate XYZ, normal point cloud. + Input: + batch_xyz_normal: B,N,6, first three channels are XYZ, last 3 all normal + Output: + B,N,6, rotated XYZ, normal point cloud + ''' + for k in range(batch_xyz_normal.shape[0]): + rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_xyz_normal[k,:,0:3] + shape_normal = batch_xyz_normal[k,:,3:6] + batch_xyz_normal[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + batch_xyz_normal[k,:,3:6] = np.dot(shape_normal.reshape((-1, 3)), rotation_matrix) + return batch_xyz_normal + +def rotate_perturbation_point_cloud_with_normal(batch_data, angle_sigma=0.06, angle_clip=0.18): + """ Randomly perturb the point clouds by small rotations + Input: + BxNx6 array, original batch of point clouds and point normals + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + angles = np.clip(angle_sigma*np.random.randn(3), 
-angle_clip, angle_clip) + Rx = np.array([[1,0,0], + [0,np.cos(angles[0]),-np.sin(angles[0])], + [0,np.sin(angles[0]),np.cos(angles[0])]]) + Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])], + [0,1,0], + [-np.sin(angles[1]),0,np.cos(angles[1])]]) + Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0], + [np.sin(angles[2]),np.cos(angles[2]),0], + [0,0,1]]) + R = np.dot(Rz, np.dot(Ry,Rx)) + shape_pc = batch_data[k,:,0:3] + shape_normal = batch_data[k,:,3:6] + rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), R) + rotated_data[k,:,3:6] = np.dot(shape_normal.reshape((-1, 3)), R) + return rotated_data + + +def rotate_point_cloud_by_angle(batch_data, rotation_angle): + """ Rotate the point cloud along up direction with certain angle. + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + #rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_data[k,:,0:3] + rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + return rotated_data + +def rotate_point_cloud_by_angle_with_normal(batch_data, rotation_angle): + """ Rotate the point cloud along up direction with certain angle. 
+ Input: + BxNx6 array, original batch of point clouds with normal + scalar, angle of rotation + Return: + BxNx6 array, rotated batch of point clouds iwth normal + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + #rotation_angle = np.random.uniform() * 2 * np.pi + cosval = np.cos(rotation_angle) + sinval = np.sin(rotation_angle) + rotation_matrix = np.array([[cosval, 0, sinval], + [0, 1, 0], + [-sinval, 0, cosval]]) + shape_pc = batch_data[k,:,0:3] + shape_normal = batch_data[k,:,3:6] + rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) + rotated_data[k,:,3:6] = np.dot(shape_normal.reshape((-1,3)), rotation_matrix) + return rotated_data + + + +def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18): + """ Randomly perturb the point clouds by small rotations + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, rotated batch of point clouds + """ + rotated_data = np.zeros(batch_data.shape, dtype=np.float32) + for k in range(batch_data.shape[0]): + angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip) + Rx = np.array([[1,0,0], + [0,np.cos(angles[0]),-np.sin(angles[0])], + [0,np.sin(angles[0]),np.cos(angles[0])]]) + Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])], + [0,1,0], + [-np.sin(angles[1]),0,np.cos(angles[1])]]) + Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0], + [np.sin(angles[2]),np.cos(angles[2]),0], + [0,0,1]]) + R = np.dot(Rz, np.dot(Ry,Rx)) + shape_pc = batch_data[k, ...] + rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), R) + return rotated_data + + +def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): + """ Randomly jitter points. jittering is per point. 
+ Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, jittered batch of point clouds + """ + B, N, C = batch_data.shape + assert(clip > 0) + jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip) + jittered_data += batch_data + return jittered_data + +def shift_point_cloud(batch_data, shift_range=0.1): + """ Randomly shift point cloud. Shift is per point cloud. + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, shifted batch of point clouds + """ + B, N, C = batch_data.shape + shifts = np.random.uniform(-shift_range, shift_range, (B,3)) + for batch_index in range(B): + batch_data[batch_index,:,:] += shifts[batch_index,:] + return batch_data + + +def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): + """ Randomly scale the point cloud. Scale is per point cloud. + Input: + BxNx3 array, original batch of point clouds + Return: + BxNx3 array, scaled batch of point clouds + """ + B, N, C = batch_data.shape + scales = np.random.uniform(scale_low, scale_high, B) + for batch_index in range(B): + batch_data[batch_index,:,:] *= scales[batch_index] + return batch_data + +def random_point_dropout(batch_pc, max_dropout_ratio=0.875): + ''' batch_pc: BxNx3 ''' + for b in range(batch_pc.shape[0]): + dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875 + drop_idx = np.where(np.random.random((batch_pc.shape[1]))<=dropout_ratio)[0] + if len(drop_idx)>0: + batch_pc[b,drop_idx,:] = batch_pc[b,0,:] # set to the first point + return batch_pc + + + -- Gitee From 00108bb7c8a243e975adc92cee04bef5bb2f022f Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:03:20 +0000 Subject: [PATCH 26/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= 
=?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../train_cls.py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" new file mode 100644 index 0000000..e69de29 -- Gitee From b45325a410a92b0ea1afc53921cf15d1870a5ea2 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:03:51 +0000 Subject: [PATCH 27/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/train=5Fcls.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../train_cls.py" | 229 ++++++++++++++++++ 1 file changed, 229 insertions(+) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" index e69de29..2eee725 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" @@ -0,0 +1,229 @@ +""" +Author: Benny +Date: Nov 2019 +""" +from data_utils.ModelNetDataLoader import ModelNetDataLoader +import argparse +import numpy as np +import os +import torch +import datetime +import logging +from pathlib import Path +from tqdm import tqdm +import sys +import provider +import importlib +import shutil + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(os.path.join(ROOT_DIR, 'models')) + +torch.backends.cudnn.enabled = False +# torch.backends.cudnn.enabled = True +# torch.backends.cudnn.benchmark = False + + +# num_class = 40 +def parse_args(): + '''PARAMETERS''' + parser = argparse.ArgumentParser('PointNet') + parser.add_argument('--batch_size', type=int, default = 8, help='batch size in training [default: 24]') + + parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]') + + 
parser.add_argument('--num_class', type=int, default= 3, help='class number [default: 40]') + + parser.add_argument('--epoch', default= 150, type=int, help='number of epoch in training [default: 200]') + + parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]') + parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]') + + parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') + + parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]') + parser.add_argument('--log_dir', type=str, default=None, help='experiment root') + parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]') + parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]') + args = parser.parse_args() + return args +# https://whttps://www.cnblogs.com/yibeimingyue/p/13800159.html +# argparse.ArgumentParser函数的解释。 + +def test(model, loader,class_number): + class_number =args.num_class + # num_class = args.num_class + + mean_correct = [] + class_acc = np.zeros((class_number,3)) + for j, data in tqdm(enumerate(loader), total=len(loader)): + points, target = data + target = target[:, 0] + points = points.transpose(2, 1) + points, target = points.cuda(), target.cuda() + classifier = model.eval() + pred, _ = classifier(points) + pred_choice = pred.data.max(1)[1] + for cat in np.unique(target.cpu()): + classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum() + class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0]) + class_acc[cat,1]+=1 + correct = pred_choice.eq(target.long().data).cpu().sum() + mean_correct.append(correct.item()/float(points.size()[0])) + class_acc[:,2] = class_acc[:,0]/ class_acc[:,1] + class_acc = np.mean(class_acc[:,2]) + instance_acc = 
np.mean(mean_correct) + return instance_acc, class_acc + + +def main(args): + def log_string(str): + logger.info(str) + print(str) + + '''HYPER PARAMETER''' + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + '''CREATE DIR''' + timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')) + experiment_dir = Path('./log/') + experiment_dir.mkdir(exist_ok=True) + experiment_dir = experiment_dir.joinpath('classification') + experiment_dir.mkdir(exist_ok=True) + if args.log_dir is None: + experiment_dir = experiment_dir.joinpath(timestr) + else: + experiment_dir = experiment_dir.joinpath(args.log_dir) + experiment_dir.mkdir(exist_ok=True) + checkpoints_dir = experiment_dir.joinpath('checkpoints/') + checkpoints_dir.mkdir(exist_ok=True) + log_dir = experiment_dir.joinpath('logs/') + log_dir.mkdir(exist_ok=True) + + '''LOG''' + args = parse_args() + logger = logging.getLogger("Model") + logger.setLevel(logging.INFO) + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model)) + file_handler.setLevel(logging.INFO) + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + log_string('PARAMETER ...') + log_string(args) + + '''DATA LOADING''' + log_string('Load dataset ...') + + DATA_PATH = '/home/cc/PNtorch-test/PointNet-torch1.1-two/data/modelnet40_normal_resampled/' + + TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', + normal_channel=args.normal) + TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', + normal_channel=args.normal) + + trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4) + testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4) + + '''MODEL LOADING''' + # num_class = 40 + MODEL = importlib.import_module(args.model) + 
shutil.copy('./models/%s.py' % args.model, str(experiment_dir)) + shutil.copy('./models/pointnet_util.py', str(experiment_dir)) + + classifier = MODEL.get_model(args.num_class,normal_channel=args.normal).cuda() + criterion = MODEL.get_loss().cuda() + + try: + checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') + start_epoch = checkpoint['epoch'] + classifier.load_state_dict(checkpoint['model_state_dict']) + log_string('Use pretrain model') + except: + log_string('No existing model, starting training from scratch...') + start_epoch = 0 + + + if args.optimizer == 'Adam': + optimizer = torch.optim.Adam( + classifier.parameters(), + lr=args.learning_rate, + betas=(0.9, 0.999), + eps=1e-08, + weight_decay=args.decay_rate + ) + else: + optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9) + + scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7) + global_epoch = 0 + global_step = 0 + best_instance_acc = 0.0 + best_class_acc = 0.0 + mean_correct = [] + + '''TRANING''' + logger.info('Start training...') + for epoch in range(start_epoch,args.epoch): + log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch)) + + scheduler.step() + for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9): + points, target = data + points = points.data.numpy() + points = provider.random_point_dropout(points) + points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3]) + points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3]) + points = torch.Tensor(points) + target = target[:, 0] + + points = points.transpose(2, 1) + points, target = points.cuda(), target.cuda() + optimizer.zero_grad() + + classifier = classifier.train() + pred, trans_feat = classifier(points) + loss = criterion(pred, target.long(), trans_feat) + pred_choice = pred.data.max(1)[1] + correct = pred_choice.eq(target.long().data).cpu().sum() + mean_correct.append(correct.item() 
/ float(points.size()[0])) + loss.backward() + optimizer.step() + global_step += 1 + + train_instance_acc = np.mean(mean_correct) + log_string('Train Instance Accuracy: %f' % train_instance_acc) + + + with torch.no_grad(): + instance_acc, class_acc = test(classifier.eval(), testDataLoader) + + if (instance_acc >= best_instance_acc): + best_instance_acc = instance_acc + best_epoch = epoch + 1 + + if (class_acc >= best_class_acc): + best_class_acc = class_acc + log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc)) + log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc)) + + if (instance_acc >= best_instance_acc): + logger.info('Save model...') + savepath = str(checkpoints_dir) + '/best_model.pth' + log_string('Saving at %s'% savepath) + state = { + 'epoch': best_epoch, + 'instance_acc': instance_acc, + 'class_acc': class_acc, + 'model_state_dict': classifier.state_dict(), + 'optimizer_state_dict': optimizer.state_dict(), + } + torch.save(state, savepath) + global_epoch += 1 + + logger.info('End of training...') + +if __name__ == '__main__': + args = parse_args() + main(args) -- Gitee From 19745ee48bc8912f38c446d5ff1cdf07a5653f67 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:13:27 +0000 Subject: [PATCH 28/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/train=5Fcls.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../train_cls.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" index 2eee725..94bd1f6 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" @@ -116,7 +116,7 @@ def main(args): '''DATA LOADING''' log_string('Load dataset ...') - DATA_PATH = '/home/cc/PNtorch-test/PointNet-torch1.1-two/data/modelnet40_normal_resampled/' + DATA_PATH = os.path.join(BASE_DIR,'data/modelnet40/') TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', normal_channel=args.normal) -- Gitee From 9724d096ee7261c70331d7fa288e600a35a207e6 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:13:46 +0000 Subject: [PATCH 29/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../test_cls.py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" new file mode 100644 index 0000000..e69de29 -- Gitee From 78d8fb1f953d208a734daa02c9f5951945c5cf31 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:14:11 +0000 Subject: [PATCH 30/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/test=5Fcls.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../test_cls.py" | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" index e69de29..9e9fa8d 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" @@ -0,0 +1,105 @@ +""" +Author: Benny +Date: Nov 2019 +""" +from data_utils.ModelNetDataLoader import ModelNetDataLoader +import argparse +import numpy as np +import os +import torch +import logging +from tqdm import tqdm +import sys +import importlib + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(os.path.join(ROOT_DIR, 'models')) + + +def parse_args(): + '''PARAMETERS''' + parser = argparse.ArgumentParser('PointNet') + parser.add_argument('--batch_size', type=int, default=8, help='batch size in training') + parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') + parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') + parser.add_argument('--log_dir', type=str, default='pointnet2_ssg_normal', help='Experiment root') + parser.add_argument('--normal', action='store_true', default=True, help='Whether to use normal information [default: False]') + parser.add_argument('--num_votes', type=int, default=3, help='Aggregate classification scores with voting [default: 3]') + return parser.parse_args() + +def test(model, loader, num_class=40, 
vote_num=1): + mean_correct = [] + class_acc = np.zeros((num_class,3)) + for j, data in tqdm(enumerate(loader), total=len(loader)): + points, target = data + target = target[:, 0] + points = points.transpose(2, 1) + points, target = points.cuda(), target.cuda() + classifier = model.eval() + vote_pool = torch.zeros(target.size()[0],num_class).cuda() + for _ in range(vote_num): + pred, _ = classifier(points) + vote_pool += pred + pred = vote_pool/vote_num + pred_choice = pred.data.max(1)[1] + for cat in np.unique(target.cpu()): + classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum() + class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0]) + class_acc[cat,1]+=1 + correct = pred_choice.eq(target.long().data).cpu().sum() + mean_correct.append(correct.item()/float(points.size()[0])) + class_acc[:,2] = class_acc[:,0]/ class_acc[:,1] + class_acc = np.mean(class_acc[:,2]) + instance_acc = np.mean(mean_correct) + return instance_acc, class_acc + + +def main(args): + def log_string(str): + logger.info(str) + print(str) + + '''HYPER PARAMETER''' + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu + + '''CREATE DIR''' + experiment_dir = 'log/classification/' + args.log_dir + + '''LOG''' + args = parse_args() + logger = logging.getLogger("Model") + logger.setLevel(logging.INFO) + formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir) + file_handler.setLevel(logging.INFO) + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + log_string('PARAMETER ...') + log_string(args) + + '''DATA LOADING''' + log_string('Load dataset ...') + DATA_PATH = 'data/modelnet40/' + TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal) + testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4) + + '''MODEL LOADING''' + 
num_class = 40 + model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0] + MODEL = importlib.import_module(model_name) + + classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda() + + checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') + classifier.load_state_dict(checkpoint['model_state_dict']) + + with torch.no_grad(): + instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes) + log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc)) + + + +if __name__ == '__main__': + args = parse_args() + main(args) -- Gitee From d8283a82a4448f036ec419e2f1fb5a3e6ba21517 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:14:58 +0000 Subject: [PATCH 31/69] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20code/2022?= =?UTF-8?q?=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/data/modelnet40=5Fply=5Fhdf5=5F2048=20=E4=B8=BA=20code/2022=5F?= =?UTF-8?q?autumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/data/modelnet40?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data/modelnet40/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" => 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" (100%) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40_ply_hdf5_2048/.keep" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" -- Gitee From da6132d1fa8d6638a5c2a9e08f3a5bf44d00fbd9 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:15:11 +0000 Subject: [PATCH 32/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data/modelnet40.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data/modelnet40/download" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" new file mode 100644 index 0000000..e69de29 -- Gitee From 195a675877f13b295b02fd893eaa62abc56038a0 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Thu, 27 Oct 2022 02:15:19 +0000 Subject: [PATCH 33/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data/modelnet40/d?= =?UTF-8?q?ownload.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data/modelnet40/download" | 2 ++ 1 file changed, 2 insertions(+) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" index e69de29..d850c61 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" @@ -0,0 +1,2 @@ +链接:https://pan.baidu.com/s/1J1svTCLW_Iy7blMan0YiPQ +提取码:20yl \ No newline at end of file -- Gitee From e9050f21fe21731e9519ae688d4192eb54d99191 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:14:03 +0000 Subject: [PATCH 34/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/models/pointnet2?= =?UTF-8?q?=5Fcls=5Fssg.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../models/pointnet2_cls_ssg.py" | 62 +++++++++++++++++++ 1 file changed, 62 
insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" new file mode 100644 index 0000000..1e845ff --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" @@ -0,0 +1,62 @@ +""" + PointNet++ Model for point clouds classification +""" + +import os +import sys +BASE_DIR = os.path.dirname(__file__) +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(BASE_DIR, '../utils')) +import tensorflow as tf +import numpy as np +import tf_util +from pointnet_util import pointnet_sa_module + +def placeholder_inputs(batch_size, num_point): + pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3)) + labels_pl = tf.placeholder(tf.int32, shape=(batch_size)) + return pointclouds_pl, labels_pl + +def get_model(point_cloud, is_training, bn_decay=None): + """ Classification PointNet, input is BxNx3, output Bx40 """ + batch_size = point_cloud.get_shape()[0].value + num_point = point_cloud.get_shape()[1].value + 
end_points = {} + l0_xyz = point_cloud + l0_points = None + end_points['l0_xyz'] = l0_xyz + + # Set abstraction layers + # Note: When using NCHW for layer 2, we see increased GPU memory usage (in TF1.4). + # So we only use NCHW for layer 1 until this issue can be resolved. + l1_xyz, l1_points, l1_indices = pointnet_sa_module(l0_xyz, l0_points, npoint=512, radius=0.2, nsample=32, mlp=[64,64,128], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer1', use_nchw=True) + l2_xyz, l2_points, l2_indices = pointnet_sa_module(l1_xyz, l1_points, npoint=128, radius=0.4, nsample=64, mlp=[128,128,256], mlp2=None, group_all=False, is_training=is_training, bn_decay=bn_decay, scope='layer2') + l3_xyz, l3_points, l3_indices = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256,512,1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3') + + # Fully connected layers + net = tf.reshape(l3_points, [batch_size, -1]) + net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay) + net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp1') + net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay) + net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training, scope='dp2') + net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3') + + return net, end_points + + +def get_loss(pred, label, end_points): + """ pred: B*NUM_CLASSES, + label: B, """ + loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label) + classify_loss = tf.reduce_mean(loss) + tf.summary.scalar('classify loss', classify_loss) + tf.add_to_collection('losses', classify_loss) + return classify_loss + + +if __name__=='__main__': + with tf.Graph().as_default(): + inputs = tf.zeros((32,1024,3)) + output, _ = get_model(inputs, tf.constant(True)) + print(output) + -- Gitee 
From 574a26a7dd6b84b0268b54a1cb85fe94b096328f Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:14:31 +0000 Subject: [PATCH 35/69] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20code/2022?= =?UTF-8?q?=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/models/pointnet=5Fcls.py=20=E4=B8=BA=20code/2022=5Fautumn/?= =?UTF-8?q?=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9?= =?UTF-8?q?=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87?= =?UTF-8?q?=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/models/p?= =?UTF-8?q?ointnet=5Fcls.py=20for=20torch?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models/pointnet_cls.py for torch" | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" => "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" (100%) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" -- Gitee From ce79e4b03069f8c7c945627ea0757c7046b6c487 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:14:43 +0000 Subject: [PATCH 36/69] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20code/2022?= =?UTF-8?q?=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/models/pointnet2=5Fcls=5Fssg.py=20=E4=B8=BA=20code/2022=5Fautu?= =?UTF-8?q?mn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9?= =?UTF-8?q?=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87?= =?UTF-8?q?=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/models/p?= =?UTF-8?q?ointnet2=5Fcls=5Fssg.py=20for=20tf?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models/pointnet2_cls_ssg.py for tf" | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" => "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" (100%) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" -- Gitee From 9b5d415559c30952ca3347014d14b5d5cb2ddfe4 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:26:36 +0000 Subject: [PATCH 37/69] pointnet-tf Signed-off-by: 
yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../evaluate.py" | 166 ++++++++ .../modelnet_dataset.py" | 144 +++++++ .../modelnet_h5_dataset.py" | 126 ++++++ .../tf_interpolate_compile.sh" | 7 + ...347\232\204gpu\350\256\276\345\244\207.py" | 12 + .../train.py" | 307 +++++++++++++++ .../train_multi_gpu.py" | 364 ++++++++++++++++++ ...57\345\220\246\345\217\257\347\224\250.py" | 3 + 8 files changed, 1129 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_dataset.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_h5_dataset.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf\346\243\200\346\237\245\345\217\257\347\224\250\347\232\204gpu\350\256\276\345\244\207.py" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_multi_gpu.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/\346\265\213\350\257\225cudnn\346\230\257\345\220\246\345\217\257\347\224\250.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" new file mode 100644 index 0000000..224d65b --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" @@ -0,0 +1,166 @@ +''' + Evaluate classification performance with optional voting. + Will use H5 dataset in default. If using normal, will shift to the normal dataset. 
+''' +import tensorflow as tf +import numpy as np +import argparse +import socket +import importlib +import time +import os +import scipy.misc +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(ROOT_DIR, 'models')) +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import provider +import modelnet_dataset +import modelnet_h5_dataset + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name. [default: pointnet2_cls_ssg]') +parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 16]') + + +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [256/512/1024/2048] [default: 1024]') +parser.add_argument('--model_path', default='log/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]') +parser.add_argument('--dump_dir', default='dump', help='dump folder path [dump]') +parser.add_argument('--normal', action='store_true', help='Whether to use normal information') +parser.add_argument('--num_votes', type=int, default=1, help='Aggregate classification scores from multiple rotations [default: 1]') +FLAGS = parser.parse_args() + + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MODEL_PATH = FLAGS.model_path +GPU_INDEX = FLAGS.gpu +MODEL = importlib.import_module(FLAGS.model) # import network module +DUMP_DIR = FLAGS.dump_dir +if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) +LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +NUM_CLASSES = 40 +SHAPE_NAMES = [line.rstrip() for line in \ + open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] + +HOSTNAME = socket.gethostname() + +# Shapenet official train/test split +if FLAGS.normal: + assert(NUM_POINT<=10000) + 
DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled') + TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE) + TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE) +else: + assert(NUM_POINT<=2048) + TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True) + TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def evaluate(num_votes): + is_training = False + + with tf.device('/gpu:'+str(GPU_INDEX)): + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.placeholder(tf.bool, shape=()) + + # simple model + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl) + MODEL.get_loss(pred, labels_pl, end_points) + losses = tf.get_collection('losses') + total_loss = tf.add_n(losses, name='total_loss') + + # Add ops to save and restore all the variables. + saver = tf.train.Saver() + + # Create a session + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = False + sess = tf.Session(config=config) + + # Restore variables from disk. 
+ saver.restore(sess, MODEL_PATH) + log_string("Model restored.") + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': total_loss} + + eval_one_epoch(sess, ops, num_votes) + +def eval_one_epoch(sess, ops, num_votes=1, topk=1): + is_training = False + + # Make sure batch data is of same size + cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel())) + cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32) + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx = 0 + shape_ious = [] + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + while TEST_DATASET.has_next_batch(): + batch_data, batch_label = TEST_DATASET.next_batch(augment=False) + bsize = batch_data.shape[0] + print('Batch: %03d, batch size: %d'%(batch_idx, bsize)) + # for the last batch in the epoch, the bsize:end are from last batch + cur_batch_data[0:bsize,...] 
= batch_data + cur_batch_label[0:bsize] = batch_label + + batch_pred_sum = np.zeros((BATCH_SIZE, NUM_CLASSES)) # score for classes + for vote_idx in range(num_votes): + # Shuffle point order to achieve different farthest samplings + shuffled_indices = np.arange(NUM_POINT) + np.random.shuffle(shuffled_indices) + if FLAGS.normal: + rotated_data = provider.rotate_point_cloud_by_angle_with_normal(cur_batch_data[:, shuffled_indices, :], + vote_idx/float(num_votes) * np.pi * 2) + else: + rotated_data = provider.rotate_point_cloud_by_angle(cur_batch_data[:, shuffled_indices, :], + vote_idx/float(num_votes) * np.pi * 2) + feed_dict = {ops['pointclouds_pl']: rotated_data, + ops['labels_pl']: cur_batch_label, + ops['is_training_pl']: is_training} + loss_val, pred_val = sess.run([ops['loss'], ops['pred']], feed_dict=feed_dict) + batch_pred_sum += pred_val + pred_val = np.argmax(batch_pred_sum, 1) + correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize]) + total_correct += correct + total_seen += bsize + loss_sum += loss_val + batch_idx += 1 + for i in range(bsize): + l = batch_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(batch_idx))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + + class_accuracies = np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float) + for i, name in enumerate(SHAPE_NAMES): + log_string('%10s:\t%0.3f' % (name, class_accuracies[i])) + + +if __name__=='__main__': + with tf.Graph().as_default(): + evaluate(num_votes=FLAGS.num_votes) + LOG_FOUT.close() diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_dataset.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_dataset.py" new file mode 100644 index 0000000..78f326e --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_dataset.py" @@ -0,0 +1,144 @@ +''' + ModelNet dataset. Support ModelNet40, ModelNet10, XYZ and normal channels. Up to 10000 points. +''' + +import os +import os.path +import json +import numpy as np +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import provider + +def pc_normalize(pc): + l = pc.shape[0] + centroid = np.mean(pc, axis=0) + pc = pc - centroid + m = np.max(np.sqrt(np.sum(pc**2, axis=1))) + pc = pc / m + return pc + +class ModelNetDataset(): + def __init__(self, root, batch_size = 32, npoints = 1024, split='train', normalize=True, normal_channel=False, modelnet10=False, cache_size=15000, shuffle=None): + self.root = root + self.batch_size = batch_size + self.npoints = npoints + self.normalize = normalize + if modelnet10: + self.catfile = os.path.join(self.root, 'modelnet10_shape_names.txt') + else: + self.catfile = os.path.join(self.root, 'shape_names.txt') + self.cat = [line.rstrip() for line in open(self.catfile)] + self.classes = dict(zip(self.cat, range(len(self.cat)))) + self.normal_channel = 
normal_channel + + shape_ids = {} + if modelnet10: + shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_train.txt'))] + shape_ids['test']= [line.rstrip() for line in open(os.path.join(self.root, 'modelnet10_test.txt'))] + else: + shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))] + shape_ids['test']= [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))] + assert(split=='train' or split=='test') + shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]] + # list of (shape_name, shape_txt_file_path) tuple + self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i])+'.txt') for i in range(len(shape_ids[split]))] + + self.cache_size = cache_size # how many data points to cache in memory + self.cache = {} # from index to (point_set, cls) tuple + + if shuffle is None: + if split == 'train': self.shuffle = True + else: self.shuffle = False + else: + self.shuffle = shuffle + + self.reset() + + def _augment_batch_data(self, batch_data): + if self.normal_channel: + rotated_data = provider.rotate_point_cloud_with_normal(batch_data) + rotated_data = provider.rotate_perturbation_point_cloud_with_normal(rotated_data) + else: + rotated_data = provider.rotate_point_cloud(batch_data) + rotated_data = provider.rotate_perturbation_point_cloud(rotated_data) + + jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3]) + jittered_data = provider.shift_point_cloud(jittered_data) + jittered_data = provider.jitter_point_cloud(jittered_data) + rotated_data[:,:,0:3] = jittered_data + return provider.shuffle_points(rotated_data) + + + def _get_item(self, index): + if index in self.cache: + point_set, cls = self.cache[index] + else: + fn = self.datapath[index] + cls = self.classes[self.datapath[index][0]] + cls = np.array([cls]).astype(np.int32) + point_set = np.loadtxt(fn[1],delimiter=',').astype(np.float32) + # 
Take the first npoints + point_set = point_set[0:self.npoints,:] + if self.normalize: + point_set[:,0:3] = pc_normalize(point_set[:,0:3]) + if not self.normal_channel: + point_set = point_set[:,0:3] + if len(self.cache) < self.cache_size: + self.cache[index] = (point_set, cls) + return point_set, cls + + def __getitem__(self, index): + return self._get_item(index) + + def __len__(self): + return len(self.datapath) + + def num_channel(self): + if self.normal_channel: + return 6 + else: + return 3 + + def reset(self): + self.idxs = np.arange(0, len(self.datapath)) + if self.shuffle: + np.random.shuffle(self.idxs) + self.num_batches = (len(self.datapath)+self.batch_size-1) // self.batch_size + self.batch_idx = 0 + + def has_next_batch(self): + return self.batch_idx < self.num_batches + + def next_batch(self, augment=False): + ''' returned dimension may be smaller than self.batch_size ''' + start_idx = self.batch_idx * self.batch_size + end_idx = min((self.batch_idx+1) * self.batch_size, len(self.datapath)) + bsize = end_idx - start_idx + batch_data = np.zeros((bsize, self.npoints, self.num_channel())) + batch_label = np.zeros((bsize), dtype=np.int32) + for i in range(bsize): + ps,cls = self._get_item(self.idxs[i+start_idx]) + batch_data[i] = ps + batch_label[i] = cls + self.batch_idx += 1 + if augment: batch_data = self._augment_batch_data(batch_data) + return batch_data, batch_label + +if __name__ == '__main__': + d = ModelNetDataset(root = '../data/modelnet40_normal_resampled', split='test') + print(d.shuffle) + print(len(d)) + import time + tic = time.time() + for i in range(10): + ps, cls = d[i] + print(time.time() - tic) + print(ps.shape, type(ps), cls) + + print(d.has_next_batch()) + ps_batch, cls_batch = d.next_batch(True) + print(ps_batch.shape) + print(cls_batch.shape) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_h5_dataset.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_h5_dataset.py" new file mode 100644 index 0000000..ecd6e16 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/modelnet_h5_dataset.py" @@ -0,0 +1,126 @@ +''' + ModelNet dataset. Support ModelNet40, XYZ channels. Up to 2048 points. + Faster IO than ModelNetDataset in the first epoch. +''' + +import os +import sys +import numpy as np +import h5py +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +ROOT_DIR = BASE_DIR +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import provider + + +# Download dataset for point cloud classification +DATA_DIR = os.path.join(ROOT_DIR, 'data') +if not os.path.exists(DATA_DIR): + os.mkdir(DATA_DIR) +if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')): + www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip' + zipfile = os.path.basename(www) + os.system('wget %s; unzip %s' % (www, zipfile)) + os.system('mv %s %s' % (zipfile[:-4], DATA_DIR)) + os.system('rm %s' % (zipfile)) + + +def shuffle_data(data, labels): + """ Shuffle data and labels. + Input: + data: B,N,... numpy array + label: B,... 
numpy array + Return: + shuffled data, label and shuffle indices + """ + idx = np.arange(len(labels)) + np.random.shuffle(idx) + return data[idx, ...], labels[idx], idx + +def getDataFiles(list_filename): + return [line.rstrip() for line in open(list_filename)] + +def load_h5(h5_filename): + f = h5py.File(h5_filename) + data = f['data'][:] + label = f['label'][:] + return (data, label) + +def loadDataFile(filename): + return load_h5(filename) + + +class ModelNetH5Dataset(object): + def __init__(self, list_filename, batch_size = 32, npoints = 1024, shuffle=True): + self.list_filename = list_filename + self.batch_size = batch_size + self.npoints = npoints + self.shuffle = shuffle + self.h5_files = getDataFiles(self.list_filename) + self.reset() + + def reset(self): + ''' reset order of h5 files ''' + self.file_idxs = np.arange(0, len(self.h5_files)) + if self.shuffle: np.random.shuffle(self.file_idxs) + self.current_data = None + self.current_label = None + self.current_file_idx = 0 + self.batch_idx = 0 + + def _augment_batch_data(self, batch_data): + rotated_data = provider.rotate_point_cloud(batch_data) + rotated_data = provider.rotate_perturbation_point_cloud(rotated_data) + jittered_data = provider.random_scale_point_cloud(rotated_data[:,:,0:3]) + jittered_data = provider.shift_point_cloud(jittered_data) + jittered_data = provider.jitter_point_cloud(jittered_data) + rotated_data[:,:,0:3] = jittered_data + return provider.shuffle_points(rotated_data) + + + def _get_data_filename(self): + return self.h5_files[self.file_idxs[self.current_file_idx]] + + def _load_data_file(self, filename): + self.current_data,self.current_label = load_h5(filename) + self.current_label = np.squeeze(self.current_label) + self.batch_idx = 0 + if self.shuffle: + self.current_data, self.current_label, _ = shuffle_data(self.current_data,self.current_label) + + def _has_next_batch_in_file(self): + return self.batch_idx*self.batch_size < self.current_data.shape[0] + + def num_channel(self): 
+ return 3 + + def has_next_batch(self): + # TODO: add backend thread to load data + if (self.current_data is None) or (not self._has_next_batch_in_file()): + if self.current_file_idx >= len(self.h5_files): + return False + self._load_data_file(self._get_data_filename()) + self.batch_idx = 0 + self.current_file_idx += 1 + return self._has_next_batch_in_file() + + def next_batch(self, augment=False): + ''' returned dimension may be smaller than self.batch_size ''' + start_idx = self.batch_idx * self.batch_size + end_idx = min((self.batch_idx+1) * self.batch_size, self.current_data.shape[0]) + bsize = end_idx - start_idx + batch_label = np.zeros((bsize), dtype=np.int32) + data_batch = self.current_data[start_idx:end_idx, 0:self.npoints, :].copy() + label_batch = self.current_label[start_idx:end_idx].copy() + self.batch_idx += 1 + if augment: data_batch = self._augment_batch_data(data_batch) + return data_batch, label_batch + +if __name__=='__main__': + d = ModelNetH5Dataset('data/modelnet40_ply_hdf5_2048/train_files.txt') + print(d.shuffle) + print(d.has_next_batch()) + ps_batch, cls_batch = d.next_batch(True) + print(ps_batch.shape) + print(cls_batch.shape) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" new file mode 100644 index 0000000..8af3cf7 --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" @@ -0,0 +1,7 @@ +# TF1.2 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.4 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf\346\243\200\346\237\245\345\217\257\347\224\250\347\232\204gpu\350\256\276\345\244\207.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf\346\243\200\346\237\245\345\217\257\347\224\250\347\232\204gpu\350\256\276\345\244\207.py" new file mode 100644 index 0000000..8bd0d15 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf\346\243\200\346\237\245\345\217\257\347\224\250\347\232\204gpu\350\256\276\345\244\207.py" @@ -0,0 +1,12 @@ +#--coding=utf8 +from tensorflow.python.client import device_lib + +print(device_lib.list_local_devices()) + +from tensorflow.python.client import device_lib + +# 列出全部的本地机器设备 +local_device_protos = device_lib.list_local_devices() +# 打印 +print(local_device_protos) + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" new file mode 100644 index 0000000..0c03076 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" @@ -0,0 +1,307 @@ +''' + Single-GPU training. + Will use H5 dataset in default. 
If using normal, will shift to the normal dataset. +''' +import argparse +import math +from datetime import datetime +import h5py +import numpy as np +import tensorflow as tf +import socket +import importlib +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(ROOT_DIR, 'models')) +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import provider +import tf_util +import modelnet_dataset +import modelnet_h5_dataset + +# os.environ["CUDA_VISIBLE_DEVICES"] = "0" +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' + +# from tensorflow.compat.v1 import ConfigProto +# from tensorflow.compat.v1 import InteractiveSession +# config = tf.compat.v1.ConfigProto() +# config.gpu_options.allow_growth = True +# session =tf.compat.v1.InteractiveSession(config=config) + + + +parser = argparse.ArgumentParser() +parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]') +parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name [default: pointnet2_cls_ssg]') + +parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') +parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 251]') + + +parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 16]') + +parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') +parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') +parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') +parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') + + +parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay 
[default: 0.7]') +parser.add_argument('--normal', action='store_true', help='Whether to use normal information') +FLAGS = parser.parse_args() + +EPOCH_CNT = 0 + +BATCH_SIZE = FLAGS.batch_size +NUM_POINT = FLAGS.num_point +MAX_EPOCH = FLAGS.max_epoch +BASE_LEARNING_RATE = FLAGS.learning_rate +GPU_INDEX = FLAGS.gpu +MOMENTUM = FLAGS.momentum +OPTIMIZER = FLAGS.optimizer +DECAY_STEP = FLAGS.decay_step +DECAY_RATE = FLAGS.decay_rate + +MODEL = importlib.import_module(FLAGS.model) # import network module + + + +MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py') + +LOG_DIR = FLAGS.log_dir +if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) +os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def +os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure +LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +NUM_CLASSES = 40 + +# Shapenet official train/test split + +TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), + batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True) +TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'), + batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def get_learning_rate(batch): + learning_rate = tf.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. + staircase=True) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! 
+ return learning_rate + +def get_bn_decay(batch): + bn_momentum = tf.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + +def train(): + with tf.Graph().as_default(): + with tf.device('/gpu:'+str(GPU_INDEX)): + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.placeholder(tf.bool, shape=()) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter + # for you every time it trains. + batch = tf.get_variable('batch', [], + initializer=tf.constant_initializer(0), trainable=False) + bn_decay = get_bn_decay(batch) + tf.summary.scalar('bn_decay', bn_decay) + + # Get model and loss + pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) + MODEL.get_loss(pred, labels_pl, end_points) + losses = tf.get_collection('losses') + total_loss = tf.add_n(losses, name='total_loss') + tf.summary.scalar('total_loss', total_loss) + for l in losses + [total_loss]: + tf.summary.scalar(l.op.name, l) + + correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl)) + # correct = tf.equal(tf.argmax(pred, 1), tf.cast(labels_pl)) + accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE) + tf.summary.scalar('accuracy', accuracy) + + print("--- Get training operator") + # Get training operator + learning_rate = get_learning_rate(batch) + tf.summary.scalar('learning_rate', learning_rate) + if OPTIMIZER == 'momentum': + optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) + elif OPTIMIZER == 'adam': + optimizer = tf.train.AdamOptimizer(learning_rate) + train_op = optimizer.minimize(total_loss, global_step=batch) + + # Add ops to save and restore all the variables. 
+ saver = tf.train.Saver() + + # Create a session + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = False + # config.graph_options.optimizer_options.global_jit_level =tf.OptimizerOptions.ON_1 + sess = tf.Session(config=config) + + + + + # Add summary writers + merged = tf.summary.merge_all() + train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph) + test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph) + + # Init variables + init = tf.global_variables_initializer() + sess.run(init) + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': total_loss, + 'train_op': train_op, + 'merged': merged, + 'step': batch, + 'end_points': end_points} + + best_acc = -1 + for epoch in range(MAX_EPOCH): + log_string('**** EPOCH %03d ****' % (epoch)) + sys.stdout.flush() + + train_one_epoch(sess, ops, train_writer) + eval_one_epoch(sess, ops, test_writer) + + # Save the variables to disk. + if epoch % 10 == 0: + save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) + log_string("Model saved in file: %s" % save_path) + + +def train_one_epoch(sess, ops, train_writer): + """ ops: dict mapping from string to tf ops """ + is_training = True + + log_string(str(datetime.now())) + + # Make sure batch data is of same size + cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TRAIN_DATASET.num_channel())) + cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32) + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx = 0 + while TRAIN_DATASET.has_next_batch(): + batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True) + #batch_data = provider.random_point_dropout(batch_data) + bsize = batch_data.shape[0] + cur_batch_data[0:bsize,...] 
= batch_data + cur_batch_label[0:bsize] = batch_label + + feed_dict = {ops['pointclouds_pl']: cur_batch_data, + ops['labels_pl']: cur_batch_label, + ops['is_training_pl']: is_training,} + summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) + train_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize]) + total_correct += correct + total_seen += bsize + loss_sum += loss_val + if (batch_idx+1)%50 == 0: + log_string(' ---- batch: %03d ----' % (batch_idx+1)) + log_string('mean loss: %f' % (loss_sum / 50)) + log_string('accuracy: %f' % (total_correct / float(total_seen))) + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx += 1 + + TRAIN_DATASET.reset() + +def eval_one_epoch(sess, ops, test_writer): + """ ops: dict mapping from string to tf ops """ + global EPOCH_CNT + is_training = False + + # Make sure batch data is of same size + cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel())) + cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32) + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx = 0 + shape_ious = [] + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + log_string(str(datetime.now())) + log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT)) + + while TEST_DATASET.has_next_batch(): + batch_data, batch_label = TEST_DATASET.next_batch(augment=False) + bsize = batch_data.shape[0] + # for the last batch in the epoch, the bsize:end are from last batch + cur_batch_data[0:bsize,...] 
= batch_data + cur_batch_label[0:bsize] = batch_label + + feed_dict = {ops['pointclouds_pl']: cur_batch_data, + ops['labels_pl']: cur_batch_label, + ops['is_training_pl']: is_training} + summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['loss'], ops['pred']], feed_dict=feed_dict) + test_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize]) + total_correct += correct + total_seen += bsize + loss_sum += loss_val + batch_idx += 1 + for i in range(0, bsize): + l = batch_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(batch_idx))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + EPOCH_CNT += 1 + + TEST_DATASET.reset() + return total_correct/float(total_seen) + + +if __name__ == "__main__": + log_string('pid: %s'%(str(os.getpid()))) + train() + LOG_FOUT.close() diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_multi_gpu.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_multi_gpu.py" new file mode 100644 index 0000000..51d0f82 --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_multi_gpu.py" @@ -0,0 +1,364 @@ +''' + Multi-GPU training. + Near linear scale acceleration for multi-gpus on a single machine. + Will use H5 dataset in default. If using normal, will shift to the normal dataset. +''' + +import argparse +import math +from datetime import datetime +import h5py +import numpy as np +import tensorflow as tf +import socket +import importlib +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = BASE_DIR +sys.path.append(BASE_DIR) +sys.path.append(os.path.join(ROOT_DIR, 'models')) +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +import provider +import tf_util +import modelnet_dataset +import modelnet_h5_dataset + +parser = argparse.ArgumentParser() +parser.add_argument('--num_gpus', type=int, default=1, help='How many gpus to use [default: 1]') +parser.add_argument('--model', default='pointnet2_cls_ssg', help='Model name [default: pointnet2_cls_ssg]') +parser.add_argument('--log_dir', default='log', help='Log dir [default: log]') +parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') +parser.add_argument('--max_epoch', type=int, default=5, help='Epoch to run [default: 251]') + + + +parser.add_argument('--batch_size', type=int, default=8, help='Batch Size during training [default: 32]') +parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]') +parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]') +parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]') +parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]') 
+parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.7]') +parser.add_argument('--normal', action='store_true', help='Whether to use normal information') +FLAGS = parser.parse_args() + +EPOCH_CNT = 0 + +NUM_GPUS = FLAGS.num_gpus +BATCH_SIZE = FLAGS.batch_size +assert(BATCH_SIZE % NUM_GPUS == 0) +DEVICE_BATCH_SIZE = BATCH_SIZE / NUM_GPUS + +NUM_POINT = FLAGS.num_point +MAX_EPOCH = FLAGS.max_epoch +BASE_LEARNING_RATE = FLAGS.learning_rate +MOMENTUM = FLAGS.momentum +OPTIMIZER = FLAGS.optimizer +DECAY_STEP = FLAGS.decay_step +DECAY_RATE = FLAGS.decay_rate + +MODEL = importlib.import_module(FLAGS.model) # import network module +MODEL_FILE = os.path.join(ROOT_DIR, 'models', FLAGS.model+'.py') +LOG_DIR = FLAGS.log_dir +if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR) +os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def +os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure +LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w') +LOG_FOUT.write(str(FLAGS)+'\n') + +BN_INIT_DECAY = 0.5 +BN_DECAY_DECAY_RATE = 0.5 +BN_DECAY_DECAY_STEP = float(DECAY_STEP) +BN_DECAY_CLIP = 0.99 + +HOSTNAME = socket.gethostname() + +NUM_CLASSES = 40 + +# # Shapenet official train/test split +# if FLAGS.normal: +# assert(NUM_POINT<=10000) +# DATA_PATH = os.path.join(ROOT_DIR, 'data/modelnet40_normal_resampled') +# TRAIN_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='train', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE) +# TEST_DATASET = modelnet_dataset.ModelNetDataset(root=DATA_PATH, npoints=NUM_POINT, split='test', normal_channel=FLAGS.normal, batch_size=BATCH_SIZE) +# else: +# assert(NUM_POINT<=2048) +TRAIN_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'), + batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=True) +TEST_DATASET = modelnet_h5_dataset.ModelNetH5Dataset(os.path.join(BASE_DIR, 
'data/modelnet40_ply_hdf5_2048/test_files.txt'), + batch_size=BATCH_SIZE, npoints=NUM_POINT, shuffle=False) + +def log_string(out_str): + LOG_FOUT.write(out_str+'\n') + LOG_FOUT.flush() + print(out_str) + +def average_gradients(tower_grads): + """Calculate the average gradient for each shared variable across all towers. + Note that this function provides a synchronization point across all towers. + From tensorflow tutorial: cifar10/cifar10_multi_gpu_train.py + Args: + tower_grads: List of lists of (gradient, variable) tuples. The outer list + is over individual gradients. The inner list is over the gradient + calculation for each tower. + Returns: + List of pairs of (gradient, variable) where the gradient has been averaged + across all towers. + """ + average_grads = [] + for grad_and_vars in zip(*tower_grads): + # Note that each grad_and_vars looks like the following: + # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) + grads = [] + #for g, _ in grad_and_vars: + for g, v in grad_and_vars: + # Add 0 dimension to the gradients to represent the tower. + expanded_g = tf.expand_dims(g, 0) + + # Append on a 'tower' dimension which we will average over below. + grads.append(expanded_g) + + # Average over the 'tower' dimension. + grad = tf.concat(axis=0, values=grads) + grad = tf.reduce_mean(grad, 0) + + # Keep in mind that the Variables are redundant because they are shared + # across towers. So .. we will just return the first tower's pointer to + # the Variable. + v = grad_and_vars[0][1] + grad_and_var = (grad, v) + average_grads.append(grad_and_var) + return average_grads + + +def get_learning_rate(batch): + learning_rate = tf.train.exponential_decay( + BASE_LEARNING_RATE, # Base learning rate. + batch * BATCH_SIZE, # Current index into the dataset. + DECAY_STEP, # Decay step. + DECAY_RATE, # Decay rate. + staircase=True) + learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE! 
+ return learning_rate + +def get_bn_decay(batch): + bn_momentum = tf.train.exponential_decay( + BN_INIT_DECAY, + batch*BATCH_SIZE, + BN_DECAY_DECAY_STEP, + BN_DECAY_DECAY_RATE, + staircase=True) + bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum) + return bn_decay + +def train(): + with tf.Graph().as_default(): + with tf.device('/cpu:0'): + pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT) + is_training_pl = tf.placeholder(tf.bool, shape=()) + + # Note the global_step=batch parameter to minimize. + # That tells the optimizer to helpfully increment the 'batch' parameter + # for you every time it trains. + batch = tf.get_variable('batch', [], + initializer=tf.constant_initializer(0), trainable=False) + bn_decay = get_bn_decay(batch) + tf.summary.scalar('bn_decay', bn_decay) + + # Set learning rate and optimizer + learning_rate = get_learning_rate(batch) + tf.summary.scalar('learning_rate', learning_rate) + if OPTIMIZER == 'momentum': + optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM) + elif OPTIMIZER == 'adam': + optimizer = tf.train.AdamOptimizer(learning_rate) + + # ------------------------------------------- + # Get model and loss on multiple GPU devices + # ------------------------------------------- + # Allocating variables on CPU first will greatly accelerate multi-gpu training. 
+ # Ref: https://github.com/kuza55/keras-extras/issues/21 + MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay) + + tower_grads = [] + pred_gpu = [] + total_loss_gpu = [] + for i in range(NUM_GPUS): + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + with tf.device('/gpu:%d'%(i)), tf.name_scope('gpu_%d'%(i)) as scope: + # Evenly split input data to each GPU + pc_batch = tf.slice(pointclouds_pl, + [i*DEVICE_BATCH_SIZE,0,0], [DEVICE_BATCH_SIZE,-1,-1]) + label_batch = tf.slice(labels_pl, + [i*DEVICE_BATCH_SIZE], [DEVICE_BATCH_SIZE]) + + pred, end_points = MODEL.get_model(pc_batch, + is_training=is_training_pl, bn_decay=bn_decay) + + MODEL.get_loss(pred, label_batch, end_points) + losses = tf.get_collection('losses', scope) + total_loss = tf.add_n(losses, name='total_loss') + for l in losses + [total_loss]: + tf.summary.scalar(l.op.name, l) + + grads = optimizer.compute_gradients(total_loss) + tower_grads.append(grads) + + pred_gpu.append(pred) + total_loss_gpu.append(total_loss) + + # Merge pred and losses from multiple GPUs + pred = tf.concat(pred_gpu, 0) + total_loss = tf.reduce_mean(total_loss_gpu) + + # Get training operator + grads = average_gradients(tower_grads) + train_op = optimizer.apply_gradients(grads, global_step=batch) + + correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl)) + accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE) + tf.summary.scalar('accuracy', accuracy) + + # Add ops to save and restore all the variables. 
+ saver = tf.train.Saver() + + # Create a session + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + config.allow_soft_placement = True + config.log_device_placement = False + sess = tf.Session(config=config) + + # Add summary writers + merged = tf.summary.merge_all() + train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'), sess.graph) + test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'), sess.graph) + + # Init variables + init = tf.global_variables_initializer() + sess.run(init) + + ops = {'pointclouds_pl': pointclouds_pl, + 'labels_pl': labels_pl, + 'is_training_pl': is_training_pl, + 'pred': pred, + 'loss': total_loss, + 'train_op': train_op, + 'merged': merged, + 'step': batch, + 'end_points': end_points} + + best_acc = -1 + for epoch in range(MAX_EPOCH): + log_string('**** EPOCH %03d ****' % (epoch)) + sys.stdout.flush() + + train_one_epoch(sess, ops, train_writer) + eval_one_epoch(sess, ops, test_writer) + + # Save the variables to disk. + if epoch % 10 == 0: + save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt")) + log_string("Model saved in file: %s" % save_path) + + +def train_one_epoch(sess, ops, train_writer): + """ ops: dict mapping from string to tf ops """ + is_training = True + + log_string(str(datetime.now())) + + # Make sure batch data is of same size + cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TRAIN_DATASET.num_channel())) + cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32) + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx = 0 + while TRAIN_DATASET.has_next_batch(): + batch_data, batch_label = TRAIN_DATASET.next_batch(augment=True) + #batch_data = provider.random_point_dropout(batch_data) + bsize = batch_data.shape[0] + cur_batch_data[0:bsize,...] 
= batch_data + cur_batch_label[0:bsize] = batch_label + + feed_dict = {ops['pointclouds_pl']: cur_batch_data, + ops['labels_pl']: cur_batch_label, + ops['is_training_pl']: is_training,} + summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict) + train_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize]) + total_correct += correct + total_seen += bsize + loss_sum += loss_val + if (batch_idx+1)%50 == 0: + log_string(' ---- batch: %03d ----' % (batch_idx+1)) + log_string('mean loss: %f' % (loss_sum / 50)) + log_string('accuracy: %f' % (total_correct / float(total_seen))) + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx += 1 + + TRAIN_DATASET.reset() + +def eval_one_epoch(sess, ops, test_writer): + """ ops: dict mapping from string to tf ops """ + global EPOCH_CNT + is_training = False + + # Make sure batch data is of same size + cur_batch_data = np.zeros((BATCH_SIZE,NUM_POINT,TEST_DATASET.num_channel())) + cur_batch_label = np.zeros((BATCH_SIZE), dtype=np.int32) + + total_correct = 0 + total_seen = 0 + loss_sum = 0 + batch_idx = 0 + shape_ious = [] + total_seen_class = [0 for _ in range(NUM_CLASSES)] + total_correct_class = [0 for _ in range(NUM_CLASSES)] + + log_string(str(datetime.now())) + log_string('---- EPOCH %03d EVALUATION ----'%(EPOCH_CNT)) + + while TEST_DATASET.has_next_batch(): + batch_data, batch_label = TEST_DATASET.next_batch(augment=False) + bsize = batch_data.shape[0] + # for the last batch in the epoch, the bsize:end are from last batch + cur_batch_data[0:bsize,...] 
= batch_data + cur_batch_label[0:bsize] = batch_label + + feed_dict = {ops['pointclouds_pl']: cur_batch_data, + ops['labels_pl']: cur_batch_label, + ops['is_training_pl']: is_training} + summary, step, loss_val, pred_val = sess.run([ops['merged'], ops['step'], + ops['loss'], ops['pred']], feed_dict=feed_dict) + test_writer.add_summary(summary, step) + pred_val = np.argmax(pred_val, 1) + correct = np.sum(pred_val[0:bsize] == batch_label[0:bsize]) + total_correct += correct + total_seen += bsize + loss_sum += loss_val + batch_idx += 1 + for i in range(0, bsize): + l = batch_label[i] + total_seen_class[l] += 1 + total_correct_class[l] += (pred_val[i] == l) + + log_string('eval mean loss: %f' % (loss_sum / float(batch_idx))) + log_string('eval accuracy: %f'% (total_correct / float(total_seen))) + log_string('eval avg class acc: %f' % (np.mean(np.array(total_correct_class)/np.array(total_seen_class,dtype=np.float)))) + EPOCH_CNT += 1 + + TEST_DATASET.reset() + return total_correct/float(total_seen) + + +if __name__ == "__main__": + log_string('pid: %s'%(str(os.getpid()))) + train() + LOG_FOUT.close() diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/\346\265\213\350\257\225cudnn\346\230\257\345\220\246\345\217\257\347\224\250.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/\346\265\213\350\257\225cudnn\346\230\257\345\220\246\345\217\257\347\224\250.py" new file mode 100644 index 0000000..02b665f --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/\346\265\213\350\257\225cudnn\346\230\257\345\220\246\345\217\257\347\224\250.py" @@ -0,0 +1,3 @@ +#coding = utf-8 +import tensorflow as tf +tf.Session() \ No newline at end of file -- Gitee From 497fbd829de44294ecbd6f3fa0698a6e4e33226c Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:26:55 +0000 Subject: [PATCH 38/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/train=5Fcls.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../train_cls.py" | 229 ------------------ 1 file changed, 229 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" deleted file mode 100644 index 94bd1f6..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train_cls.py" +++ /dev/null @@ -1,229 +0,0 @@ -""" -Author: Benny -Date: Nov 2019 -""" -from data_utils.ModelNetDataLoader import ModelNetDataLoader -import argparse -import numpy as np -import os -import torch -import datetime -import logging -from pathlib import Path -from tqdm import tqdm -import sys -import provider -import importlib -import shutil - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = BASE_DIR -sys.path.append(os.path.join(ROOT_DIR, 'models')) - -torch.backends.cudnn.enabled = False -# torch.backends.cudnn.enabled = True -# torch.backends.cudnn.benchmark = False - - -# num_class = 40 -def parse_args(): - '''PARAMETERS''' - parser = argparse.ArgumentParser('PointNet') - parser.add_argument('--batch_size', type=int, default = 8, help='batch size in training [default: 24]') - - parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]') - - parser.add_argument('--num_class', type=int, default= 3, help='class number [default: 40]') - - parser.add_argument('--epoch', default= 150, type=int, help='number of epoch in training [default: 200]') - - parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]') - parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]') - - parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') - - parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]') - parser.add_argument('--log_dir', type=str, default=None, help='experiment root') - parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]') - 
parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]') - args = parser.parse_args() - return args -# https://whttps://www.cnblogs.com/yibeimingyue/p/13800159.html -# argparse.ArgumentParser函数的解释。 - -def test(model, loader,class_number): - class_number =args.num_class - # num_class = args.num_class - - mean_correct = [] - class_acc = np.zeros((class_number,3)) - for j, data in tqdm(enumerate(loader), total=len(loader)): - points, target = data - target = target[:, 0] - points = points.transpose(2, 1) - points, target = points.cuda(), target.cuda() - classifier = model.eval() - pred, _ = classifier(points) - pred_choice = pred.data.max(1)[1] - for cat in np.unique(target.cpu()): - classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum() - class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0]) - class_acc[cat,1]+=1 - correct = pred_choice.eq(target.long().data).cpu().sum() - mean_correct.append(correct.item()/float(points.size()[0])) - class_acc[:,2] = class_acc[:,0]/ class_acc[:,1] - class_acc = np.mean(class_acc[:,2]) - instance_acc = np.mean(mean_correct) - return instance_acc, class_acc - - -def main(args): - def log_string(str): - logger.info(str) - print(str) - - '''HYPER PARAMETER''' - os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu - - '''CREATE DIR''' - timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')) - experiment_dir = Path('./log/') - experiment_dir.mkdir(exist_ok=True) - experiment_dir = experiment_dir.joinpath('classification') - experiment_dir.mkdir(exist_ok=True) - if args.log_dir is None: - experiment_dir = experiment_dir.joinpath(timestr) - else: - experiment_dir = experiment_dir.joinpath(args.log_dir) - experiment_dir.mkdir(exist_ok=True) - checkpoints_dir = experiment_dir.joinpath('checkpoints/') - checkpoints_dir.mkdir(exist_ok=True) - log_dir = experiment_dir.joinpath('logs/') - log_dir.mkdir(exist_ok=True) - - 
'''LOG''' - args = parse_args() - logger = logging.getLogger("Model") - logger.setLevel(logging.INFO) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model)) - file_handler.setLevel(logging.INFO) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - log_string('PARAMETER ...') - log_string(args) - - '''DATA LOADING''' - log_string('Load dataset ...') - - DATA_PATH = os.path.join(BASE_DIR,'data/modelnet40/') - - TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train', - normal_channel=args.normal) - TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', - normal_channel=args.normal) - - trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4) - testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4) - - '''MODEL LOADING''' - # num_class = 40 - MODEL = importlib.import_module(args.model) - shutil.copy('./models/%s.py' % args.model, str(experiment_dir)) - shutil.copy('./models/pointnet_util.py', str(experiment_dir)) - - classifier = MODEL.get_model(args.num_class,normal_channel=args.normal).cuda() - criterion = MODEL.get_loss().cuda() - - try: - checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') - start_epoch = checkpoint['epoch'] - classifier.load_state_dict(checkpoint['model_state_dict']) - log_string('Use pretrain model') - except: - log_string('No existing model, starting training from scratch...') - start_epoch = 0 - - - if args.optimizer == 'Adam': - optimizer = torch.optim.Adam( - classifier.parameters(), - lr=args.learning_rate, - betas=(0.9, 0.999), - eps=1e-08, - weight_decay=args.decay_rate - ) - else: - optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9) - - scheduler = 
torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7) - global_epoch = 0 - global_step = 0 - best_instance_acc = 0.0 - best_class_acc = 0.0 - mean_correct = [] - - '''TRANING''' - logger.info('Start training...') - for epoch in range(start_epoch,args.epoch): - log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch)) - - scheduler.step() - for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9): - points, target = data - points = points.data.numpy() - points = provider.random_point_dropout(points) - points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3]) - points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3]) - points = torch.Tensor(points) - target = target[:, 0] - - points = points.transpose(2, 1) - points, target = points.cuda(), target.cuda() - optimizer.zero_grad() - - classifier = classifier.train() - pred, trans_feat = classifier(points) - loss = criterion(pred, target.long(), trans_feat) - pred_choice = pred.data.max(1)[1] - correct = pred_choice.eq(target.long().data).cpu().sum() - mean_correct.append(correct.item() / float(points.size()[0])) - loss.backward() - optimizer.step() - global_step += 1 - - train_instance_acc = np.mean(mean_correct) - log_string('Train Instance Accuracy: %f' % train_instance_acc) - - - with torch.no_grad(): - instance_acc, class_acc = test(classifier.eval(), testDataLoader) - - if (instance_acc >= best_instance_acc): - best_instance_acc = instance_acc - best_epoch = epoch + 1 - - if (class_acc >= best_class_acc): - best_class_acc = class_acc - log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc)) - log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc)) - - if (instance_acc >= best_instance_acc): - logger.info('Save model...') - savepath = str(checkpoints_dir) + '/best_model.pth' - log_string('Saving at %s'% savepath) - state = { - 'epoch': best_epoch, - 
'instance_acc': instance_acc, - 'class_acc': class_acc, - 'model_state_dict': classifier.state_dict(), - 'optimizer_state_dict': optimizer.state_dict(), - } - torch.save(state, savepath) - global_epoch += 1 - - logger.info('End of training...') - -if __name__ == '__main__': - args = parse_args() - main(args) -- Gitee From 20fc0bbaab153d503e177cbf59bdfc4f0d7b4635 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:27:11 +0000 Subject: [PATCH 39/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/test=5Fcls.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../test_cls.py" | 105 ------------------ 1 file changed, 105 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" deleted file mode 100644 index 9e9fa8d..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/test_cls.py" +++ /dev/null @@ -1,105 +0,0 @@ -""" -Author: Benny -Date: Nov 2019 -""" -from data_utils.ModelNetDataLoader import ModelNetDataLoader -import argparse -import numpy as np -import os -import torch -import logging -from tqdm import tqdm -import sys -import importlib - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = BASE_DIR -sys.path.append(os.path.join(ROOT_DIR, 'models')) - - -def parse_args(): - '''PARAMETERS''' - parser = argparse.ArgumentParser('PointNet') - parser.add_argument('--batch_size', type=int, default=8, help='batch size in training') - parser.add_argument('--gpu', type=str, default='0', help='specify gpu device') - parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]') - parser.add_argument('--log_dir', type=str, default='pointnet2_ssg_normal', help='Experiment root') - parser.add_argument('--normal', action='store_true', default=True, help='Whether to use normal information [default: False]') - parser.add_argument('--num_votes', type=int, default=3, help='Aggregate classification scores with voting [default: 3]') - return parser.parse_args() - -def test(model, loader, num_class=40, vote_num=1): - mean_correct = [] - class_acc = np.zeros((num_class,3)) - for j, data in tqdm(enumerate(loader), total=len(loader)): - points, target = data - target = target[:, 0] - points = points.transpose(2, 1) - points, target = points.cuda(), target.cuda() - classifier = model.eval() - vote_pool = torch.zeros(target.size()[0],num_class).cuda() - for _ in range(vote_num): - pred, _ = classifier(points) - vote_pool += pred - pred = vote_pool/vote_num - pred_choice = pred.data.max(1)[1] - for cat in np.unique(target.cpu()): - classacc = 
pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum() - class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0]) - class_acc[cat,1]+=1 - correct = pred_choice.eq(target.long().data).cpu().sum() - mean_correct.append(correct.item()/float(points.size()[0])) - class_acc[:,2] = class_acc[:,0]/ class_acc[:,1] - class_acc = np.mean(class_acc[:,2]) - instance_acc = np.mean(mean_correct) - return instance_acc, class_acc - - -def main(args): - def log_string(str): - logger.info(str) - print(str) - - '''HYPER PARAMETER''' - os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu - - '''CREATE DIR''' - experiment_dir = 'log/classification/' + args.log_dir - - '''LOG''' - args = parse_args() - logger = logging.getLogger("Model") - logger.setLevel(logging.INFO) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir) - file_handler.setLevel(logging.INFO) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - log_string('PARAMETER ...') - log_string(args) - - '''DATA LOADING''' - log_string('Load dataset ...') - DATA_PATH = 'data/modelnet40/' - TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test', normal_channel=args.normal) - testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4) - - '''MODEL LOADING''' - num_class = 40 - model_name = os.listdir(experiment_dir+'/logs')[0].split('.')[0] - MODEL = importlib.import_module(model_name) - - classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda() - - checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth') - classifier.load_state_dict(checkpoint['model_state_dict']) - - with torch.no_grad(): - instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes) - log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, 
class_acc)) - - - -if __name__ == '__main__': - args = parse_args() - main(args) -- Gitee From 393ac42d6474585d8ee4c0219c41f9f8d91423ca Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:27:40 +0000 Subject: [PATCH 40/69] =?UTF-8?q?add=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" new file mode 100644 index 0000000..e69de29 -- Gitee From ecc67e41cec4548b77eee0b5cc6841824e28b905 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:28:07 +0000 Subject: [PATCH 41/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= 
=?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops" | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops" deleted file mode 100644 index e69de29..0000000 -- Gitee From b1c25ffefa87937bfa5149f16b0f0b8ce91414ab Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:28:19 +0000 Subject: [PATCH 42/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20tf=5Fops?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 17ae9c0781a6db68f2540521d769c9e1af03968f Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:28:37 +0000 Subject: [PATCH 43/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Finterpolate=5Fcompile.sh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_interpolate_compile.sh" | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" deleted file mode 100644 index 8af3cf7..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_interpolate_compile.sh" +++ /dev/null @@ -1,7 +0,0 @@ -# TF1.2 -#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 - -# TF1.4 -#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 - -g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 -- Gitee From f8d59c5bee11671dd871a86d5d54387672dcf85b Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:29:00 +0000 Subject: [PATCH 44/69] 
=?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/provider.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../provider.py" | 251 ------------------ 1 file changed, 251 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" deleted file mode 100644 index 5604691..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/provider.py" +++ /dev/null @@ -1,251 +0,0 @@ -import numpy as np - -def normalize_data(batch_data): - """ Normalize the batch data, use coordinates of the block centered at origin, - Input: - BxNxC array - Output: - BxNxC array - """ - B, N, C = batch_data.shape - normal_data = np.zeros((B, N, C)) - for b in range(B): - pc = batch_data[b] - centroid = np.mean(pc, axis=0) - pc = pc - centroid 
- m = np.max(np.sqrt(np.sum(pc ** 2, axis=1))) - pc = pc / m - normal_data[b] = pc - return normal_data - - -def shuffle_data(data, labels): - """ Shuffle data and labels. - Input: - data: B,N,... numpy array - label: B,... numpy array - Return: - shuffled data, label and shuffle indices - """ - idx = np.arange(len(labels)) - np.random.shuffle(idx) - return data[idx, ...], labels[idx], idx - -def shuffle_points(batch_data): - """ Shuffle orders of points in each point cloud -- changes FPS behavior. - Use the same shuffling idx for the entire batch. - Input: - BxNxC array - Output: - BxNxC array - """ - idx = np.arange(batch_data.shape[1]) - np.random.shuffle(idx) - return batch_data[:,idx,:] - -def rotate_point_cloud(batch_data): - """ Randomly rotate the point clouds to augument the dataset - rotation is per shape based along up direction - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_data[k, ...] - rotated_data[k, ...] 
= np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - return rotated_data - -def rotate_point_cloud_z(batch_data): - """ Randomly rotate the point clouds to augument the dataset - rotation is per shape based along up direction - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, sinval, 0], - [-sinval, cosval, 0], - [0, 0, 1]]) - shape_pc = batch_data[k, ...] - rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - return rotated_data - -def rotate_point_cloud_with_normal(batch_xyz_normal): - ''' Randomly rotate XYZ, normal point cloud. - Input: - batch_xyz_normal: B,N,6, first three channels are XYZ, last 3 all normal - Output: - B,N,6, rotated XYZ, normal point cloud - ''' - for k in range(batch_xyz_normal.shape[0]): - rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_xyz_normal[k,:,0:3] - shape_normal = batch_xyz_normal[k,:,3:6] - batch_xyz_normal[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - batch_xyz_normal[k,:,3:6] = np.dot(shape_normal.reshape((-1, 3)), rotation_matrix) - return batch_xyz_normal - -def rotate_perturbation_point_cloud_with_normal(batch_data, angle_sigma=0.06, angle_clip=0.18): - """ Randomly perturb the point clouds by small rotations - Input: - BxNx6 array, original batch of point clouds and point normals - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - angles = np.clip(angle_sigma*np.random.randn(3), 
-angle_clip, angle_clip) - Rx = np.array([[1,0,0], - [0,np.cos(angles[0]),-np.sin(angles[0])], - [0,np.sin(angles[0]),np.cos(angles[0])]]) - Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])], - [0,1,0], - [-np.sin(angles[1]),0,np.cos(angles[1])]]) - Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0], - [np.sin(angles[2]),np.cos(angles[2]),0], - [0,0,1]]) - R = np.dot(Rz, np.dot(Ry,Rx)) - shape_pc = batch_data[k,:,0:3] - shape_normal = batch_data[k,:,3:6] - rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), R) - rotated_data[k,:,3:6] = np.dot(shape_normal.reshape((-1, 3)), R) - return rotated_data - - -def rotate_point_cloud_by_angle(batch_data, rotation_angle): - """ Rotate the point cloud along up direction with certain angle. - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - #rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_data[k,:,0:3] - rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - return rotated_data - -def rotate_point_cloud_by_angle_with_normal(batch_data, rotation_angle): - """ Rotate the point cloud along up direction with certain angle. 
- Input: - BxNx6 array, original batch of point clouds with normal - scalar, angle of rotation - Return: - BxNx6 array, rotated batch of point clouds iwth normal - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - #rotation_angle = np.random.uniform() * 2 * np.pi - cosval = np.cos(rotation_angle) - sinval = np.sin(rotation_angle) - rotation_matrix = np.array([[cosval, 0, sinval], - [0, 1, 0], - [-sinval, 0, cosval]]) - shape_pc = batch_data[k,:,0:3] - shape_normal = batch_data[k,:,3:6] - rotated_data[k,:,0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix) - rotated_data[k,:,3:6] = np.dot(shape_normal.reshape((-1,3)), rotation_matrix) - return rotated_data - - - -def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18): - """ Randomly perturb the point clouds by small rotations - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, rotated batch of point clouds - """ - rotated_data = np.zeros(batch_data.shape, dtype=np.float32) - for k in range(batch_data.shape[0]): - angles = np.clip(angle_sigma*np.random.randn(3), -angle_clip, angle_clip) - Rx = np.array([[1,0,0], - [0,np.cos(angles[0]),-np.sin(angles[0])], - [0,np.sin(angles[0]),np.cos(angles[0])]]) - Ry = np.array([[np.cos(angles[1]),0,np.sin(angles[1])], - [0,1,0], - [-np.sin(angles[1]),0,np.cos(angles[1])]]) - Rz = np.array([[np.cos(angles[2]),-np.sin(angles[2]),0], - [np.sin(angles[2]),np.cos(angles[2]),0], - [0,0,1]]) - R = np.dot(Rz, np.dot(Ry,Rx)) - shape_pc = batch_data[k, ...] - rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), R) - return rotated_data - - -def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05): - """ Randomly jitter points. jittering is per point. 
- Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, jittered batch of point clouds - """ - B, N, C = batch_data.shape - assert(clip > 0) - jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1*clip, clip) - jittered_data += batch_data - return jittered_data - -def shift_point_cloud(batch_data, shift_range=0.1): - """ Randomly shift point cloud. Shift is per point cloud. - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, shifted batch of point clouds - """ - B, N, C = batch_data.shape - shifts = np.random.uniform(-shift_range, shift_range, (B,3)) - for batch_index in range(B): - batch_data[batch_index,:,:] += shifts[batch_index,:] - return batch_data - - -def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25): - """ Randomly scale the point cloud. Scale is per point cloud. - Input: - BxNx3 array, original batch of point clouds - Return: - BxNx3 array, scaled batch of point clouds - """ - B, N, C = batch_data.shape - scales = np.random.uniform(scale_low, scale_high, B) - for batch_index in range(B): - batch_data[batch_index,:,:] *= scales[batch_index] - return batch_data - -def random_point_dropout(batch_pc, max_dropout_ratio=0.875): - ''' batch_pc: BxNx3 ''' - for b in range(batch_pc.shape[0]): - dropout_ratio = np.random.random()*max_dropout_ratio # 0~0.875 - drop_idx = np.where(np.random.random((batch_pc.shape[1]))<=dropout_ratio)[0] - if len(drop_idx)>0: - batch_pc[b,drop_idx,:] = batch_pc[b,0,:] # set to the first point - return batch_pc - - - -- Gitee From d52d19c271c85b85060b62fc555f308ac0df9cc2 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:30:33 +0000 Subject: [PATCH 45/69] 3d Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/interpolate.cpp" | 169 +++++++++++ .../tf_ops/tf_interpolate.cpp" | 267 ++++++++++++++++++ .../tf_ops/tf_interpolate.py" | 59 ++++ 
.../tf_ops/tf_interpolate_compile.sh" | 7 + .../tf_ops/tf_interpolate_op_test.py" | 24 ++ .../tf_ops/tf_interpolate_so.so" | Bin 0 -> 52296 bytes .../tf_ops/visu_interpolation.py" | 44 +++ 7 files changed, 570 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" new file mode 100644 index 0000000..b7d0dd0 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" @@ -0,0 +1,169 @@ +#include +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include +#include +using namespace std; +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float *xyz1, const 
float *xyz2, float *dist, int *idx) { + for (int i=0;i +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/common_shape_fns.h" + +#pragma GCC diagnostic ignored "-Wunused-result" + +using namespace tensorflow; + +REGISTER_OP("ThreeNN") + .Input("xyz1: float32") + .Input("xyz2: float32") + .Output("dist: float32") + .Output("idx: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + c->set_output(1, c->input(0)); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolate") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c) + c->WithRank(c->input(0), 3, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3) + c->WithRank(c->input(1), 3, &dims2); + // (b,n,c) + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolateGrad") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Input("grad_out: float32") + .Output("grad_points: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); + }); + +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float 
*xyz1, const float *xyz2, float *dist, int *idx) { + for (int i=0;iinput(0); + OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape.")); + int b = xyz1_tensor.shape().dim_size(0); + int n = xyz1_tensor.shape().dim_size(1); + + const Tensor& xyz2_tensor = context->input(1); + OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape.")); + int m = xyz2_tensor.shape().dim_size(1); + + Tensor *dist_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor)); + Tensor *idx_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor)); + + auto xyz1_flat = xyz1_tensor.flat(); + const float *xyz1 = &(xyz1_flat(0)); + auto xyz2_flat = xyz2_tensor.flat(); + const float *xyz2 = &(xyz2_flat(0)); + auto dist_flat = dist_tensor->flat(); + float *dist = &(dist_flat(0)); + auto idx_flat = idx_tensor->flat(); + int *idx = &(idx_flat(0)); + threenn_cpu(b,n,m,xyz1,xyz2,dist,idx); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp); + + + +class ThreeInterpolateOp: public OpKernel{ + public: + explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape")); + int b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape")); + int n = 
idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) weight shape")); + + Tensor * out_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto out_flat = out_tensor->flat(); + float *out = &(out_flat(0)); + threeinterpolate_cpu(b,m,c,n,points,idx,weight,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp); + + +class ThreeInterpolateGradOp: public OpKernel{ + public: + explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape")); + int b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx shape")); + int n = idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape")); + + const Tensor& 
grad_out_tensor=context->input(3); + OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape")); + + Tensor * grad_points_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto grad_out_flat = grad_out_tensor.flat(); + const float *grad_out = &(grad_out_flat(0)); + auto grad_points_flat = grad_points_tensor->flat(); + float *grad_points = &(grad_points_flat(0)); + memset(grad_points, 0, sizeof(float)*b*m*c); + threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp); + + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" new file mode 100644 index 0000000..2ef1edd --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" @@ -0,0 +1,59 @@ +import tensorflow as tf +from tensorflow.python.framework import ops +import sys +import os +BASE_DIR = os.path.dirname(__file__) +sys.path.append(BASE_DIR) +interpolate_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_interpolate_so.so')) +def three_nn(xyz1, xyz2): + ''' + Input: + xyz1: (b,n,3) float32 array, unknown points + xyz2: (b,m,3) float32 array, known points + Output: + dist: (b,n,3) float32 array, distances to known points + idx: (b,n,3) int32 array, indices to known points + ''' + return interpolate_module.three_nn(xyz1, xyz2) +ops.NoGradient('ThreeNN') +def three_interpolate(points, idx, weight): + ''' + Input: + points: (b,m,c) float32 array, known points + idx: (b,n,3) int32 array, indices to known points + weight: (b,n,3) float32 array, weights on known points + Output: + out: (b,n,c) float32 array, interpolated point values + ''' + return interpolate_module.three_interpolate(points, idx, weight) +@tf.RegisterGradient('ThreeInterpolate') +def _three_interpolate_grad(op, grad_out): + points = op.inputs[0] + idx = op.inputs[1] + weight = op.inputs[2] + return [interpolate_module.three_interpolate_grad(points, idx, weight, grad_out), None, None] + +if __name__=='__main__': + import numpy as np + import time + np.random.seed(100) + pts = np.random.random((32,128,64)).astype('float32') + tmp1 = np.random.random((32,512,3)).astype('float32') + tmp2 = np.random.random((32,128,3)).astype('float32') + with tf.device('/cpu:0'): + points = tf.constant(pts) + xyz1 = tf.constant(tmp1) + xyz2 = tf.constant(tmp2) + dist, idx = three_nn(xyz1, xyz2) + weight = tf.ones_like(dist)/3.0 + interpolated_points = three_interpolate(points, idx, weight) + with tf.Session('') as 
sess: + now = time.time() + for _ in range(100): + ret = sess.run(interpolated_points) + print (time.time() - now) + print (ret.shape, ret.dtype) + #print ret + + + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" new file mode 100644 index 0000000..8af3cf7 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" @@ -0,0 +1,7 @@ +# TF1.2 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.4 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I 
/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" new file mode 100644 index 0000000..030456d --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" @@ -0,0 +1,24 @@ +import tensorflow as tf +import numpy as np +from tf_interpolate import three_nn, three_interpolate + +class GroupPointTest(tf.test.TestCase): + def test(self): + pass + + def test_grad(self): + with self.test_session(): + points = tf.constant(np.random.random((1,8,16)).astype('float32')) + print (points) + xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) + xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) + dist, idx = three_nn(xyz1, xyz2) + weight = tf.ones_like(dist)/3.0 + interpolated_points = three_interpolate(points, idx, weight) + print(interpolated_points) + err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16)) + print (err) + self.assertLess(err, 1e-4) + +if __name__=='__main__': + 
tf.test.main() diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" new file mode 100644 index 0000000000000000000000000000000000000000..66d67ca3c5d2c52602144e24fc4a95565690b9be GIT binary patch literal 52296 zcmeIb3wTu3)jxc4BM~tZ6>PkprM5;vpuW~prM_sbp!IUbU_n%*YRUh%_TFb^&&(OX zzW?w0p67c!I_$H4XYIAuUVH7eFXxh*E%v6|oE$|v@|3F-qV`SaNS(N_bDKCpQm51? zh4}Z&%DIfUAbBa{?4mvnX{n+Z89-%>!_TpSkw}f~B$3iZ9E(J%<;w*|(E1^|N~tj= zJt(T8rA6$jqU0l`-%fEoJuMS_T1vchINpl1)44Cq$c*$ZAx}%Sd|yMpgz7x&G5;+Q zSLccHve{*G9%s~2Q9P^LL|mfFv*ylMW`ycm9$I;C!Iw6F_LC_`magtGK7J47yb!;+ z_>o;s!q2Eon7Z!tndk58pVv5{B)?9{Yg9~hnFOOUx7?}}^(jR~MS16y=KrC6kt=uk zNw+F5)GCd}LghbCJri6^UJKGzB5=&dZvlP_@mq|a6Tjv7t-!AxzfSzR@VgGbmH5%o zjh`RC0DeLIuE#Hg-zxmp;77-eJYoOdgnw_wuMfXl@uQ<3zxDY2`dd%W+_JN_V13z( z+wOnXI^%Rl|7DY>PpbDiW2;ABJW%}6$MN$PFMNOAY5SWSE_m8HV`tmLTOQn6x%5Hv zr24hLSyFbO>ek-(UM&0T9@o_$wruNv>dMXc|15Co%$H4jUi_x7Y02PMa{?`Y`pNZA z9l87AL%$2;{A|8w>RZ--O}fQ4dFuT)zIRXI6`eUhGe5q5=inu`jeIrq>%A*(dU^3V zKfU%B#ovk@eSGNs;^Z-zd4_C68{p+5hAA?WJKDL+Hs zp46R&v%h!8b!0J$>4;==pJ$a*Ls;o}4QY-1P8$S?u}WS@;LW>&v+= zi#|`nAN0ySEsOkJS^Vn3EbY4_OS$t=s9w3>&*J}kv*=l#1>cyZKi-g~+?%rKe_a;) zcW054lcj&XmBp_PWzqjq^b5WIIF`kKuFoPTi~&>6&Sz%nclT$h@AFyY&&Xnjnk@Zs zZ5ID2%i<@`WwFD>S@fKjh5y_v@#C&EjvaEOGn#Ec)z(pXjxF zQ_^z~H!xCrUTs7~y?(4tz`CKZL)Xr%Y5FVjfEEO2;JN zOwMdhLv4244*L*3vXs+Tp`GaXDSm`I()b4iUMFz489NZ>VgFN<{XPMRO4SN)@lqkN804_KI># zIjnq!l*$dJ$yq+0^N$FAqu@6~9?2<9v*)#ld(1v_IpckT|9!;a^ORy`Ux))nf&T*i 
zh4@S6aA2~)|BiB*9cXfj<58X{!|)%%6&D8zMY}jzz0hc3j-M>}pB8wDz(KNO3=AY^ z*EO7G6J(rDf!*}V=r|P)M0S2Ct-bb&au24pSEnd?wZr%99xv#?{L=y&^seYE~KTj)8QW}mreLT0x#`)@#fOO!K}O?MiF zWS+c>g!mn4ayAP7htl}JC*&U#@~fc-9p68N%h@FQ*KHhDt{5-smF92TMZJv8oUvTU z8D!;_X6SiemUb@^_(36Gi^+QtB=y?;ccT9eH}Z0C66M}4aAO0_sZ+N(>n7n+qF zl%9qaZakR+3(xMqcLJii-5YW`$hs zQi0hRsL*WJw1Tr<7lpTmE1mN^L9@w8jUA4J0&6Vu=CrzOZVSZwS9fPMYeR*_F}KxO zVba>PA>c>;9(TkW@FVkl|7x$l9m2ys-EahFXK$}F=m~`b?_O`3n@Fttw2AY$DbjjLeAG;eF9yjTc>c=NsteF+-Ts$r0^0x=N zJ60)l0&!+@gnZfWce;Z;Ua!|Nr=!*6baXh-@fKH@*hO#B{R9`WIp%m7C-GHf@O8p< z^qEsTB9o1lFi&fCt@Jc>dE6^og1pK$uh(L6w1(%+X>;NStzfw{1Jr2wE6gHqvnL!z zlL_O)h$2^Oz!$bqrS!|sl7CuLAhf_0;=TNH<*c3SS)-qRxyIesJ zbm>4O_q#o{joxkwspz8UK~V1no6{c% zb-R2{FT$dW6$ZA}D#&OIwxEXHp6>3zDi5XZ;2NrMYuH?Yp6%8`gU#W#Sm3^G&Q>#= zi^ZANNQJ2*5L)dDwI?JyBd$)KpM|Gdk^vPX9!qz3GCZ|L%+L>gAh6o$3v_zjPERNl z2B;@i&09Ytcw6y`R;cSREh#~mbovdW&_qj`^O6iHzt=nAq8N9^h^Pq!kCXsTZvRt8z+=-f^ zv}&g_z-^+ro1Q7>fpaRas^GS@IT4L4t<{J~a6C2`VEhQLb_LPJ!pvVZ{j{zuwW2l; zkbCzu(L&X&kx+vxf}w9NytAg>7wmG)?di@4{NN)e;bTMf=+Mm@uL}7bfa0NI}q}i zP4he*4S^nvTkVM%T1q_1V9T&@Qo|~Hpi_)0O|FQ`$H$cHLy0*(gPs906gV*^FE-HnxtKTG{0F`A~Sc-rv4}f;oCjL!dk8ig>X$X{GVbX79$N55rfh zb82VAXExP`LasHfKCjzjV@@8aauH(lEo#H)mEJ(2q-r*Hw%a1Ec`@@7}dY-aw)4fc7J zt7S7fro!ja=NM)>VSSQov}x3&Js9xQCp6M>BjV|eWHg?+DvhJc>xUpd2Tif3xr!Tz zxy(#g*keO|&1MO-SZdn6-OezUvYB((6Uoj8ifpbSdv}J~<|;NLgL69rJrPy|8lBP< zbkG`1^TKpLHdjh@p;caOBQ5c;L<@vkdLk_ycCTM7t&gpWsUgzagymOH2<nDq_c|tT)vQP#e3pHFntYsunls&s9kN-X4U9z_ zyLm4EN*>b8%&|mt>~*wOWUK))$B+z}wX{BCf@qphO|w8Rg5C|Dkkf~a8MrKhXELrQ zN1n{EW=>h>>P?5xb+CZe8xqshNtAJhD-k&+03R^PlOq`$!~EqQFjt60qR;O&_X(fmUks+gCw~tF%R1b z*v|^VQhcs?j7^Koo>k3v&ywOW>g%j_%%(=m0$YQ{+2ELe0z#@AJ*zM>>PpHev5Kwm zGuPJU%9U(q58E^pUe=hWX<>LC+d&MuLgotc5gYAlVCMnF!M?SL8f7-OqlsD|v1fR! 
z?WPl`r8XgLO__^j%m+zQo9zU67J$b|3uD&`?@Y*qsrK-L&2gFiEY+5se8> zB~xW!6AVL*u9#GXk|rUUt$Le_tgvhsTC>GSmMi;3XOQ-ru)(y_vqr4P6`yxSB(cBi z>J`lI(V>2 zHmjk*IknuR%(B~NHaIKFD-&lGMa*f-O(X#r7%j|&H+l-`B8*3?LYps9Ob8j&_X7SpL2d* zB0ncQc@*L?u>UXqQ&_3p>|O|eZpCrY8r&%;QoaX_jvQF(Msd9k^hB>0cVG&X2L#r0 zmKS$&^OZ*geb+bd!5y+Z<+oD)YsEE^7rl83Dqg5;7ubuBti&CfamsE%N2c6~Td%pw zUXfpWPEorbyZLI8g69>*eG(1dbrr{#r{O`tKP>JqY5W?k-EX1WFsI|E-FFEJIfO6A zPdl{xu7qEXpT@7b+;o_DRzbfualilV?{6!KTmhiePcyAWG zM8ex7Io2$AUl!ab;o{aflQvnxdnEZL2^Y6E+0_~ecT4<>B)nU~mrM9M3Gb3{zk~-R z{CWxRy~D^TbXxYTs&=VQ!Y61T()AKvEaAfvjz?<}$3_VkU7B6nB;j95a4X6~67G=j z%@V#$!XKCLDH1*+;R_^ur-WBX_$~>TZ?*1`@EH>SJ_)ara8<%*O87wu7q@KL)guyq zt;D}k#4Gau3naXB*vKdvq3D`+7$saDStd((p2m#SB;ndPN0)0P9DzA;)JZt`igs8f zTs#WFE;%Heo=et_MG}6i1|nT9;j1LPOTsbGB#xkj7@SPG~E#bQ)e42#sk?OUNO-S=e?!9iB-}3H>m|Hd z!ma)Li>$GNx2`cN*1?fT?$|DC|F)v7368O+yYKI4-=H zXd0s9yBK{w(KIy2M;Lt;(KIB-H#7P)qG>3OZ({U#qGLNKXc~IseT@DDs&$b& z#e#KRF3~h(#vP1)gJ>Em<8_RFg=iWg<0eMGKr~JD;zmY4OEe9Y@e)Qq zO>{BQ3Zu6WO;f%2!J_~!`~}e^MDJtt!$hA-^e#sKkZ2kL<0Fi|m*~@p-puGbiKd}2 zzKPMd5lus2e3;R<5=~Rccpsy0AeyF*@gSo^M3)l1oYB`2O+#PY!RQr4)6f>LWAtL8 z&nDW$=y^obP!=~bdN$ECRK-geJ(Fk}isA~RrxQ&>Q2gM(sQwj18;Rb>=!=P_At%0z z(dQFQLrr{y(Pt5T0nwWoeHzg;}8${C(60c+QD@4=K5jQdV1)^!lh#MLGEYUPn#7h|cG|@Cf#1%$w zA^LKn4}QklpJ)@&`xyN&(KHmqcQN{hMAMWzKEmjGiJnUIW=7vhGz|suO^m*cXqt-0 zhZ%h<(G=q2eT=?=XbSD|AfrP>*ATs&(bo}OOSFU0D~P609j{~bVxq4g+QjI2L{kWk z8yP*D=ov(pFnT7@6msJVqo)&1p*DW-Q`Y`O*Acys(H9d|3Y$AYqYVf$UI^l z9E;4d#%2{+qn}!bdR6NcGnPVF;G6ps_VJJ(^VG{**DOc=n45-A? 
z%oUq16g&D)rkHhTX<3o_I#=gE5qpUBX}T6GSTfH@q%}6#8vTd5jEacvPzOmY#{2W_ zLj~86?4d~~p;#FHHTdCgU`mU&OVD3elac*ndzd6NJD@N#G(b@M+>6nz4Zd zo+bqTjw`JGh0>v_YlOfvk^+Crirk_8ogks%LN0KC3!F{@&kzEALf`XP#-}$Ix4^)vYH%F?`7cV?$LK>z7zi~teWgl!>G`2 zT8+wl>rlbZky>kySfg)6&V-?FaDarurz{)Wm&~wiD@jSx$iIg24ONAhm?=k*-}=FZAFZb&@01jI0WL3i40)Fdd5#HEOR0v342wsE{p%OjFBlxF zpI_fLzcsqH>|iuf_MzeS80QK|z2e^9fdzgS&hw|(28{{#qvFr%d52M6~gCmC9&!QFpfhP5p)mWqd-5h<+K6Gi( z;0qrZO0FOo4f*u%oFc>RCh*#$Bj&>v^9$xNi}|k|^@hoZ=%07#pEpJ?Px^C*plU#>J3YG;mle@XMgy> zaLr%wgBsWT!7zF22ZlvY!TkegrbCx@Y|2yzMdbz zpl{;$9)5?d{aYtM@~LHI*8XQEG)9+wXzSm`5MSAm=uJoHTBPi=#_0UdY_)$g41R)< z#h&xNJrCw=Mw$~TtEunbk<(a<<}eKY7MGVSt6y5btp3`1=T>M!MMpozziPZR(Qjav z4`41NqvkRE`*)ZvemyR;UY4|%+}`%s+Oiru+M|xOyq#LU0=73ia%e|m*>37XR`+&l zZ))p3L-{uAMvdAR-D!B_=_IWV)sJDizI-~Ep!vfOiGlo~Ir^zR`bUfT z&ACIBH66q7hk8TF>GaQ%{8*1dC+?zHbAHTH^!26Cj7oJMHazkyoN;L4X4_C@pUu6^ z>i&am=*soBA^RX^6o?R%Z$d7bIlmAs`z(Icv`+z;Fb>3XoH#X~d?epmyQk-Q8`_Op ztUhOVbM0$}!4Ql`8t&_dEpn)N_So}$WvVUbDO=7CT-aUPNDXu`_@o9proG#RsnN;o zQG3}g!|iin@94qCp&Q0v^5@9ps35DIW@*@cRnpG;tnR0@E7bdRI$a-0(d_&*&h4?K zyq%9UV;Yxl1-Z-oUUSahU?*zmy#paEKjCVa{29-A|if0upA|A4w3?O~sSK1cB!28KzW%7=|%>_wnYC9M840MxPa z9|uDJaEKZue=|XwsUd8&PxbtcH^kNnIe%%c9W@LthJDcl&*hNb`&c_zb5H~Nr^cSw zUZ$e1&+)bxm)aJ?9he6XRV~NrY|CRbhTpQkkK$1D?-+$sW}>}-F%y0MeYE*d)gLGW zEn#V}{^SsaWXxwiz+iot`p<7@H-SQ?E&4ASUp_$4?UlqemUUsS9cwaK(WXV z1k)A(x{#wtEzKtEW+v<;A?)W&7=8aBZ0Ro^GW#lFRc@|eA>3)7+rVl|R$ z+rc-t>`~P)csVK9zpYp5MTz-_b;owJ=@{y))>GreB4o3}hQV2cf|^WUG*Hw56wXHV zXYA3#>PxJuVWahmFs7ZpvQopqFG=0O!x2n$%B}s|G0Nj|Y}JwYQ3Tjze3Z)jGZUhf z$HFuMrYLVBFV7a;ZjT;SJEZdL7(!9rKgbYNo;_NfQr^$AmFG$+@5eMOB=;haqu*iiB$@jSvH%HfLWR^f_me#^OFN*x%uKUab+IPdYL9-bZWqv84SGlY zrGTyglr+b%855zzauxB=@Ypv>;VGCq^6?7uV6Vj}riX>`KcccJ{J3tzbwq(r`4qBH z{V_F8bnlXQ9n-kLCFLK|$zPwA->cX^(Cd+Tu$^Ua)m&>1`Wg`xE&3V6ePoTkM0*Gs!4d%kk@7B>u|8H=W{q80 zrha3T)mUwT!Wgq@ffo_bXWZ(|NT?o|!YPuQ?Ql1dOY=Z#S`ki#j+nzn^V+-f7AG7iyr2q*N5CY`uAhG z*MdO)6>D^-ZOUubDSNC>9m&ID(lGD@g_WU7#kOMy78PUG+JlC5Bf#uYSYl$CI)TcJ 
zEyGCAT+|%hXN^rPvqhi51j#mKPjk*cBIB&FTw8RUt@hQ3(H?!84POVWnEPNP%r8^_ zrc!pIV)@AUe9KT{K~eLRx9zpBM^3dzci5wc?9sQ?1z@v9N1CxX9sFwq&bwFrI|{Z( zyP-%AMBP*ZH!76|5Y3-|kIszY_sWluDDftWF+8q3j#E0mVy)e7_{L>8L$vPsOLO!f zRy-E-d-1CPbqrf;KaQ-#z8TsT3rQO64Ffkply%Cd=EJ1N$zHoGh`X_7j71|HE=^f;bJ~jHHrZ3{jACQSe zA=lR7dTpe)Mb9a#-`;pmsWSQq7V#6YL#}=Wxs%Sv>P72mYGdBOhcW&69&O8G=0jhI zvqpb6nW&)~F&ZcK1}!_$p&O_kv4RaKe7j|%g39f%uwBRPmhUJy--Jq2`VdQLwYFAI?)XUEZs4<2$v|k4P&cn z&B1;gnIu+lHiVcD+hbiuTg-xd|3%}Nb!crK#1O}0i0A-xn=)YEykH|fgI zjShRP7Y<#AX$_T6cNT21S*lelH&*aVV%#wp6vgATH+kBZJYBDy_K%c;X;>q73~tm; zql2t2u>vQ_>>ueuiI3wV6m8~69T28}tl$EUAg&H}fkz$XdOvp+=^du=vunrTL%323 zqwpbxF+$j8;vqGngPSR<@$Ip(X3T{KXn%@^y{iF00AxQ+vxGX&QArO{ z!7)aPd2BukH4NUvS~sPe5$pOe?Kl72FG#ve@MbY$B{A=@A_vb&=~X0 zLr^(&4tWJ4EP2m*U&{jDuu~87z;IoR_}L;%tJBub^=vF@;Rv zqqn5-jR&84*&A@O=ytdm^W|8QtZoxK>;z-&bqtI!4ZK?&BhV%tek4(tJL}*Po<(7~K!61tQN);N@(r8}Tl& zk=UNmJmVFZX7ty#=#KbmSRCo-op5|>jMZBngltKpNxeG$DRhjViL7uD3nZg?T1p~D ze!DFPbD}%Cz=6_OWuAjV@r3|lGmi{e$D*^3jJ}%mh7+F;Hp0G%`Rd0M4Pc?M*O7_| z;ok-J6J~+ER!j&LcKH|+>L>S-95!Fw4P(=U`by-H^}`JCmi-&yXwrOOplm$=+QKMU zR%28!1Ig!IjJC)N+5N-85q3+l$4K`)h&y@DV=3-=kU;FAQ-5YVPD5+Uu!S%wIti1O zvq&k*|4ig*x^aZfgQ|77u%(5%F=}liRavd~T4M`Tbq!S$cs**a?td*+e_Hsc3t8o` z;(8vkleH83>qAvPAn_9sY%zsHbxGfhWz9gD*aLc&^c$)gr94*)l@3z@LsehGU7syq z6WAN9P<9h)7H<>v2heo%u&e;$($O(-Kf)SaU9`6F16ZXI<+FJFvAT3j)|lTftvZ0a zZ(H(DP+w-xp{f^2-v*&u6W6zsFiqczq`u!47Cit+Pv7XY3_aVSC)tzs(;TE;38{Cy z{j{&6wUFUP(&H?l+r!+x{e)dCuzLtY6JMLA5a!>5m}O6k6lwR1$ZjfiY!S!Vy!<1! 
z{JmxWCgzBVdF?&K7c*NR`@hfYwa$^Vi>%Qbb?p& zP9g4Z8wN)RzGWK~!B(rH{sKuVXo8jlQdHV+Gt;${#Ag zDz9+Nfth(qFv40;Uf!EgIA0%s*ve-<*fT-7dgcA)71IC4QhH(mclJp5Lx!H2DJs;T zpbDoZb{G(k)KRP()dwM-&u>KidCjxyV>tnRe*P-g`FJKt)n^Imb4F61#M%vubF6=` z*NgZJ-;UgDB)pNLIdgstNnvXZ~5pk*$X{RoLf7oJQPwQ<6h0W18c3m8?HnP~uv^h>y zQ!hdRix#u78QBwKvtjUdstm?wpedUr@S6jw@-pT!1qdl@++JE{MZEgysmOqkbl3k- z>r}-V`SO+rnRVY$|LY~PCb42);||=az%b9AE6O*~^5r$KQSNH!6Yqv(76(c0S|RsJ zF87;4?oWi=`H(yMAn5_Ux+wR_N4Z`IF^IQS%aLKrBZB+eFi*S(;xS%`>veSf=i>Tm z)_b{@hJm*rNz!!j<cVQnu+8Y3qi-;$#MPd6 z$r_6Z-8cGN0^Ln$Bcab?zNr^KC|pSKqp_?P!Q(WvHV+<~(4iroqQnmMx4V;30+VO; zCq#pR+L8RTME#D&!J-B>@phtyBneYG!{7zrWs;iRAFF<_+OWSicnZY~F6Dr_2q+Iu zhvAMQBpp!ewUR_MDpCVNFpEYZ@NyMPHK|T5JNloROH$o!m}u5wn-QbEVeo3YPNmZI zH(u0AoebEDE>miC=a*5#)={9Sv*TNRljS@hFT1O=ye$=23+{Q{{@dGQFMEqE;z7KwCbHop{VCuEI$tkdp%3=#ffdf*$ z-%vhua}&QA#IK0hvRwTSDCwYElX2<^sAY*Qx`zToBUW!$;UdKacz_fX?ja3t*+yO> z1qHGG$GoKm{jI&2OtM$qKvo>8dL2*RA>eHJGc)cx>H{y3PQ;2h@B^uLeRc`OfqyWA z?p4cC{%9ZD7Z7q^WrFsqm0WIA$bDYOZH8PP2L%6G!Jp6huMzw^1^>C=kB8CswEl?} zR41d8@Fs=;@cM zBUH{LRyf6lBF3Axzi^m@!_iFYoy7eP(+cZ>WJGv5dAdpc*PqF*aHK?VxPW=aUNsB@ zjK0bIjY7kIs_4~(y)pXy=yQqqq{Rc;>BlXd1L#4w?0*RLmf`{Cx&ROo#-I`p2|YBs z*FwTWyOJTnO1t*oCV=;5788u>V3GwRA&u9+6tY;~-415ncb9_E?k+2{)owEkJ_1;q zNvJcELYasXHJA{A2y#h6#F=1Z*-MkzKUVXYV9dj9wL1)h|AI@4p8v6`{)yU-CPQx% zOp^}^nWqZzq;HX7a2}+Qsf)~q)yI&76){5tnVRbN84mduCUBf6>N+aHO(Rn+wwvu` zlMRD;#DPf=Zu>t`)<;EBcoEUWUKXT4;{c47WDRoQQ;!nbTAhn7N@ zn7=GSplTdX_1LTKh6IpPP9pNIr+@jSS@s|nyb2vYJ?w|C2 zSMXO6e*;1d?a5i8UHqT_O3aUHkiQNYw)|6YuVzt%*U~WXtJL_m_#%pLXYlye1|luK zeI_DSi@J}>d5=Xbif@w{Z(4kNUT{0qUl8|`j5~XLI~VoPT$#ccHv7iZUaB+Ae<(%! 
zW0k;WMx*LQs0e06?~baYZ;!r7;|Rag#qU{TA!(02qOd;jgCnTB6yw-3)*8FaccD?i zV|Nj(Qt(7O%@_07jJ5WFVekm$?B801>pkz-V&5$z8RQt)Ip3>>psFo4P{x{=Cb^V_ z;zM)nr)B%VNjdgV3h`5zc=AI@epkAD;!emv4f09UP9bVUh$HV{-a)CRN#59KxXRPIl}z@>qHRYzu@iCdpgKHvP0sqwU>dV9UKJ8^e zd^Q^2BL+|6>ok{p{cc}RyGMrTvz076J3;Ag`g~=1*Z(W!Ccc()c{ueey;}JV^j&5n zefZkbKAkLNyb3lku3gK{VW0FbkWHVya=w41Kd{=54bD8S$xLW1?Q=%G^dcjVi@O8AwQ%KeShy5nm ze%#5lW>ZKPe^0m5?c4ZA?v>s(H<+gz@!5P=WNHOb z6-lZcpMFbH-uB+vfAVA?2>rX8R%Q{3j1m}zA~5;$&g?GLc>W= zXx0+UC`Jb&!GPHzG8#Sjp1&LBn7rar|D{u>7^#Xz=KJN@___sOg~k`@z}K}B-{miN z2T?|PrR0LTFLh5b@|tiFDCt=IOfgtA!F(e8+{%e8>GH`I$J2=naJ+o!x}6Yznr-$sOGpvfzcXQ6! zc~=Z$csJuh`_p4%KgD_bjo}&S(6WWmlI*>C<0tBE1r6J~mBgKf4U64=L@J^&xFX`dy?0NH-(BpHh^w8|et-N3aET z1nETVmYE7rK2myH{7vl9J&N=$Y}Ot`T87)Rrg11ADZM>DfVZBIKvX-jLq)9`rOF86_C|`EK1@NvtBd-StIxXP z;;CilQ%1_)h~N5W#>OB%ag^X}DSo@KXoh*%K^TRd=YVOvgbmp?;3mU4v zxMW>!eQ{}jp1s)U8DCu5SX@$HTvUIuq7Z)@eotupmmi;>Z279jZ#XtT(-%IRk2x3Som+rxAqU5&WDAFK7r;=0hax!7ncE}g~Lm=9RN_bAG}nD|fy7h0har^|sq z2mE!|N|{+)a!2mW;?mpmP{>gJ%;L!#3hIkZ>&Dd=*Yp?W-J3JM*i>IUd1kQ@OkkeL z?NEoT4?#`@v4H(up5w@wlanDvAsPE2V-w;9`ZynXCKYwUE6Si4 z*a*L6e^)74S4+a~jS0KYDt6>oqD=nF*w}qIS7xWycNOP{{6T)#_aY#$ggAXbh}Z%*?@d)e?u(AdH&C}a#=r{ zRlGSb_iNA_s-UMeo=kD12&R1#{HNiZ_X9hNBSw&9OBL6`IM4T{$XZamDUZb(EAltK zhS&@qWodDV5lIPHmjh$@X6WbZL%t`#!}?-daS5eXu_lT*88G^LT={V@X4q*YFV`(QnDt~6<;U1^DW0MT(0N*(7Wi_DoCQIN z^ixsR9>it`&hx*GvRK^U`jN~w$h1P{Z8+DHxk8rNcryLHWo13`-2XZ9EJvOo>_E>n z(ccivKbzNC#217R5nqyiZZF=IlXpgL@t&Mn#rtyRBdrH=t*+-oE3_ScyHUog7?bJm z;pHDiXH51b{W3;!^4`lS-kCEKvX>yOFCNKRqFcr+t&AGfeLco&`WtciFJ&ns87Ety zecoNU#VX1;n6ngVJ&RV^ln2{v$VW6;R{;C>k83QhOGNj2DZ0;MK3Id;Q1pAuyGS=X z*X*@u{&faslla8_Stc4B(uCT4ff}6QGN~3cgBX zUygEn4$r}n^KQve?npsop1Duu6#V)Wod+T z173gT3c}C0C+89ju{i~g<|^;!VEBD7w*Y_qk#aMxg1L0waOn#uXk%`{T}8^iT;%>F zx8UI-<>9X++V@3R>m((b-w!$8 zA6GDPlCpOkt{)y(@XATb9mM)zVZol0lqc!*rNV-jPErPn3U;5Q{PrX+=BrKALmtR0 z*ifjvn|HpV+?`+WgF@w%e4Gvz6#Sx4xqsY!+^D3n<$|e-;&J|HobnPi=es%1E#s8G zk_8Urh94WJ{4)=yx93+rj!)g@<8*I+<$dFn2dEO83o36PhmJ!JqJ43EVSz6!@P!4w 
zu)r4<_`(8TSl|l_d|`nvEbxT|GAy9|9SrU7UTA;kB7^Y@AjJY$v9Y6pK9q+;qv>yy z$Ta;;6q%;KfkMXwo+SPrJ090$$9^AA*elb=Dh?kO>9}f6pCQtVL|QGX@^KxiFBPv?-A*PB7IDx z+eP}4NZ%IeVUdoTCdwD-MIx;hX_H77h_pkbt3MfoDVNTk&w zZ4&7Mk#>l5l}Oi#^d6BuDALD7x?Q9%iS%ue9v11i>7sm*UL?|Lkv55Rfk-<9VOkj52@Sj0dbLr*JeS zQx+l*QFW&lSS23%n_F63lVHTCJVlFVsd&Dk#l2L#K+)nyDn3rp;!Y}Fn22Agc#)#T zxm5h5M0`)h$1A%;KTX9?R(1)yrs5NneY*H5iZ))P@)s*gw~X-fJjI~+=;lt^!R`q4 zq=6vwlv9=GZqma~ON>kM)ja&=D{VYV#V0CVVjN4wCnAk}!v$FF%3W`Wrm`u`KRJGuoSmKf=w62M{#g12a=%=R)+6ExkqZO^-A^F=pun~9 zcfG*1_7cOBLa%ZWe@PuqaM*dGyp5Fb<#im;@N-cx;hWR&Hh~vi!THznMESPBOBQiJ zYvz{)K9Yu)U;t<3Uc>p@g`7@-mo#yJ?#0mYr!4q149HB5;HUdFblfQLVWB78E1~0Y zq&aAp{}k;4_1LjP;18zZFAMyEG<;Ox_ow0i68MkO@N?0=RPGPcaJtP&_kGT>A% zZCV4yMgsqVz|-g3zXDGBXnw2NXB)#a``4(Dqs`;B z(m!VWnf)}I{%4?qIhbAU)ambf^(|xkr3pQ?a_JeO9G!mJ#PH1i-U1v?NoT~R6+(V` zzYYj|qi8Q}z8eG1?uBcLi1Y#A=P8JXVGd~XTDm8#$N!HkcsVA_di;yB;NQ-IKb!?W z0Nj|SJCP@2LVZ5`&@q=^yif4V0e%|r;YJRyXGCz?EBFsCFHSmT(8{Qvf#g9 z_yomN&&zvV1c*NYr+Pgk`USs==K+8lH9|_`S@0uFzQia_C!pi#=~>C}35v0SGrEL6 zOSAAlngt&PejZkl4za!w(`cn=f_}Ls;MBgRCA?hrJP1ysz)4Q8m~WH{{WoTj^Q@3# zZ03yY`6HaRo}yoFCvYk^IGghud7`Yz!e4~Yqi44(v*0VT;NQxEKbi&Kl?DFfUBls@3BT}1q$=b7kufXOdbN<tkKDP>fZN3VU9S`C} zPoEcroH`L#wRvC;Dy7GN9&pk#Jq`yMKb%qY>x)IX^c?{`IrnD4f5Y(1b;pxg_&)-U z|1*w@Q7F?h&7QM?>*+Hf_)9!o6g?Y6#~*-`{fCA9wf^{q;7_;Psc1MVw@#Fcu+NTK z;Ckh@3VxH|$MlaKtAOj(YjYMke-m;x3pqw1=br+1q}kzQYLGLs{fchCxV<;^K_-ydE81;Cki$1h}4_ z^iZW9ei@dydid-tco;b8Go0q2K*-EY3=2)BL%X)(Ypp zEyA2CqcvRVoaYIeP0nT~-VO@}!XC@KIjt_6+fp8Mg(BrgWns?!-* zaUI@r%yPZ*IM=I^*Js&VX4czL$(|KXn^Wh#M|UU^j`VbNl)II7PeT7lx6|zl_&s68 z>1+=;JAHu_E}yeK5(tHzuAW}S9q0~HmufGcR$W`8&*bcYVVo{<6DM9^4XshwyROdm zp6>27$Rbh9_$jPTXVbj;W{cA@x6z5{;cRPe(9T7bTAG?#Ep5)W`k8hMI2x~+TiFn(7bp}15aKMj7kF0U7Vm>8zk*bzp zqoTKuik`B9t_ksdrSd_l+wX0_`3m&E{72%DThraeFKfl-do=RlHxd z(#zX3U10<%$|HdmOATIZX0Ih%yvUEZR_k;|x!8-h;&wzo0&uu zUL(JVkN)9tb=%y)&F(IEa3tjNM#2~{*r;J4Hw@CNs4SE${I*Yra!xTQ1V>O}sEDj3G&kzVd~vR6>u z!5%LT$D9tw952KEp6)FeX3H3&slT5-8cf 
zx;xb>uy%J~l_zQLigGr9=!NWB_TD!IY4Wd{Mpwke2>7|X*F{B86w8#A>J5^IWpO+1 z&U(QfHE3IaCa;oRVMR|zc0Wk1ZJKM=u;zl+=4s8Yl^#~TmLTsoHlNpmfjK;HP8*sZ z0}?!ynodkrDok(|Pj?XgH{=N-lm|liV1p&2xMbgLte|Fft_X)I@P`m$DRr-Oy1Q07 z=_?7zHmIad5cWiph-ekrlGUvDh0RqqKl}PbeWF*wPqmV-um) zfG=$Ert8?Y0Io(&*y~#1U1c^;!@17|H6v(g1jvp6s)f#7Khx$km$L~Xx2JuqUD${M!~}eJ!1wTDE?7R_mvfed6;t!AW{$>(sVnKRI?Cd2GlW z+gc;lt1xc~glxxI&?Ms~jTEg@ofZo`z8SsPZdh!*^94zRUWK}KV+h4ry^pCf(TSv)scIo;S^xst765Co_KYs}NM0dpRk)nR^S zuCSp$*~EuM;E6D0ZNfz}8{E<4pa`wWu@IGQ)st@TL{O|Si-6ed35OBfL{AXYk`!Oa z>@PgprFv^ZvboA0=oC_$FpBulsx%Rxo|-W^%7{JT%J6De5D_Ar5+Bm*k*QM^tAUtQ zrRbkA22)RCaWRtk0FN$DiWGCDG`7)lwzg5s;{zdd&6WJ16!QM_k?SrH{ zIt{Mr#!npzFe%7B62NJb!#yT4nBpII#LqT*=tYBsA7qIj)0zUI1+EZxsJYoBPiw$Z zuLnCM=>c7fYSSe~a>PF8h?23qqiheH^Yl~zj)*3Kd zGw6+2(=dg?$I!x9vxOR55ixP8srLoDT=b==;~HMS=kPY53I1_rm&fj=@?7f}lc~gv zE_GcjCM#1bo4h_BHnqa_{`Lh}*C1qj$Rn{ygB2rNHQV@>(kcwDdh;60eyi9fgw408 z4O0Zx>E(@18XN*^w2jX6N@4C?jqMchz(cfYiQZg$9yq7+>A2v=78>oKwN_)%f(eSY zvf%4;Hjmb=v$iGozuhWRD9nlKU;!`IE>ft4=`$mq?AynSAhmNntKoa_K{1+V^Im3i zEzs(Zb<$pIqFBB4U^nK)nFE+Sg0KNqCz{j7gEs$9-*%8|ow7kQi#cVJ$3b&LZHAsMGZDb3vt*w`A{$$lf36*xbbn8oN*%K$y!tD1 z*_0U*iDS-3vy4-GR?2LmGa>q1<5L^@=>I+IWZo0VxJy&B&>QKR=kl+_;s9$(GYeQ^ zYrdE6H)J$*VlO0pa;}XKvV9Ydp=Z~O9lt6sn~l&WqW-AFcRJCRjuZH++&+&hWPvd& zoHk5j@uQn4$8@FCiIG+rz5OlW2DGPzpr81xAuX@u&92l%O=eppXPGItN!WZ-wqa(@ zj8?3rZ7J4tROf1%_pv(#*y#6RzaLw+v|o_8^^z=4XRE-}Xjx!uus9nW^Vv|8oC16< zPqrN^)TUN1Egr$ViG5%4&V{a!pCUM|{8G1o;U;?Rqm57h+gnEIuAk~5W)nW-$!y-_ zw>X+8I*M&Hihw83Xx#SM%#>J1Wt*|4?8RWwG*5pgEG1l~)_@hEl!w=JM_enAMnXL8 z($4V3T2DE4HOg1e=GJB2c7;)0u5g!9-oD0i2xg>d3p$m4TS25}bj zMU-;3I4Q?zIlj57l+zSlDW^L!U<$B>Z@H&S+=J3*1-H(+X;)O-BmdtdcnpBOQzjleNW3Sa zy)U9YU!avC9uv?=r5tDYq+meHuf0#ArMtv)HQMt;+H**n0J_hQM;7snI6t)aPPB9n z9_^rm$|X@+emoAsjyn8HvbFr$`zczgy@$t`@sChPBNZ#o=$$w%zxE!BmY$ZTzs9Gf z^O2w4vD0YneHSg=BuXOvNl#6G0W0k|p=(-x?Y%xN)!yq%x1UyjzsNsR2-M!6(NgWb zLduUvcM|1m`Fnv;74g_KJ2qFPrj$oMNLTrw$CBjy+WRw+WR6}dUAzaI{${0j0~ma*WLrtQapy7a-`Rvo?D{&YweeKU#OdB 
z^J#_^Qj8+ilK;SooL_sdNK5aci_(!^{*Q4@&ad8Z)k@@aG~G83lNUwaOD zlgOXWpJq;_5CCB&O`u4%{8Q3!Ex(+mRgvY`rImj=7l6mflm`4XeYE&R{ZrGObMk-p fJiyb}@SK=@Xys}cJsYQ&|FNx{qa-au8masrv^e4* literal 0 HcmV?d00001 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" new file mode 100644 index 0000000..bc82820 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" @@ -0,0 +1,44 @@ +''' Visualize part segmentation ''' +import os +import sys +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.append('/home/rqi/Projects/toolkits/visualization') +from show3d_balls import showpoints +import numpy as np +from tf_interpolate import three_nn, three_interpolate +import tensorflow as tf + + +pts2 = np.array([[0,0,1],[1,0,0],[0,1,0],[1,1,0]]).astype('float32') +xyz1 = np.random.random((100,3)).astype('float32') +xyz2 = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,1]]).astype('float32') + +def fun(xyz1,xyz2,pts2): + with tf.device('/cpu:0'): + points = tf.constant(np.expand_dims(pts2,0)) + xyz1 = tf.constant(np.expand_dims(xyz1,0)) + xyz2 = tf.constant(np.expand_dims(xyz2,0)) + dist, idx = three_nn(xyz1, xyz2) + #weight = tf.ones_like(dist)/3.0 + dist = tf.maximum(dist, 1e-10) + norm = 
tf.reduce_sum((1.0/dist),axis=2,keep_dims=True) + norm = tf.tile(norm, [1,1,3]) + print (norm) + weight = (1.0/dist) / norm + interpolated_points = three_interpolate(points, idx, weight) + with tf.Session('') as sess: + tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight]) + #print w + pts1 = pts1.squeeze() + return pts1 + +pts1 = fun(xyz1,xyz2,pts2) +all_pts = np.zeros((104,3)) +all_pts[0:100,:] = pts1 +all_pts[100:,:] = pts2 +all_xyz = np.zeros((104,3)) +all_xyz[0:100,:]=xyz1 +all_xyz[100:,:]=xyz2 +showpoints(xyz2, pts2, ballradius=8) +showpoints(xyz1, pts1, ballradius=8) +showpoints(all_xyz, all_pts, ballradius=8) -- Gitee From 0ede0e3c89ce160077fd5d66fe6c81c3b135718f Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:06 +0000 Subject: [PATCH 46/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=203d=5Finterpolation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/3d_interpolation/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/.keep" new file mode 100644 index 0000000..e69de29 -- 
Gitee From d09575750846d44014ba942e7517f9a9ec653a07 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:25 +0000 Subject: [PATCH 47/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/interpolate.cpp?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/interpolate.cpp" | 169 ------------------ 1 file changed, 169 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" deleted file mode 100644 index b7d0dd0..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/interpolate.cpp" +++ /dev/null @@ -1,169 +0,0 @@ -#include -#include -#include // memset -#include // rand, RAND_MAX -#include // 
sqrtf -#include -#include -using namespace std; -float randomf(){ - return (rand()+0.5)/(RAND_MAX+1.0); -} -static double get_time(){ - timespec tp; - clock_gettime(CLOCK_MONOTONIC,&tp); - return tp.tv_sec+tp.tv_nsec*1e-9; -} - -// Find three nearest neigbors with square distance -// input: xyz1 (b,n,3), xyz2(b,m,3) -// output: dist (b,n,3), idx (b,n,3) -void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { - for (int i=0;i Date: Fri, 28 Oct 2022 11:31:31 +0000 Subject: [PATCH 48/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/tf=5Finterpolate.cpp?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/tf_interpolate.cpp" | 267 ------------------ 1 file changed, 267 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.cpp" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.cpp" deleted file mode 100644 index 75cf8c8..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.cpp" +++ /dev/null @@ -1,267 +0,0 @@ -#include -#include -#include // memset -#include // rand, RAND_MAX -#include // sqrtf -#include "tensorflow/core/framework/op.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/common_shape_fns.h" - -#pragma GCC diagnostic ignored "-Wunused-result" - -using namespace tensorflow; - -REGISTER_OP("ThreeNN") - .Input("xyz1: float32") - .Input("xyz2: float32") - .Output("dist: float32") - .Output("idx: int32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - c->set_output(0, c->input(0)); - c->set_output(1, c->input(0)); - return Status::OK(); - }); -REGISTER_OP("ThreeInterpolate") - .Input("points: float32") - .Input("idx: int32") - .Input("weight: float32") - .Output("out: float32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c) - c->WithRank(c->input(0), 3, &dims1); - ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3) - c->WithRank(c->input(1), 3, &dims2); - // (b,n,c) - ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); - c->set_output(0, output); - return Status::OK(); - }); -REGISTER_OP("ThreeInterpolateGrad") - .Input("points: float32") - .Input("idx: int32") - .Input("weight: float32") - .Input("grad_out: float32") - .Output("grad_points: float32") - .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { - c->set_output(0, c->input(0)); - return Status::OK(); - }); - -float randomf(){ - return (rand()+0.5)/(RAND_MAX+1.0); -} -static double get_time(){ - 
timespec tp; - clock_gettime(CLOCK_MONOTONIC,&tp); - return tp.tv_sec+tp.tv_nsec*1e-9; -} - -// Find three nearest neigbors with square distance -// input: xyz1 (b,n,3), xyz2(b,m,3) -// output: dist (b,n,3), idx (b,n,3) -void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { - for (int i=0;iinput(0); - OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape.")); - int b = xyz1_tensor.shape().dim_size(0); - int n = xyz1_tensor.shape().dim_size(1); - - const Tensor& xyz2_tensor = context->input(1); - OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape.")); - int m = xyz2_tensor.shape().dim_size(1); - - Tensor *dist_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor)); - Tensor *idx_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor)); - - auto xyz1_flat = xyz1_tensor.flat(); - const float *xyz1 = &(xyz1_flat(0)); - auto xyz2_flat = xyz2_tensor.flat(); - const float *xyz2 = &(xyz2_flat(0)); - auto dist_flat = dist_tensor->flat(); - float *dist = &(dist_flat(0)); - auto idx_flat = idx_tensor->flat(); - int *idx = &(idx_flat(0)); - threenn_cpu(b,n,m,xyz1,xyz2,dist,idx); - } -}; -REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp); - - - -class ThreeInterpolateOp: public OpKernel{ - public: - explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){} - - void Compute(OpKernelContext * context) override { - const Tensor& points_tensor=context->input(0); - OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape")); - int b = points_tensor.shape().dim_size(0); - int m = points_tensor.shape().dim_size(1); - int c = points_tensor.shape().dim_size(2); - - 
const Tensor& idx_tensor=context->input(1); - OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape")); - int n = idx_tensor.shape().dim_size(1); - const Tensor& weight_tensor=context->input(2); - OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) weight shape")); - - Tensor * out_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor)); - - auto points_flat = points_tensor.flat(); - const float *points = &(points_flat(0)); - auto idx_flat = idx_tensor.flat(); - const int *idx = &(idx_flat(0)); - auto weight_flat = weight_tensor.flat(); - const float *weight = &(weight_flat(0)); - auto out_flat = out_tensor->flat(); - float *out = &(out_flat(0)); - threeinterpolate_cpu(b,m,c,n,points,idx,weight,out); - } -}; -REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp); - - -class ThreeInterpolateGradOp: public OpKernel{ - public: - explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){} - - void Compute(OpKernelContext * context) override { - const Tensor& points_tensor=context->input(0); - OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape")); - int b = points_tensor.shape().dim_size(0); - int m = points_tensor.shape().dim_size(1); - int c = points_tensor.shape().dim_size(2); - - const Tensor& idx_tensor=context->input(1); - OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx shape")); - int n = idx_tensor.shape().dim_size(1); - const Tensor& weight_tensor=context->input(2); - 
OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape")); - - const Tensor& grad_out_tensor=context->input(3); - OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape")); - - Tensor * grad_points_tensor = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor)); - - auto points_flat = points_tensor.flat(); - const float *points = &(points_flat(0)); - auto idx_flat = idx_tensor.flat(); - const int *idx = &(idx_flat(0)); - auto weight_flat = weight_tensor.flat(); - const float *weight = &(weight_flat(0)); - auto grad_out_flat = grad_out_tensor.flat(); - const float *grad_out = &(grad_out_flat(0)); - auto grad_points_flat = grad_points_tensor->flat(); - float *grad_points = &(grad_points_flat(0)); - memset(grad_points, 0, sizeof(float)*b*m*c); - threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points); - } -}; -REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp); - - -- Gitee From b59bf3eeaae6b81ddc457cb12ef3bbd76432d858 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:36 +0000 Subject: [PATCH 49/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/tf=5Finterpolate.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../tf_ops/tf_interpolate.py" | 59 ------------------- 1 file changed, 59 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" deleted file mode 100644 index 2ef1edd..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate.py" +++ /dev/null @@ -1,59 +0,0 @@ -import tensorflow as tf -from tensorflow.python.framework import ops -import sys -import os -BASE_DIR = os.path.dirname(__file__) -sys.path.append(BASE_DIR) -interpolate_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_interpolate_so.so')) -def three_nn(xyz1, xyz2): - ''' - Input: - xyz1: (b,n,3) float32 array, unknown points - xyz2: (b,m,3) float32 array, known points - Output: - dist: (b,n,3) float32 array, distances to known points - idx: (b,n,3) int32 array, indices to known points - ''' - return interpolate_module.three_nn(xyz1, xyz2) -ops.NoGradient('ThreeNN') -def three_interpolate(points, idx, weight): - ''' - Input: - points: (b,m,c) float32 array, known points - 
idx: (b,n,3) int32 array, indices to known points - weight: (b,n,3) float32 array, weights on known points - Output: - out: (b,n,c) float32 array, interpolated point values - ''' - return interpolate_module.three_interpolate(points, idx, weight) -@tf.RegisterGradient('ThreeInterpolate') -def _three_interpolate_grad(op, grad_out): - points = op.inputs[0] - idx = op.inputs[1] - weight = op.inputs[2] - return [interpolate_module.three_interpolate_grad(points, idx, weight, grad_out), None, None] - -if __name__=='__main__': - import numpy as np - import time - np.random.seed(100) - pts = np.random.random((32,128,64)).astype('float32') - tmp1 = np.random.random((32,512,3)).astype('float32') - tmp2 = np.random.random((32,128,3)).astype('float32') - with tf.device('/cpu:0'): - points = tf.constant(pts) - xyz1 = tf.constant(tmp1) - xyz2 = tf.constant(tmp2) - dist, idx = three_nn(xyz1, xyz2) - weight = tf.ones_like(dist)/3.0 - interpolated_points = three_interpolate(points, idx, weight) - with tf.Session('') as sess: - now = time.time() - for _ in range(100): - ret = sess.run(interpolated_points) - print (time.time() - now) - print (ret.shape, ret.dtype) - #print ret - - - -- Gitee From bdd3effe9b86a742e8c755abf46ad56ae9b1014a Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:41 +0000 Subject: [PATCH 50/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/tf=5Finterpolate=5Fcompile.sh?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/tf_interpolate_compile.sh" | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" deleted file mode 100644 index 8af3cf7..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_compile.sh" +++ /dev/null @@ -1,7 +0,0 @@ -# TF1.2 -#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 - -# TF1.4 -#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 - -g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I 
/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 -- Gitee From b28c228414f0d4693939bc73e02852d0fb757fa9 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:45 +0000 Subject: [PATCH 51/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/tf=5Finterpolate=5Fop=5Ftest.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/tf_interpolate_op_test.py" | 24 ------------------- 1 file changed, 24 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" deleted file mode 100644 index 030456d..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_op_test.py" +++ /dev/null @@ -1,24 +0,0 @@ -import tensorflow as tf -import numpy as np -from tf_interpolate import three_nn, three_interpolate - -class GroupPointTest(tf.test.TestCase): - def test(self): - pass - - def test_grad(self): - with self.test_session(): - points = tf.constant(np.random.random((1,8,16)).astype('float32')) - print (points) - xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) - xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) - dist, idx = three_nn(xyz1, xyz2) - weight = tf.ones_like(dist)/3.0 - interpolated_points = three_interpolate(points, idx, weight) - print(interpolated_points) - err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16)) - print (err) - self.assertLess(err, 1e-4) - -if __name__=='__main__': - tf.test.main() -- Gitee From 669b1a2641582e83dee981ba320ff59ba47f019b Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:50 +0000 Subject: [PATCH 52/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/tf=5Finterpolate=5Fso.so?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/tf_interpolate_so.so" | Bin 52296 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/tf_interpolate_so.so" deleted file mode 100644 index 66d67ca3c5d2c52602144e24fc4a95565690b9be..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 52296 zcmeIb3wTu3)jxc4BM~tZ6>PkprM5;vpuW~prM_sbp!IUbU_n%*YRUh%_TFb^&&(OX zzW?w0p67c!I_$H4XYIAuUVH7eFXxh*E%v6|oE$|v@|3F-qV`SaNS(N_bDKCpQm51? 
zh4}Z&%DIfUAbBa{?4mvnX{n+Z89-%>!_TpSkw}f~B$3iZ9E(J%<;w*|(E1^|N~tj= zJt(T8rA6$jqU0l`-%fEoJuMS_T1vchINpl1)44Cq$c*$ZAx}%Sd|yMpgz7x&G5;+Q zSLccHve{*G9%s~2Q9P^LL|mfFv*ylMW`ycm9$I;C!Iw6F_LC_`magtGK7J47yb!;+ z_>o;s!q2Eon7Z!tndk58pVv5{B)?9{Yg9~hnFOOUx7?}}^(jR~MS16y=KrC6kt=uk zNw+F5)GCd}LghbCJri6^UJKGzB5=&dZvlP_@mq|a6Tjv7t-!AxzfSzR@VgGbmH5%o zjh`RC0DeLIuE#Hg-zxmp;77-eJYoOdgnw_wuMfXl@uQ<3zxDY2`dd%W+_JN_V13z( z+wOnXI^%Rl|7DY>PpbDiW2;ABJW%}6$MN$PFMNOAY5SWSE_m8HV`tmLTOQn6x%5Hv zr24hLSyFbO>ek-(UM&0T9@o_$wruNv>dMXc|15Co%$H4jUi_x7Y02PMa{?`Y`pNZA z9l87AL%$2;{A|8w>RZ--O}fQ4dFuT)zIRXI6`eUhGe5q5=inu`jeIrq>%A*(dU^3V zKfU%B#ovk@eSGNs;^Z-zd4_C68{p+5hAA?WJKDL+Hs zp46R&v%h!8b!0J$>4;==pJ$a*Ls;o}4QY-1P8$S?u}WS@;LW>&v+= zi#|`nAN0ySEsOkJS^Vn3EbY4_OS$t=s9w3>&*J}kv*=l#1>cyZKi-g~+?%rKe_a;) zcW054lcj&XmBp_PWzqjq^b5WIIF`kKuFoPTi~&>6&Sz%nclT$h@AFyY&&Xnjnk@Zs zZ5ID2%i<@`WwFD>S@fKjh5y_v@#C&EjvaEOGn#Ec)z(pXjxF zQ_^z~H!xCrUTs7~y?(4tz`CKZL)Xr%Y5FVjfEEO2;JN zOwMdhLv4244*L*3vXs+Tp`GaXDSm`I()b4iUMFz489NZ>VgFN<{XPMRO4SN)@lqkN804_KI># zIjnq!l*$dJ$yq+0^N$FAqu@6~9?2<9v*)#ld(1v_IpckT|9!;a^ORy`Ux))nf&T*i zh4@S6aA2~)|BiB*9cXfj<58X{!|)%%6&D8zMY}jzz0hc3j-M>}pB8wDz(KNO3=AY^ z*EO7G6J(rDf!*}V=r|P)M0S2Ct-bb&au24pSEnd?wZr%99xv#?{L=y&^seYE~KTj)8QW}mreLT0x#`)@#fOO!K}O?MiF zWS+c>g!mn4ayAP7htl}JC*&U#@~fc-9p68N%h@FQ*KHhDt{5-smF92TMZJv8oUvTU z8D!;_X6SiemUb@^_(36Gi^+QtB=y?;ccT9eH}Z0C66M}4aAO0_sZ+N(>n7n+qF zl%9qaZakR+3(xMqcLJii-5YW`$hs zQi0hRsL*WJw1Tr<7lpTmE1mN^L9@w8jUA4J0&6Vu=CrzOZVSZwS9fPMYeR*_F}KxO zVba>PA>c>;9(TkW@FVkl|7x$l9m2ys-EahFXK$}F=m~`b?_O`3n@Fttw2AY$DbjjLeAG;eF9yjTc>c=NsteF+-Ts$r0^0x=N zJ60)l0&!+@gnZfWce;Z;Ua!|Nr=!*6baXh-@fKH@*hO#B{R9`WIp%m7C-GHf@O8p< z^qEsTB9o1lFi&fCt@Jc>dE6^og1pK$uh(L6w1(%+X>;NStzfw{1Jr2wE6gHqvnL!z zlL_O)h$2^Oz!$bqrS!|sl7CuLAhf_0;=TNH<*c3SS)-qRxyIesJ zbm>4O_q#o{joxkwspz8UK~V1no6{c% zb-R2{FT$dW6$ZA}D#&OIwxEXHp6>3zDi5XZ;2NrMYuH?Yp6%8`gU#W#Sm3^G&Q>#= zi^ZANNQJ2*5L)dDwI?JyBd$)KpM|Gdk^vPX9!qz3GCZ|L%+L>gAh6o$3v_zjPERNl z2B;@i&09Ytcw6y`R;cSREh#~mbovdW&_qj`^O6iHzt=nAq8N9^h^Pq!kCXsTZvRt8z+=-f^ 
zv}&g_z-^+ro1Q7>fpaRas^GS@IT4L4t<{J~a6C2`VEhQLb_LPJ!pvVZ{j{zuwW2l; zkbCzu(L&X&kx+vxf}w9NytAg>7wmG)?di@4{NN)e;bTMf=+Mm@uL}7bfa0NI}q}i zP4he*4S^nvTkVM%T1q_1V9T&@Qo|~Hpi_)0O|FQ`$H$cHLy0*(gPs906gV*^FE-HnxtKTG{0F`A~Sc-rv4}f;oCjL!dk8ig>X$X{GVbX79$N55rfh zb82VAXExP`LasHfKCjzjV@@8aauH(lEo#H)mEJ(2q-r*Hw%a1Ec`@@7}dY-aw)4fc7J zt7S7fro!ja=NM)>VSSQov}x3&Js9xQCp6M>BjV|eWHg?+DvhJc>xUpd2Tif3xr!Tz zxy(#g*keO|&1MO-SZdn6-OezUvYB((6Uoj8ifpbSdv}J~<|;NLgL69rJrPy|8lBP< zbkG`1^TKpLHdjh@p;caOBQ5c;L<@vkdLk_ycCTM7t&gpWsUgzagymOH2<nDq_c|tT)vQP#e3pHFntYsunls&s9kN-X4U9z_ zyLm4EN*>b8%&|mt>~*wOWUK))$B+z}wX{BCf@qphO|w8Rg5C|Dkkf~a8MrKhXELrQ zN1n{EW=>h>>P?5xb+CZe8xqshNtAJhD-k&+03R^PlOq`$!~EqQFjt60qR;O&_X(fmUks+gCw~tF%R1b z*v|^VQhcs?j7^Koo>k3v&ywOW>g%j_%%(=m0$YQ{+2ELe0z#@AJ*zM>>PpHev5Kwm zGuPJU%9U(q58E^pUe=hWX<>LC+d&MuLgotc5gYAlVCMnF!M?SL8f7-OqlsD|v1fR! z?WPl`r8XgLO__^j%m+zQo9zU67J$b|3uD&`?@Y*qsrK-L&2gFiEY+5se8> zB~xW!6AVL*u9#GXk|rUUt$Le_tgvhsTC>GSmMi;3XOQ-ru)(y_vqr4P6`yxSB(cBi z>J`lI(V>2 zHmjk*IknuR%(B~NHaIKFD-&lGMa*f-O(X#r7%j|&H+l-`B8*3?LYps9Ob8j&_X7SpL2d* zB0ncQc@*L?u>UXqQ&_3p>|O|eZpCrY8r&%;QoaX_jvQF(Msd9k^hB>0cVG&X2L#r0 zmKS$&^OZ*geb+bd!5y+Z<+oD)YsEE^7rl83Dqg5;7ubuBti&CfamsE%N2c6~Td%pw zUXfpWPEorbyZLI8g69>*eG(1dbrr{#r{O`tKP>JqY5W?k-EX1WFsI|E-FFEJIfO6A zPdl{xu7qEXpT@7b+;o_DRzbfualilV?{6!KTmhiePcyAWG zM8ex7Io2$AUl!ab;o{aflQvnxdnEZL2^Y6E+0_~ecT4<>B)nU~mrM9M3Gb3{zk~-R z{CWxRy~D^TbXxYTs&=VQ!Y61T()AKvEaAfvjz?<}$3_VkU7B6nB;j95a4X6~67G=j z%@V#$!XKCLDH1*+;R_^ur-WBX_$~>TZ?*1`@EH>SJ_)ara8<%*O87wu7q@KL)guyq zt;D}k#4Gau3naXB*vKdvq3D`+7$saDStd((p2m#SB;ndPN0)0P9DzA;)JZt`igs8f zTs#WFE;%Heo=et_MG}6i1|nT9;j1LPOTsbGB#xkj7@SPG~E#bQ)e42#sk?OUNO-S=e?!9iB-}3H>m|Hd z!ma)Li>$GNx2`cN*1?fT?$|DC|F)v7368O+yYKI4-=H zXd0s9yBK{w(KIy2M;Lt;(KIB-H#7P)qG>3OZ({U#qGLNKXc~IseT@DDs&$b& z#e#KRF3~h(#vP1)gJ>Em<8_RFg=iWg<0eMGKr~JD;zmY4OEe9Y@e)Qq zO>{BQ3Zu6WO;f%2!J_~!`~}e^MDJtt!$hA-^e#sKkZ2kL<0Fi|m*~@p-puGbiKd}2 zzKPMd5lus2e3;R<5=~Rccpsy0AeyF*@gSo^M3)l1oYB`2O+#PY!RQr4)6f>LWAtL8 
z&nDW$=y^obP!=~bdN$ECRK-geJ(Fk}isA~RrxQ&>Q2gM(sQwj18;Rb>=!=P_At%0z z(dQFQLrr{y(Pt5T0nwWoeHzg;}8${C(60c+QD@4=K5jQdV1)^!lh#MLGEYUPn#7h|cG|@Cf#1%$w zA^LKn4}QklpJ)@&`xyN&(KHmqcQN{hMAMWzKEmjGiJnUIW=7vhGz|suO^m*cXqt-0 zhZ%h<(G=q2eT=?=XbSD|AfrP>*ATs&(bo}OOSFU0D~P609j{~bVxq4g+QjI2L{kWk z8yP*D=ov(pFnT7@6msJVqo)&1p*DW-Q`Y`O*Acys(H9d|3Y$AYqYVf$UI^l z9E;4d#%2{+qn}!bdR6NcGnPVF;G6ps_VJJ(^VG{**DOc=n45-A? z%oUq16g&D)rkHhTX<3o_I#=gE5qpUBX}T6GSTfH@q%}6#8vTd5jEacvPzOmY#{2W_ zLj~86?4d~~p;#FHHTdCgU`mU&OVD3elac*ndzd6NJD@N#G(b@M+>6nz4Zd zo+bqTjw`JGh0>v_YlOfvk^+Crirk_8ogks%LN0KC3!F{@&kzEALf`XP#-}$Ix4^)vYH%F?`7cV?$LK>z7zi~teWgl!>G`2 zT8+wl>rlbZky>kySfg)6&V-?FaDarurz{)Wm&~wiD@jSx$iIg24ONAhm?=k*-}=FZAFZb&@01jI0WL3i40)Fdd5#HEOR0v342wsE{p%OjFBlxF zpI_fLzcsqH>|iuf_MzeS80QK|z2e^9fdzgS&hw|(28{{#qvFr%d52M6~gCmC9&!QFpfhP5p)mWqd-5h<+K6Gi( z;0qrZO0FOo4f*u%oFc>RCh*#$Bj&>v^9$xNi}|k|^@hoZ=%07#pEpJ?Px^C*plU#>J3YG;mle@XMgy> zaLr%wgBsWT!7zF22ZlvY!TkegrbCx@Y|2yzMdbz zpl{;$9)5?d{aYtM@~LHI*8XQEG)9+wXzSm`5MSAm=uJoHTBPi=#_0UdY_)$g41R)< z#h&xNJrCw=Mw$~TtEunbk<(a<<}eKY7MGVSt6y5btp3`1=T>M!MMpozziPZR(Qjav z4`41NqvkRE`*)ZvemyR;UY4|%+}`%s+Oiru+M|xOyq#LU0=73ia%e|m*>37XR`+&l zZ))p3L-{uAMvdAR-D!B_=_IWV)sJDizI-~Ep!vfOiGlo~Ir^zR`bUfT z&ACIBH66q7hk8TF>GaQ%{8*1dC+?zHbAHTH^!26Cj7oJMHazkyoN;L4X4_C@pUu6^ z>i&am=*soBA^RX^6o?R%Z$d7bIlmAs`z(Icv`+z;Fb>3XoH#X~d?epmyQk-Q8`_Op ztUhOVbM0$}!4Ql`8t&_dEpn)N_So}$WvVUbDO=7CT-aUPNDXu`_@o9proG#RsnN;o zQG3}g!|iin@94qCp&Q0v^5@9ps35DIW@*@cRnpG;tnR0@E7bdRI$a-0(d_&*&h4?K zyq%9UV;Yxl1-Z-oUUSahU?*zmy#paEKjCVa{29-A|if0upA|A4w3?O~sSK1cB!28KzW%7=|%>_wnYC9M840MxPa z9|uDJaEKZue=|XwsUd8&PxbtcH^kNnIe%%c9W@LthJDcl&*hNb`&c_zb5H~Nr^cSw zUZ$e1&+)bxm)aJ?9he6XRV~NrY|CRbhTpQkkK$1D?-+$sW}>}-F%y0MeYE*d)gLGW zEn#V}{^SsaWXxwiz+iot`p<7@H-SQ?E&4ASUp_$4?UlqemUUsS9cwaK(WXV z1k)A(x{#wtEzKtEW+v<;A?)W&7=8aBZ0Ro^GW#lFRc@|eA>3)7+rVl|R$ z+rc-t>`~P)csVK9zpYp5MTz-_b;owJ=@{y))>GreB4o3}hQV2cf|^WUG*Hw56wXHV zXYA3#>PxJuVWahmFs7ZpvQopqFG=0O!x2n$%B}s|G0Nj|Y}JwYQ3Tjze3Z)jGZUhf 
z$HFuMrYLVBFV7a;ZjT;SJEZdL7(!9rKgbYNo;_NfQr^$AmFG$+@5eMOB=;haqu*iiB$@jSvH%HfLWR^f_me#^OFN*x%uKUab+IPdYL9-bZWqv84SGlY zrGTyglr+b%855zzauxB=@Ypv>;VGCq^6?7uV6Vj}riX>`KcccJ{J3tzbwq(r`4qBH z{V_F8bnlXQ9n-kLCFLK|$zPwA->cX^(Cd+Tu$^Ua)m&>1`Wg`xE&3V6ePoTkM0*Gs!4d%kk@7B>u|8H=W{q80 zrha3T)mUwT!Wgq@ffo_bXWZ(|NT?o|!YPuQ?Ql1dOY=Z#S`ki#j+nzn^V+-f7AG7iyr2q*N5CY`uAhG z*MdO)6>D^-ZOUubDSNC>9m&ID(lGD@g_WU7#kOMy78PUG+JlC5Bf#uYSYl$CI)TcJ zEyGCAT+|%hXN^rPvqhi51j#mKPjk*cBIB&FTw8RUt@hQ3(H?!84POVWnEPNP%r8^_ zrc!pIV)@AUe9KT{K~eLRx9zpBM^3dzci5wc?9sQ?1z@v9N1CxX9sFwq&bwFrI|{Z( zyP-%AMBP*ZH!76|5Y3-|kIszY_sWluDDftWF+8q3j#E0mVy)e7_{L>8L$vPsOLO!f zRy-E-d-1CPbqrf;KaQ-#z8TsT3rQO64Ffkply%Cd=EJ1N$zHoGh`X_7j71|HE=^f;bJ~jHHrZ3{jACQSe zA=lR7dTpe)Mb9a#-`;pmsWSQq7V#6YL#}=Wxs%Sv>P72mYGdBOhcW&69&O8G=0jhI zvqpb6nW&)~F&ZcK1}!_$p&O_kv4RaKe7j|%g39f%uwBRPmhUJy--Jq2`VdQLwYFAI?)XUEZs4<2$v|k4P&cn z&B1;gnIu+lHiVcD+hbiuTg-xd|3%}Nb!crK#1O}0i0A-xn=)YEykH|fgI zjShRP7Y<#AX$_T6cNT21S*lelH&*aVV%#wp6vgATH+kBZJYBDy_K%c;X;>q73~tm; zql2t2u>vQ_>>ueuiI3wV6m8~69T28}tl$EUAg&H}fkz$XdOvp+=^du=vunrTL%323 zqwpbxF+$j8;vqGngPSR<@$Ip(X3T{KXn%@^y{iF00AxQ+vxGX&QArO{ z!7)aPd2BukH4NUvS~sPe5$pOe?Kl72FG#ve@MbY$B{A=@A_vb&=~X0 zLr^(&4tWJ4EP2m*U&{jDuu~87z;IoR_}L;%tJBub^=vF@;Rv zqqn5-jR&84*&A@O=ytdm^W|8QtZoxK>;z-&bqtI!4ZK?&BhV%tek4(tJL}*Po<(7~K!61tQN);N@(r8}Tl& zk=UNmJmVFZX7ty#=#KbmSRCo-op5|>jMZBngltKpNxeG$DRhjViL7uD3nZg?T1p~D ze!DFPbD}%Cz=6_OWuAjV@r3|lGmi{e$D*^3jJ}%mh7+F;Hp0G%`Rd0M4Pc?M*O7_| z;ok-J6J~+ER!j&LcKH|+>L>S-95!Fw4P(=U`by-H^}`JCmi-&yXwrOOplm$=+QKMU zR%28!1Ig!IjJC)N+5N-85q3+l$4K`)h&y@DV=3-=kU;FAQ-5YVPD5+Uu!S%wIti1O zvq&k*|4ig*x^aZfgQ|77u%(5%F=}liRavd~T4M`Tbq!S$cs**a?td*+e_Hsc3t8o` z;(8vkleH83>qAvPAn_9sY%zsHbxGfhWz9gD*aLc&^c$)gr94*)l@3z@LsehGU7syq z6WAN9P<9h)7H<>v2heo%u&e;$($O(-Kf)SaU9`6F16ZXI<+FJFvAT3j)|lTftvZ0a zZ(H(DP+w-xp{f^2-v*&u6W6zsFiqczq`u!47Cit+Pv7XY3_aVSC)tzs(;TE;38{Cy z{j{&6wUFUP(&H?l+r!+x{e)dCuzLtY6JMLA5a!>5m}O6k6lwR1$ZjfiY!S!Vy!<1! 
z{JmxWCgzBVdF?&K7c*NR`@hfYwa$^Vi>%Qbb?p& zP9g4Z8wN)RzGWK~!B(rH{sKuVXo8jlQdHV+Gt;${#Ag zDz9+Nfth(qFv40;Uf!EgIA0%s*ve-<*fT-7dgcA)71IC4QhH(mclJp5Lx!H2DJs;T zpbDoZb{G(k)KRP()dwM-&u>KidCjxyV>tnRe*P-g`FJKt)n^Imb4F61#M%vubF6=` z*NgZJ-;UgDB)pNLIdgstNnvXZ~5pk*$X{RoLf7oJQPwQ<6h0W18c3m8?HnP~uv^h>y zQ!hdRix#u78QBwKvtjUdstm?wpedUr@S6jw@-pT!1qdl@++JE{MZEgysmOqkbl3k- z>r}-V`SO+rnRVY$|LY~PCb42);||=az%b9AE6O*~^5r$KQSNH!6Yqv(76(c0S|RsJ zF87;4?oWi=`H(yMAn5_Ux+wR_N4Z`IF^IQS%aLKrBZB+eFi*S(;xS%`>veSf=i>Tm z)_b{@hJm*rNz!!j<cVQnu+8Y3qi-;$#MPd6 z$r_6Z-8cGN0^Ln$Bcab?zNr^KC|pSKqp_?P!Q(WvHV+<~(4iroqQnmMx4V;30+VO; zCq#pR+L8RTME#D&!J-B>@phtyBneYG!{7zrWs;iRAFF<_+OWSicnZY~F6Dr_2q+Iu zhvAMQBpp!ewUR_MDpCVNFpEYZ@NyMPHK|T5JNloROH$o!m}u5wn-QbEVeo3YPNmZI zH(u0AoebEDE>miC=a*5#)={9Sv*TNRljS@hFT1O=ye$=23+{Q{{@dGQFMEqE;z7KwCbHop{VCuEI$tkdp%3=#ffdf*$ z-%vhua}&QA#IK0hvRwTSDCwYElX2<^sAY*Qx`zToBUW!$;UdKacz_fX?ja3t*+yO> z1qHGG$GoKm{jI&2OtM$qKvo>8dL2*RA>eHJGc)cx>H{y3PQ;2h@B^uLeRc`OfqyWA z?p4cC{%9ZD7Z7q^WrFsqm0WIA$bDYOZH8PP2L%6G!Jp6huMzw^1^>C=kB8CswEl?} zR41d8@Fs=;@cM zBUH{LRyf6lBF3Axzi^m@!_iFYoy7eP(+cZ>WJGv5dAdpc*PqF*aHK?VxPW=aUNsB@ zjK0bIjY7kIs_4~(y)pXy=yQqqq{Rc;>BlXd1L#4w?0*RLmf`{Cx&ROo#-I`p2|YBs z*FwTWyOJTnO1t*oCV=;5788u>V3GwRA&u9+6tY;~-415ncb9_E?k+2{)owEkJ_1;q zNvJcELYasXHJA{A2y#h6#F=1Z*-MkzKUVXYV9dj9wL1)h|AI@4p8v6`{)yU-CPQx% zOp^}^nWqZzq;HX7a2}+Qsf)~q)yI&76){5tnVRbN84mduCUBf6>N+aHO(Rn+wwvu` zlMRD;#DPf=Zu>t`)<;EBcoEUWUKXT4;{c47WDRoQQ;!nbTAhn7N@ zn7=GSplTdX_1LTKh6IpPP9pNIr+@jSS@s|nyb2vYJ?w|C2 zSMXO6e*;1d?a5i8UHqT_O3aUHkiQNYw)|6YuVzt%*U~WXtJL_m_#%pLXYlye1|luK zeI_DSi@J}>d5=Xbif@w{Z(4kNUT{0qUl8|`j5~XLI~VoPT$#ccHv7iZUaB+Ae<(%! 
zW0k;WMx*LQs0e06?~baYZ;!r7;|Rag#qU{TA!(02qOd;jgCnTB6yw-3)*8FaccD?i zV|Nj(Qt(7O%@_07jJ5WFVekm$?B801>pkz-V&5$z8RQt)Ip3>>psFo4P{x{=Cb^V_ z;zM)nr)B%VNjdgV3h`5zc=AI@epkAD;!emv4f09UP9bVUh$HV{-a)CRN#59KxXRPIl}z@>qHRYzu@iCdpgKHvP0sqwU>dV9UKJ8^e zd^Q^2BL+|6>ok{p{cc}RyGMrTvz076J3;Ag`g~=1*Z(W!Ccc()c{ueey;}JV^j&5n zefZkbKAkLNyb3lku3gK{VW0FbkWHVya=w41Kd{=54bD8S$xLW1?Q=%G^dcjVi@O8AwQ%KeShy5nm ze%#5lW>ZKPe^0m5?c4ZA?v>s(H<+gz@!5P=WNHOb z6-lZcpMFbH-uB+vfAVA?2>rX8R%Q{3j1m}zA~5;$&g?GLc>W= zXx0+UC`Jb&!GPHzG8#Sjp1&LBn7rar|D{u>7^#Xz=KJN@___sOg~k`@z}K}B-{miN z2T?|PrR0LTFLh5b@|tiFDCt=IOfgtA!F(e8+{%e8>GH`I$J2=naJ+o!x}6Yznr-$sOGpvfzcXQ6! zc~=Z$csJuh`_p4%KgD_bjo}&S(6WWmlI*>C<0tBE1r6J~mBgKf4U64=L@J^&xFX`dy?0NH-(BpHh^w8|et-N3aET z1nETVmYE7rK2myH{7vl9J&N=$Y}Ot`T87)Rrg11ADZM>DfVZBIKvX-jLq)9`rOF86_C|`EK1@NvtBd-StIxXP z;;CilQ%1_)h~N5W#>OB%ag^X}DSo@KXoh*%K^TRd=YVOvgbmp?;3mU4v zxMW>!eQ{}jp1s)U8DCu5SX@$HTvUIuq7Z)@eotupmmi;>Z279jZ#XtT(-%IRk2x3Som+rxAqU5&WDAFK7r;=0hax!7ncE}g~Lm=9RN_bAG}nD|fy7h0har^|sq z2mE!|N|{+)a!2mW;?mpmP{>gJ%;L!#3hIkZ>&Dd=*Yp?W-J3JM*i>IUd1kQ@OkkeL z?NEoT4?#`@v4H(up5w@wlanDvAsPE2V-w;9`ZynXCKYwUE6Si4 z*a*L6e^)74S4+a~jS0KYDt6>oqD=nF*w}qIS7xWycNOP{{6T)#_aY#$ggAXbh}Z%*?@d)e?u(AdH&C}a#=r{ zRlGSb_iNA_s-UMeo=kD12&R1#{HNiZ_X9hNBSw&9OBL6`IM4T{$XZamDUZb(EAltK zhS&@qWodDV5lIPHmjh$@X6WbZL%t`#!}?-daS5eXu_lT*88G^LT={V@X4q*YFV`(QnDt~6<;U1^DW0MT(0N*(7Wi_DoCQIN z^ixsR9>it`&hx*GvRK^U`jN~w$h1P{Z8+DHxk8rNcryLHWo13`-2XZ9EJvOo>_E>n z(ccivKbzNC#217R5nqyiZZF=IlXpgL@t&Mn#rtyRBdrH=t*+-oE3_ScyHUog7?bJm z;pHDiXH51b{W3;!^4`lS-kCEKvX>yOFCNKRqFcr+t&AGfeLco&`WtciFJ&ns87Ety zecoNU#VX1;n6ngVJ&RV^ln2{v$VW6;R{;C>k83QhOGNj2DZ0;MK3Id;Q1pAuyGS=X z*X*@u{&faslla8_Stc4B(uCT4ff}6QGN~3cgBX zUygEn4$r}n^KQve?npsop1Duu6#V)Wod+T z173gT3c}C0C+89ju{i~g<|^;!VEBD7w*Y_qk#aMxg1L0waOn#uXk%`{T}8^iT;%>F zx8UI-<>9X++V@3R>m((b-w!$8 zA6GDPlCpOkt{)y(@XATb9mM)zVZol0lqc!*rNV-jPErPn3U;5Q{PrX+=BrKALmtR0 z*ifjvn|HpV+?`+WgF@w%e4Gvz6#Sx4xqsY!+^D3n<$|e-;&J|HobnPi=es%1E#s8G zk_8Urh94WJ{4)=yx93+rj!)g@<8*I+<$dFn2dEO83o36PhmJ!JqJ43EVSz6!@P!4w 
zu)r4<_`(8TSl|l_d|`nvEbxT|GAy9|9SrU7UTA;kB7^Y@AjJY$v9Y6pK9q+;qv>yy z$Ta;;6q%;KfkMXwo+SPrJ090$$9^AA*elb=Dh?kO>9}f6pCQtVL|QGX@^KxiFBPv?-A*PB7IDx z+eP}4NZ%IeVUdoTCdwD-MIx;hX_H77h_pkbt3MfoDVNTk&w zZ4&7Mk#>l5l}Oi#^d6BuDALD7x?Q9%iS%ue9v11i>7sm*UL?|Lkv55Rfk-<9VOkj52@Sj0dbLr*JeS zQx+l*QFW&lSS23%n_F63lVHTCJVlFVsd&Dk#l2L#K+)nyDn3rp;!Y}Fn22Agc#)#T zxm5h5M0`)h$1A%;KTX9?R(1)yrs5NneY*H5iZ))P@)s*gw~X-fJjI~+=;lt^!R`q4 zq=6vwlv9=GZqma~ON>kM)ja&=D{VYV#V0CVVjN4wCnAk}!v$FF%3W`Wrm`u`KRJGuoSmKf=w62M{#g12a=%=R)+6ExkqZO^-A^F=pun~9 zcfG*1_7cOBLa%ZWe@PuqaM*dGyp5Fb<#im;@N-cx;hWR&Hh~vi!THznMESPBOBQiJ zYvz{)K9Yu)U;t<3Uc>p@g`7@-mo#yJ?#0mYr!4q149HB5;HUdFblfQLVWB78E1~0Y zq&aAp{}k;4_1LjP;18zZFAMyEG<;Ox_ow0i68MkO@N?0=RPGPcaJtP&_kGT>A% zZCV4yMgsqVz|-g3zXDGBXnw2NXB)#a``4(Dqs`;B z(m!VWnf)}I{%4?qIhbAU)ambf^(|xkr3pQ?a_JeO9G!mJ#PH1i-U1v?NoT~R6+(V` zzYYj|qi8Q}z8eG1?uBcLi1Y#A=P8JXVGd~XTDm8#$N!HkcsVA_di;yB;NQ-IKb!?W z0Nj|SJCP@2LVZ5`&@q=^yif4V0e%|r;YJRyXGCz?EBFsCFHSmT(8{Qvf#g9 z_yomN&&zvV1c*NYr+Pgk`USs==K+8lH9|_`S@0uFzQia_C!pi#=~>C}35v0SGrEL6 zOSAAlngt&PejZkl4za!w(`cn=f_}Ls;MBgRCA?hrJP1ysz)4Q8m~WH{{WoTj^Q@3# zZ03yY`6HaRo}yoFCvYk^IGghud7`Yz!e4~Yqi44(v*0VT;NQxEKbi&Kl?DFfUBls@3BT}1q$=b7kufXOdbN<tkKDP>fZN3VU9S`C} zPoEcroH`L#wRvC;Dy7GN9&pk#Jq`yMKb%qY>x)IX^c?{`IrnD4f5Y(1b;pxg_&)-U z|1*w@Q7F?h&7QM?>*+Hf_)9!o6g?Y6#~*-`{fCA9wf^{q;7_;Psc1MVw@#Fcu+NTK z;Ckh@3VxH|$MlaKtAOj(YjYMke-m;x3pqw1=br+1q}kzQYLGLs{fchCxV<;^K_-ydE81;Cki$1h}4_ z^iZW9ei@dydid-tco;b8Go0q2K*-EY3=2)BL%X)(Ypp zEyA2CqcvRVoaYIeP0nT~-VO@}!XC@KIjt_6+fp8Mg(BrgWns?!-* zaUI@r%yPZ*IM=I^*Js&VX4czL$(|KXn^Wh#M|UU^j`VbNl)II7PeT7lx6|zl_&s68 z>1+=;JAHu_E}yeK5(tHzuAW}S9q0~HmufGcR$W`8&*bcYVVo{<6DM9^4XshwyROdm zp6>27$Rbh9_$jPTXVbj;W{cA@x6z5{;cRPe(9T7bTAG?#Ep5)W`k8hMI2x~+TiFn(7bp}15aKMj7kF0U7Vm>8zk*bzp zqoTKuik`B9t_ksdrSd_l+wX0_`3m&E{72%DThraeFKfl-do=RlHxd z(#zX3U10<%$|HdmOATIZX0Ih%yvUEZR_k;|x!8-h;&wzo0&uu zUL(JVkN)9tb=%y)&F(IEa3tjNM#2~{*r;J4Hw@CNs4SE${I*Yra!xTQ1V>O}sEDj3G&kzVd~vR6>u z!5%LT$D9tw952KEp6)FeX3H3&slT5-8cf 
zx;xb>uy%J~l_zQLigGr9=!NWB_TD!IY4Wd{Mpwke2>7|X*F{B86w8#A>J5^IWpO+1 z&U(QfHE3IaCa;oRVMR|zc0Wk1ZJKM=u;zl+=4s8Yl^#~TmLTsoHlNpmfjK;HP8*sZ z0}?!ynodkrDok(|Pj?XgH{=N-lm|liV1p&2xMbgLte|Fft_X)I@P`m$DRr-Oy1Q07 z=_?7zHmIad5cWiph-ekrlGUvDh0RqqKl}PbeWF*wPqmV-um) zfG=$Ert8?Y0Io(&*y~#1U1c^;!@17|H6v(g1jvp6s)f#7Khx$km$L~Xx2JuqUD${M!~}eJ!1wTDE?7R_mvfed6;t!AW{$>(sVnKRI?Cd2GlW z+gc;lt1xc~glxxI&?Ms~jTEg@ofZo`z8SsPZdh!*^94zRUWK}KV+h4ry^pCf(TSv)scIo;S^xst765Co_KYs}NM0dpRk)nR^S zuCSp$*~EuM;E6D0ZNfz}8{E<4pa`wWu@IGQ)st@TL{O|Si-6ed35OBfL{AXYk`!Oa z>@PgprFv^ZvboA0=oC_$FpBulsx%Rxo|-W^%7{JT%J6De5D_Ar5+Bm*k*QM^tAUtQ zrRbkA22)RCaWRtk0FN$DiWGCDG`7)lwzg5s;{zdd&6WJ16!QM_k?SrH{ zIt{Mr#!npzFe%7B62NJb!#yT4nBpII#LqT*=tYBsA7qIj)0zUI1+EZxsJYoBPiw$Z zuLnCM=>c7fYSSe~a>PF8h?23qqiheH^Yl~zj)*3Kd zGw6+2(=dg?$I!x9vxOR55ixP8srLoDT=b==;~HMS=kPY53I1_rm&fj=@?7f}lc~gv zE_GcjCM#1bo4h_BHnqa_{`Lh}*C1qj$Rn{ygB2rNHQV@>(kcwDdh;60eyi9fgw408 z4O0Zx>E(@18XN*^w2jX6N@4C?jqMchz(cfYiQZg$9yq7+>A2v=78>oKwN_)%f(eSY zvf%4;Hjmb=v$iGozuhWRD9nlKU;!`IE>ft4=`$mq?AynSAhmNntKoa_K{1+V^Im3i zEzs(Zb<$pIqFBB4U^nK)nFE+Sg0KNqCz{j7gEs$9-*%8|ow7kQi#cVJ$3b&LZHAsMGZDb3vt*w`A{$$lf36*xbbn8oN*%K$y!tD1 z*_0U*iDS-3vy4-GR?2LmGa>q1<5L^@=>I+IWZo0VxJy&B&>QKR=kl+_;s9$(GYeQ^ zYrdE6H)J$*VlO0pa;}XKvV9Ydp=Z~O9lt6sn~l&WqW-AFcRJCRjuZH++&+&hWPvd& zoHk5j@uQn4$8@FCiIG+rz5OlW2DGPzpr81xAuX@u&92l%O=eppXPGItN!WZ-wqa(@ zj8?3rZ7J4tROf1%_pv(#*y#6RzaLw+v|o_8^^z=4XRE-}Xjx!uus9nW^Vv|8oC16< zPqrN^)TUN1Egr$ViG5%4&V{a!pCUM|{8G1o;U;?Rqm57h+gnEIuAk~5W)nW-$!y-_ zw>X+8I*M&Hihw83Xx#SM%#>J1Wt*|4?8RWwG*5pgEG1l~)_@hEl!w=JM_enAMnXL8 z($4V3T2DE4HOg1e=GJB2c7;)0u5g!9-oD0i2xg>d3p$m4TS25}bj zMU-;3I4Q?zIlj57l+zSlDW^L!U<$B>Z@H&S+=J3*1-H(+X;)O-BmdtdcnpBOQzjleNW3Sa zy)U9YU!avC9uv?=r5tDYq+meHuf0#ArMtv)HQMt;+H**n0J_hQM;7snI6t)aPPB9n z9_^rm$|X@+emoAsjyn8HvbFr$`zczgy@$t`@sChPBNZ#o=$$w%zxE!BmY$ZTzs9Gf z^O2w4vD0YneHSg=BuXOvNl#6G0W0k|p=(-x?Y%xN)!yq%x1UyjzsNsR2-M!6(NgWb zLduUvcM|1m`Fnv;74g_KJ2qFPrj$oMNLTrw$CBjy+WRw+WR6}dUAzaI{${0j0~ma*WLrtQapy7a-`Rvo?D{&YweeKU#OdB 
z^J#_^Qj8+ilK;SooL_sdNK5aci_(!^{*Q4@&ad8Z)k@@aG~G83lNUwaOD zlgOXWpJq;_5CCB&O`u4%{8Q3!Ex(+mRgvY`rImj=7l6mflm`4XeYE&R{ZrGObMk-p fJiyb}@SK=@Xys}cJsYQ&|FNx{qa-au8masrv^e4* -- Gitee From 30e050ed7a84049db96b58729b138c3fc37acc2a Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:31:54 +0000 Subject: [PATCH 53/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/tf=5Fops/visu=5Finterpolation.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/visu_interpolation.py" | 44 ------------------- 1 file changed, 44 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" deleted file mode 100644 index bc82820..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/visu_interpolation.py" +++ /dev/null @@ -1,44 +0,0 @@ -''' Visualize part segmentation ''' -import os -import sys -ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.append('/home/rqi/Projects/toolkits/visualization') -from show3d_balls import showpoints -import numpy as np -from tf_interpolate import three_nn, three_interpolate -import tensorflow as tf - - -pts2 = np.array([[0,0,1],[1,0,0],[0,1,0],[1,1,0]]).astype('float32') -xyz1 = np.random.random((100,3)).astype('float32') -xyz2 = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,1]]).astype('float32') - -def fun(xyz1,xyz2,pts2): - with tf.device('/cpu:0'): - points = tf.constant(np.expand_dims(pts2,0)) - xyz1 = tf.constant(np.expand_dims(xyz1,0)) - xyz2 = tf.constant(np.expand_dims(xyz2,0)) - dist, idx = three_nn(xyz1, xyz2) - #weight = tf.ones_like(dist)/3.0 - dist = tf.maximum(dist, 1e-10) - norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True) - norm = tf.tile(norm, [1,1,3]) - print (norm) - weight = (1.0/dist) / norm - interpolated_points = three_interpolate(points, idx, weight) - with tf.Session('') as sess: - tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight]) - #print w - pts1 = pts1.squeeze() - return pts1 - -pts1 = fun(xyz1,xyz2,pts2) -all_pts = np.zeros((104,3)) -all_pts[0:100,:] = pts1 -all_pts[100:,:] = pts2 -all_xyz = np.zeros((104,3)) -all_xyz[0:100,:]=xyz1 -all_xyz[100:,:]=xyz2 -showpoints(xyz2, pts2, ballradius=8) -showpoints(xyz1, pts1, ballradius=8) -showpoints(all_xyz, all_pts, ballradius=8) -- Gitee From 79522fd140267bc981a278a0f907719c428744de Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:32:32 +0000 Subject: [PATCH 54/69] 
=?UTF-8?q?3D-=E6=8F=92=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/3d_interpolation/interpolate.cpp" | 169 +++++++++++ .../3d_interpolation/tf_interpolate.cpp" | 267 ++++++++++++++++++ .../3d_interpolation/tf_interpolate.py" | 59 ++++ .../tf_interpolate_compile.sh" | 7 + .../tf_interpolate_op_test.py" | 24 ++ .../3d_interpolation/tf_interpolate_so.so" | Bin 0 -> 52296 bytes .../3d_interpolation/visu_interpolation.py" | 44 +++ 7 files changed, 570 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_compile.sh" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_op_test.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_so.so" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/visu_interpolation.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" new file mode 100644 index 0000000..b7d0dd0 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/interpolate.cpp" @@ -0,0 +1,169 @@ +#include +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include 
+#include +using namespace std; +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { + for (int i=0;i +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/common_shape_fns.h" + +#pragma GCC diagnostic ignored "-Wunused-result" + +using namespace tensorflow; + +REGISTER_OP("ThreeNN") + .Input("xyz1: float32") + .Input("xyz2: float32") + .Output("dist: float32") + .Output("idx: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + c->set_output(1, c->input(0)); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolate") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // (b,m,c) + c->WithRank(c->input(0), 3, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // (b,n,3) + c->WithRank(c->input(1), 3, &dims2); + // (b,n,c) + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("ThreeInterpolateGrad") + .Input("points: float32") + .Input("idx: int32") + .Input("weight: float32") + .Input("grad_out: float32") + .Output("grad_points: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, 
c->input(0)); + return Status::OK(); + }); + +float randomf(){ + return (rand()+0.5)/(RAND_MAX+1.0); +} +static double get_time(){ + timespec tp; + clock_gettime(CLOCK_MONOTONIC,&tp); + return tp.tv_sec+tp.tv_nsec*1e-9; +} + +// Find three nearest neigbors with square distance +// input: xyz1 (b,n,3), xyz2(b,m,3) +// output: dist (b,n,3), idx (b,n,3) +void threenn_cpu(int b, int n, int m, const float *xyz1, const float *xyz2, float *dist, int *idx) { + for (int i=0;iinput(0); + OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,n,3) xyz1 shape.")); + int b = xyz1_tensor.shape().dim_size(0); + int n = xyz1_tensor.shape().dim_size(1); + + const Tensor& xyz2_tensor = context->input(1); + OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeNN expects (b,m,3) xyz2 shape.")); + int m = xyz2_tensor.shape().dim_size(1); + + Tensor *dist_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,n,3}, &dist_tensor)); + Tensor *idx_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,n,3}, &idx_tensor)); + + auto xyz1_flat = xyz1_tensor.flat(); + const float *xyz1 = &(xyz1_flat(0)); + auto xyz2_flat = xyz2_tensor.flat(); + const float *xyz2 = &(xyz2_flat(0)); + auto dist_flat = dist_tensor->flat(); + float *dist = &(dist_flat(0)); + auto idx_flat = idx_tensor->flat(); + int *idx = &(idx_flat(0)); + threenn_cpu(b,n,m,xyz1,xyz2,dist,idx); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeNN").Device(DEVICE_CPU), ThreeNNOp); + + + +class ThreeInterpolateOp: public OpKernel{ + public: + explicit ThreeInterpolateOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolate expects (b,m,c) points shape")); + int 
b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b && idx_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) idx shape")); + int n = idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolate expects (b,n,3) weight shape")); + + Tensor * out_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &out_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto out_flat = out_tensor->flat(); + float *out = &(out_flat(0)); + threeinterpolate_cpu(b,m,c,n,points,idx,weight,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolate").Device(DEVICE_CPU),ThreeInterpolateOp); + + +class ThreeInterpolateGradOp: public OpKernel{ + public: + explicit ThreeInterpolateGradOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,m,c) points shape")); + int b = points_tensor.shape().dim_size(0); + int m = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) idx 
shape")); + int n = idx_tensor.shape().dim_size(1); + const Tensor& weight_tensor=context->input(2); + OP_REQUIRES(context,weight_tensor.dims()==3 && weight_tensor.shape().dim_size(0)==b && weight_tensor.shape().dim_size(1)==n && weight_tensor.shape().dim_size(2)==3, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,3) weight shape")); + + const Tensor& grad_out_tensor=context->input(3); + OP_REQUIRES(context,grad_out_tensor.dims()==3 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==n && grad_out_tensor.shape().dim_size(2)==c, errors::InvalidArgument("ThreeInterpolateGrad expects (b,n,c) grad_out shape")); + + Tensor * grad_points_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,c}, &grad_points_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto weight_flat = weight_tensor.flat(); + const float *weight = &(weight_flat(0)); + auto grad_out_flat = grad_out_tensor.flat(); + const float *grad_out = &(grad_out_flat(0)); + auto grad_points_flat = grad_points_tensor->flat(); + float *grad_points = &(grad_points_flat(0)); + memset(grad_points, 0, sizeof(float)*b*m*c); + threeinterpolate_grad_cpu(b,n,c,m,grad_out,idx,weight,grad_points); + } +}; +REGISTER_KERNEL_BUILDER(Name("ThreeInterpolateGrad").Device(DEVICE_CPU),ThreeInterpolateGradOp); + + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.py" new file mode 100644 index 0000000..2ef1edd --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate.py" @@ -0,0 +1,59 @@ +import tensorflow as tf +from tensorflow.python.framework import ops +import sys +import os +BASE_DIR = os.path.dirname(__file__) +sys.path.append(BASE_DIR) +interpolate_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_interpolate_so.so')) +def three_nn(xyz1, xyz2): + ''' + Input: + xyz1: (b,n,3) float32 array, unknown points + xyz2: (b,m,3) float32 array, known points + Output: + dist: (b,n,3) float32 array, distances to known points + idx: (b,n,3) int32 array, indices to known points + ''' + return interpolate_module.three_nn(xyz1, xyz2) +ops.NoGradient('ThreeNN') +def three_interpolate(points, idx, weight): + ''' + Input: + points: (b,m,c) float32 array, known points + idx: (b,n,3) int32 array, indices to known points + weight: (b,n,3) float32 array, weights on known points + Output: + out: (b,n,c) float32 array, interpolated point values + ''' + return interpolate_module.three_interpolate(points, idx, weight) +@tf.RegisterGradient('ThreeInterpolate') +def _three_interpolate_grad(op, grad_out): + points = op.inputs[0] + idx = op.inputs[1] + weight = op.inputs[2] + return [interpolate_module.three_interpolate_grad(points, idx, weight, grad_out), None, None] + +if __name__=='__main__': + import numpy as np + import time + np.random.seed(100) + pts = np.random.random((32,128,64)).astype('float32') + tmp1 
= np.random.random((32,512,3)).astype('float32') + tmp2 = np.random.random((32,128,3)).astype('float32') + with tf.device('/cpu:0'): + points = tf.constant(pts) + xyz1 = tf.constant(tmp1) + xyz2 = tf.constant(tmp2) + dist, idx = three_nn(xyz1, xyz2) + weight = tf.ones_like(dist)/3.0 + interpolated_points = three_interpolate(points, idx, weight) + with tf.Session('') as sess: + now = time.time() + for _ in range(100): + ret = sess.run(interpolated_points) + print (time.time() - now) + print (ret.shape, ret.dtype) + #print ret + + + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_compile.sh" new file mode 100644 index 0000000..8af3cf7 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_compile.sh" @@ -0,0 +1,7 @@ +# TF1.2 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.4 +#g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I 
/usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +g++ -std=c++11 tf_interpolate.cpp -o tf_interpolate_so.so -shared -fPIC -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_op_test.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_op_test.py" new file mode 100644 index 0000000..030456d --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_op_test.py" @@ -0,0 +1,24 @@ +import tensorflow as tf +import numpy as np +from tf_interpolate import three_nn, three_interpolate + +class GroupPointTest(tf.test.TestCase): + def test(self): + pass + + def test_grad(self): + with self.test_session(): + points = tf.constant(np.random.random((1,8,16)).astype('float32')) + print (points) + 
xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) + xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) + dist, idx = three_nn(xyz1, xyz2) + weight = tf.ones_like(dist)/3.0 + interpolated_points = three_interpolate(points, idx, weight) + print(interpolated_points) + err = tf.test.compute_gradient_error(points, (1,8,16), interpolated_points, (1,128,16)) + print (err) + self.assertLess(err, 1e-4) + +if __name__=='__main__': + tf.test.main() diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_so.so" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/tf_interpolate_so.so" new file mode 100644 index 0000000000000000000000000000000000000000..66d67ca3c5d2c52602144e24fc4a95565690b9be GIT binary patch literal 52296 zcmeIb3wTu3)jxc4BM~tZ6>PkprM5;vpuW~prM_sbp!IUbU_n%*YRUh%_TFb^&&(OX zzW?w0p67c!I_$H4XYIAuUVH7eFXxh*E%v6|oE$|v@|3F-qV`SaNS(N_bDKCpQm51? 
zh4}Z&%DIfUAbBa{?4mvnX{n+Z89-%>!_TpSkw}f~B$3iZ9E(J%<;w*|(E1^|N~tj= zJt(T8rA6$jqU0l`-%fEoJuMS_T1vchINpl1)44Cq$c*$ZAx}%Sd|yMpgz7x&G5;+Q zSLccHve{*G9%s~2Q9P^LL|mfFv*ylMW`ycm9$I;C!Iw6F_LC_`magtGK7J47yb!;+ z_>o;s!q2Eon7Z!tndk58pVv5{B)?9{Yg9~hnFOOUx7?}}^(jR~MS16y=KrC6kt=uk zNw+F5)GCd}LghbCJri6^UJKGzB5=&dZvlP_@mq|a6Tjv7t-!AxzfSzR@VgGbmH5%o zjh`RC0DeLIuE#Hg-zxmp;77-eJYoOdgnw_wuMfXl@uQ<3zxDY2`dd%W+_JN_V13z( z+wOnXI^%Rl|7DY>PpbDiW2;ABJW%}6$MN$PFMNOAY5SWSE_m8HV`tmLTOQn6x%5Hv zr24hLSyFbO>ek-(UM&0T9@o_$wruNv>dMXc|15Co%$H4jUi_x7Y02PMa{?`Y`pNZA z9l87AL%$2;{A|8w>RZ--O}fQ4dFuT)zIRXI6`eUhGe5q5=inu`jeIrq>%A*(dU^3V zKfU%B#ovk@eSGNs;^Z-zd4_C68{p+5hAA?WJKDL+Hs zp46R&v%h!8b!0J$>4;==pJ$a*Ls;o}4QY-1P8$S?u}WS@;LW>&v+= zi#|`nAN0ySEsOkJS^Vn3EbY4_OS$t=s9w3>&*J}kv*=l#1>cyZKi-g~+?%rKe_a;) zcW054lcj&XmBp_PWzqjq^b5WIIF`kKuFoPTi~&>6&Sz%nclT$h@AFyY&&Xnjnk@Zs zZ5ID2%i<@`WwFD>S@fKjh5y_v@#C&EjvaEOGn#Ec)z(pXjxF zQ_^z~H!xCrUTs7~y?(4tz`CKZL)Xr%Y5FVjfEEO2;JN zOwMdhLv4244*L*3vXs+Tp`GaXDSm`I()b4iUMFz489NZ>VgFN<{XPMRO4SN)@lqkN804_KI># zIjnq!l*$dJ$yq+0^N$FAqu@6~9?2<9v*)#ld(1v_IpckT|9!;a^ORy`Ux))nf&T*i zh4@S6aA2~)|BiB*9cXfj<58X{!|)%%6&D8zMY}jzz0hc3j-M>}pB8wDz(KNO3=AY^ z*EO7G6J(rDf!*}V=r|P)M0S2Ct-bb&au24pSEnd?wZr%99xv#?{L=y&^seYE~KTj)8QW}mreLT0x#`)@#fOO!K}O?MiF zWS+c>g!mn4ayAP7htl}JC*&U#@~fc-9p68N%h@FQ*KHhDt{5-smF92TMZJv8oUvTU z8D!;_X6SiemUb@^_(36Gi^+QtB=y?;ccT9eH}Z0C66M}4aAO0_sZ+N(>n7n+qF zl%9qaZakR+3(xMqcLJii-5YW`$hs zQi0hRsL*WJw1Tr<7lpTmE1mN^L9@w8jUA4J0&6Vu=CrzOZVSZwS9fPMYeR*_F}KxO zVba>PA>c>;9(TkW@FVkl|7x$l9m2ys-EahFXK$}F=m~`b?_O`3n@Fttw2AY$DbjjLeAG;eF9yjTc>c=NsteF+-Ts$r0^0x=N zJ60)l0&!+@gnZfWce;Z;Ua!|Nr=!*6baXh-@fKH@*hO#B{R9`WIp%m7C-GHf@O8p< z^qEsTB9o1lFi&fCt@Jc>dE6^og1pK$uh(L6w1(%+X>;NStzfw{1Jr2wE6gHqvnL!z zlL_O)h$2^Oz!$bqrS!|sl7CuLAhf_0;=TNH<*c3SS)-qRxyIesJ zbm>4O_q#o{joxkwspz8UK~V1no6{c% zb-R2{FT$dW6$ZA}D#&OIwxEXHp6>3zDi5XZ;2NrMYuH?Yp6%8`gU#W#Sm3^G&Q>#= zi^ZANNQJ2*5L)dDwI?JyBd$)KpM|Gdk^vPX9!qz3GCZ|L%+L>gAh6o$3v_zjPERNl z2B;@i&09Ytcw6y`R;cSREh#~mbovdW&_qj`^O6iHzt=nAq8N9^h^Pq!kCXsTZvRt8z+=-f^ 
zv}&g_z-^+ro1Q7>fpaRas^GS@IT4L4t<{J~a6C2`VEhQLb_LPJ!pvVZ{j{zuwW2l; zkbCzu(L&X&kx+vxf}w9NytAg>7wmG)?di@4{NN)e;bTMf=+Mm@uL}7bfa0NI}q}i zP4he*4S^nvTkVM%T1q_1V9T&@Qo|~Hpi_)0O|FQ`$H$cHLy0*(gPs906gV*^FE-HnxtKTG{0F`A~Sc-rv4}f;oCjL!dk8ig>X$X{GVbX79$N55rfh zb82VAXExP`LasHfKCjzjV@@8aauH(lEo#H)mEJ(2q-r*Hw%a1Ec`@@7}dY-aw)4fc7J zt7S7fro!ja=NM)>VSSQov}x3&Js9xQCp6M>BjV|eWHg?+DvhJc>xUpd2Tif3xr!Tz zxy(#g*keO|&1MO-SZdn6-OezUvYB((6Uoj8ifpbSdv}J~<|;NLgL69rJrPy|8lBP< zbkG`1^TKpLHdjh@p;caOBQ5c;L<@vkdLk_ycCTM7t&gpWsUgzagymOH2<nDq_c|tT)vQP#e3pHFntYsunls&s9kN-X4U9z_ zyLm4EN*>b8%&|mt>~*wOWUK))$B+z}wX{BCf@qphO|w8Rg5C|Dkkf~a8MrKhXELrQ zN1n{EW=>h>>P?5xb+CZe8xqshNtAJhD-k&+03R^PlOq`$!~EqQFjt60qR;O&_X(fmUks+gCw~tF%R1b z*v|^VQhcs?j7^Koo>k3v&ywOW>g%j_%%(=m0$YQ{+2ELe0z#@AJ*zM>>PpHev5Kwm zGuPJU%9U(q58E^pUe=hWX<>LC+d&MuLgotc5gYAlVCMnF!M?SL8f7-OqlsD|v1fR! z?WPl`r8XgLO__^j%m+zQo9zU67J$b|3uD&`?@Y*qsrK-L&2gFiEY+5se8> zB~xW!6AVL*u9#GXk|rUUt$Le_tgvhsTC>GSmMi;3XOQ-ru)(y_vqr4P6`yxSB(cBi z>J`lI(V>2 zHmjk*IknuR%(B~NHaIKFD-&lGMa*f-O(X#r7%j|&H+l-`B8*3?LYps9Ob8j&_X7SpL2d* zB0ncQc@*L?u>UXqQ&_3p>|O|eZpCrY8r&%;QoaX_jvQF(Msd9k^hB>0cVG&X2L#r0 zmKS$&^OZ*geb+bd!5y+Z<+oD)YsEE^7rl83Dqg5;7ubuBti&CfamsE%N2c6~Td%pw zUXfpWPEorbyZLI8g69>*eG(1dbrr{#r{O`tKP>JqY5W?k-EX1WFsI|E-FFEJIfO6A zPdl{xu7qEXpT@7b+;o_DRzbfualilV?{6!KTmhiePcyAWG zM8ex7Io2$AUl!ab;o{aflQvnxdnEZL2^Y6E+0_~ecT4<>B)nU~mrM9M3Gb3{zk~-R z{CWxRy~D^TbXxYTs&=VQ!Y61T()AKvEaAfvjz?<}$3_VkU7B6nB;j95a4X6~67G=j z%@V#$!XKCLDH1*+;R_^ur-WBX_$~>TZ?*1`@EH>SJ_)ara8<%*O87wu7q@KL)guyq zt;D}k#4Gau3naXB*vKdvq3D`+7$saDStd((p2m#SB;ndPN0)0P9DzA;)JZt`igs8f zTs#WFE;%Heo=et_MG}6i1|nT9;j1LPOTsbGB#xkj7@SPG~E#bQ)e42#sk?OUNO-S=e?!9iB-}3H>m|Hd z!ma)Li>$GNx2`cN*1?fT?$|DC|F)v7368O+yYKI4-=H zXd0s9yBK{w(KIy2M;Lt;(KIB-H#7P)qG>3OZ({U#qGLNKXc~IseT@DDs&$b& z#e#KRF3~h(#vP1)gJ>Em<8_RFg=iWg<0eMGKr~JD;zmY4OEe9Y@e)Qq zO>{BQ3Zu6WO;f%2!J_~!`~}e^MDJtt!$hA-^e#sKkZ2kL<0Fi|m*~@p-puGbiKd}2 zzKPMd5lus2e3;R<5=~Rccpsy0AeyF*@gSo^M3)l1oYB`2O+#PY!RQr4)6f>LWAtL8 
z&nDW$=y^obP!=~bdN$ECRK-geJ(Fk}isA~RrxQ&>Q2gM(sQwj18;Rb>=!=P_At%0z z(dQFQLrr{y(Pt5T0nwWoeHzg;}8${C(60c+QD@4=K5jQdV1)^!lh#MLGEYUPn#7h|cG|@Cf#1%$w zA^LKn4}QklpJ)@&`xyN&(KHmqcQN{hMAMWzKEmjGiJnUIW=7vhGz|suO^m*cXqt-0 zhZ%h<(G=q2eT=?=XbSD|AfrP>*ATs&(bo}OOSFU0D~P609j{~bVxq4g+QjI2L{kWk z8yP*D=ov(pFnT7@6msJVqo)&1p*DW-Q`Y`O*Acys(H9d|3Y$AYqYVf$UI^l z9E;4d#%2{+qn}!bdR6NcGnPVF;G6ps_VJJ(^VG{**DOc=n45-A? z%oUq16g&D)rkHhTX<3o_I#=gE5qpUBX}T6GSTfH@q%}6#8vTd5jEacvPzOmY#{2W_ zLj~86?4d~~p;#FHHTdCgU`mU&OVD3elac*ndzd6NJD@N#G(b@M+>6nz4Zd zo+bqTjw`JGh0>v_YlOfvk^+Crirk_8ogks%LN0KC3!F{@&kzEALf`XP#-}$Ix4^)vYH%F?`7cV?$LK>z7zi~teWgl!>G`2 zT8+wl>rlbZky>kySfg)6&V-?FaDarurz{)Wm&~wiD@jSx$iIg24ONAhm?=k*-}=FZAFZb&@01jI0WL3i40)Fdd5#HEOR0v342wsE{p%OjFBlxF zpI_fLzcsqH>|iuf_MzeS80QK|z2e^9fdzgS&hw|(28{{#qvFr%d52M6~gCmC9&!QFpfhP5p)mWqd-5h<+K6Gi( z;0qrZO0FOo4f*u%oFc>RCh*#$Bj&>v^9$xNi}|k|^@hoZ=%07#pEpJ?Px^C*plU#>J3YG;mle@XMgy> zaLr%wgBsWT!7zF22ZlvY!TkegrbCx@Y|2yzMdbz zpl{;$9)5?d{aYtM@~LHI*8XQEG)9+wXzSm`5MSAm=uJoHTBPi=#_0UdY_)$g41R)< z#h&xNJrCw=Mw$~TtEunbk<(a<<}eKY7MGVSt6y5btp3`1=T>M!MMpozziPZR(Qjav z4`41NqvkRE`*)ZvemyR;UY4|%+}`%s+Oiru+M|xOyq#LU0=73ia%e|m*>37XR`+&l zZ))p3L-{uAMvdAR-D!B_=_IWV)sJDizI-~Ep!vfOiGlo~Ir^zR`bUfT z&ACIBH66q7hk8TF>GaQ%{8*1dC+?zHbAHTH^!26Cj7oJMHazkyoN;L4X4_C@pUu6^ z>i&am=*soBA^RX^6o?R%Z$d7bIlmAs`z(Icv`+z;Fb>3XoH#X~d?epmyQk-Q8`_Op ztUhOVbM0$}!4Ql`8t&_dEpn)N_So}$WvVUbDO=7CT-aUPNDXu`_@o9proG#RsnN;o zQG3}g!|iin@94qCp&Q0v^5@9ps35DIW@*@cRnpG;tnR0@E7bdRI$a-0(d_&*&h4?K zyq%9UV;Yxl1-Z-oUUSahU?*zmy#paEKjCVa{29-A|if0upA|A4w3?O~sSK1cB!28KzW%7=|%>_wnYC9M840MxPa z9|uDJaEKZue=|XwsUd8&PxbtcH^kNnIe%%c9W@LthJDcl&*hNb`&c_zb5H~Nr^cSw zUZ$e1&+)bxm)aJ?9he6XRV~NrY|CRbhTpQkkK$1D?-+$sW}>}-F%y0MeYE*d)gLGW zEn#V}{^SsaWXxwiz+iot`p<7@H-SQ?E&4ASUp_$4?UlqemUUsS9cwaK(WXV z1k)A(x{#wtEzKtEW+v<;A?)W&7=8aBZ0Ro^GW#lFRc@|eA>3)7+rVl|R$ z+rc-t>`~P)csVK9zpYp5MTz-_b;owJ=@{y))>GreB4o3}hQV2cf|^WUG*Hw56wXHV zXYA3#>PxJuVWahmFs7ZpvQopqFG=0O!x2n$%B}s|G0Nj|Y}JwYQ3Tjze3Z)jGZUhf 
z$HFuMrYLVBFV7a;ZjT;SJEZdL7(!9rKgbYNo;_NfQr^$AmFG$+@5eMOB=;haqu*iiB$@jSvH%HfLWR^f_me#^OFN*x%uKUab+IPdYL9-bZWqv84SGlY zrGTyglr+b%855zzauxB=@Ypv>;VGCq^6?7uV6Vj}riX>`KcccJ{J3tzbwq(r`4qBH z{V_F8bnlXQ9n-kLCFLK|$zPwA->cX^(Cd+Tu$^Ua)m&>1`Wg`xE&3V6ePoTkM0*Gs!4d%kk@7B>u|8H=W{q80 zrha3T)mUwT!Wgq@ffo_bXWZ(|NT?o|!YPuQ?Ql1dOY=Z#S`ki#j+nzn^V+-f7AG7iyr2q*N5CY`uAhG z*MdO)6>D^-ZOUubDSNC>9m&ID(lGD@g_WU7#kOMy78PUG+JlC5Bf#uYSYl$CI)TcJ zEyGCAT+|%hXN^rPvqhi51j#mKPjk*cBIB&FTw8RUt@hQ3(H?!84POVWnEPNP%r8^_ zrc!pIV)@AUe9KT{K~eLRx9zpBM^3dzci5wc?9sQ?1z@v9N1CxX9sFwq&bwFrI|{Z( zyP-%AMBP*ZH!76|5Y3-|kIszY_sWluDDftWF+8q3j#E0mVy)e7_{L>8L$vPsOLO!f zRy-E-d-1CPbqrf;KaQ-#z8TsT3rQO64Ffkply%Cd=EJ1N$zHoGh`X_7j71|HE=^f;bJ~jHHrZ3{jACQSe zA=lR7dTpe)Mb9a#-`;pmsWSQq7V#6YL#}=Wxs%Sv>P72mYGdBOhcW&69&O8G=0jhI zvqpb6nW&)~F&ZcK1}!_$p&O_kv4RaKe7j|%g39f%uwBRPmhUJy--Jq2`VdQLwYFAI?)XUEZs4<2$v|k4P&cn z&B1;gnIu+lHiVcD+hbiuTg-xd|3%}Nb!crK#1O}0i0A-xn=)YEykH|fgI zjShRP7Y<#AX$_T6cNT21S*lelH&*aVV%#wp6vgATH+kBZJYBDy_K%c;X;>q73~tm; zql2t2u>vQ_>>ueuiI3wV6m8~69T28}tl$EUAg&H}fkz$XdOvp+=^du=vunrTL%323 zqwpbxF+$j8;vqGngPSR<@$Ip(X3T{KXn%@^y{iF00AxQ+vxGX&QArO{ z!7)aPd2BukH4NUvS~sPe5$pOe?Kl72FG#ve@MbY$B{A=@A_vb&=~X0 zLr^(&4tWJ4EP2m*U&{jDuu~87z;IoR_}L;%tJBub^=vF@;Rv zqqn5-jR&84*&A@O=ytdm^W|8QtZoxK>;z-&bqtI!4ZK?&BhV%tek4(tJL}*Po<(7~K!61tQN);N@(r8}Tl& zk=UNmJmVFZX7ty#=#KbmSRCo-op5|>jMZBngltKpNxeG$DRhjViL7uD3nZg?T1p~D ze!DFPbD}%Cz=6_OWuAjV@r3|lGmi{e$D*^3jJ}%mh7+F;Hp0G%`Rd0M4Pc?M*O7_| z;ok-J6J~+ER!j&LcKH|+>L>S-95!Fw4P(=U`by-H^}`JCmi-&yXwrOOplm$=+QKMU zR%28!1Ig!IjJC)N+5N-85q3+l$4K`)h&y@DV=3-=kU;FAQ-5YVPD5+Uu!S%wIti1O zvq&k*|4ig*x^aZfgQ|77u%(5%F=}liRavd~T4M`Tbq!S$cs**a?td*+e_Hsc3t8o` z;(8vkleH83>qAvPAn_9sY%zsHbxGfhWz9gD*aLc&^c$)gr94*)l@3z@LsehGU7syq z6WAN9P<9h)7H<>v2heo%u&e;$($O(-Kf)SaU9`6F16ZXI<+FJFvAT3j)|lTftvZ0a zZ(H(DP+w-xp{f^2-v*&u6W6zsFiqczq`u!47Cit+Pv7XY3_aVSC)tzs(;TE;38{Cy z{j{&6wUFUP(&H?l+r!+x{e)dCuzLtY6JMLA5a!>5m}O6k6lwR1$ZjfiY!S!Vy!<1! 
z{JmxWCgzBVdF?&K7c*NR`@hfYwa$^Vi>%Qbb?p& zP9g4Z8wN)RzGWK~!B(rH{sKuVXo8jlQdHV+Gt;${#Ag zDz9+Nfth(qFv40;Uf!EgIA0%s*ve-<*fT-7dgcA)71IC4QhH(mclJp5Lx!H2DJs;T zpbDoZb{G(k)KRP()dwM-&u>KidCjxyV>tnRe*P-g`FJKt)n^Imb4F61#M%vubF6=` z*NgZJ-;UgDB)pNLIdgstNnvXZ~5pk*$X{RoLf7oJQPwQ<6h0W18c3m8?HnP~uv^h>y zQ!hdRix#u78QBwKvtjUdstm?wpedUr@S6jw@-pT!1qdl@++JE{MZEgysmOqkbl3k- z>r}-V`SO+rnRVY$|LY~PCb42);||=az%b9AE6O*~^5r$KQSNH!6Yqv(76(c0S|RsJ zF87;4?oWi=`H(yMAn5_Ux+wR_N4Z`IF^IQS%aLKrBZB+eFi*S(;xS%`>veSf=i>Tm z)_b{@hJm*rNz!!j<cVQnu+8Y3qi-;$#MPd6 z$r_6Z-8cGN0^Ln$Bcab?zNr^KC|pSKqp_?P!Q(WvHV+<~(4iroqQnmMx4V;30+VO; zCq#pR+L8RTME#D&!J-B>@phtyBneYG!{7zrWs;iRAFF<_+OWSicnZY~F6Dr_2q+Iu zhvAMQBpp!ewUR_MDpCVNFpEYZ@NyMPHK|T5JNloROH$o!m}u5wn-QbEVeo3YPNmZI zH(u0AoebEDE>miC=a*5#)={9Sv*TNRljS@hFT1O=ye$=23+{Q{{@dGQFMEqE;z7KwCbHop{VCuEI$tkdp%3=#ffdf*$ z-%vhua}&QA#IK0hvRwTSDCwYElX2<^sAY*Qx`zToBUW!$;UdKacz_fX?ja3t*+yO> z1qHGG$GoKm{jI&2OtM$qKvo>8dL2*RA>eHJGc)cx>H{y3PQ;2h@B^uLeRc`OfqyWA z?p4cC{%9ZD7Z7q^WrFsqm0WIA$bDYOZH8PP2L%6G!Jp6huMzw^1^>C=kB8CswEl?} zR41d8@Fs=;@cM zBUH{LRyf6lBF3Axzi^m@!_iFYoy7eP(+cZ>WJGv5dAdpc*PqF*aHK?VxPW=aUNsB@ zjK0bIjY7kIs_4~(y)pXy=yQqqq{Rc;>BlXd1L#4w?0*RLmf`{Cx&ROo#-I`p2|YBs z*FwTWyOJTnO1t*oCV=;5788u>V3GwRA&u9+6tY;~-415ncb9_E?k+2{)owEkJ_1;q zNvJcELYasXHJA{A2y#h6#F=1Z*-MkzKUVXYV9dj9wL1)h|AI@4p8v6`{)yU-CPQx% zOp^}^nWqZzq;HX7a2}+Qsf)~q)yI&76){5tnVRbN84mduCUBf6>N+aHO(Rn+wwvu` zlMRD;#DPf=Zu>t`)<;EBcoEUWUKXT4;{c47WDRoQQ;!nbTAhn7N@ zn7=GSplTdX_1LTKh6IpPP9pNIr+@jSS@s|nyb2vYJ?w|C2 zSMXO6e*;1d?a5i8UHqT_O3aUHkiQNYw)|6YuVzt%*U~WXtJL_m_#%pLXYlye1|luK zeI_DSi@J}>d5=Xbif@w{Z(4kNUT{0qUl8|`j5~XLI~VoPT$#ccHv7iZUaB+Ae<(%! 
zW0k;WMx*LQs0e06?~baYZ;!r7;|Rag#qU{TA!(02qOd;jgCnTB6yw-3)*8FaccD?i zV|Nj(Qt(7O%@_07jJ5WFVekm$?B801>pkz-V&5$z8RQt)Ip3>>psFo4P{x{=Cb^V_ z;zM)nr)B%VNjdgV3h`5zc=AI@epkAD;!emv4f09UP9bVUh$HV{-a)CRN#59KxXRPIl}z@>qHRYzu@iCdpgKHvP0sqwU>dV9UKJ8^e zd^Q^2BL+|6>ok{p{cc}RyGMrTvz076J3;Ag`g~=1*Z(W!Ccc()c{ueey;}JV^j&5n zefZkbKAkLNyb3lku3gK{VW0FbkWHVya=w41Kd{=54bD8S$xLW1?Q=%G^dcjVi@O8AwQ%KeShy5nm ze%#5lW>ZKPe^0m5?c4ZA?v>s(H<+gz@!5P=WNHOb z6-lZcpMFbH-uB+vfAVA?2>rX8R%Q{3j1m}zA~5;$&g?GLc>W= zXx0+UC`Jb&!GPHzG8#Sjp1&LBn7rar|D{u>7^#Xz=KJN@___sOg~k`@z}K}B-{miN z2T?|PrR0LTFLh5b@|tiFDCt=IOfgtA!F(e8+{%e8>GH`I$J2=naJ+o!x}6Yznr-$sOGpvfzcXQ6! zc~=Z$csJuh`_p4%KgD_bjo}&S(6WWmlI*>C<0tBE1r6J~mBgKf4U64=L@J^&xFX`dy?0NH-(BpHh^w8|et-N3aET z1nETVmYE7rK2myH{7vl9J&N=$Y}Ot`T87)Rrg11ADZM>DfVZBIKvX-jLq)9`rOF86_C|`EK1@NvtBd-StIxXP z;;CilQ%1_)h~N5W#>OB%ag^X}DSo@KXoh*%K^TRd=YVOvgbmp?;3mU4v zxMW>!eQ{}jp1s)U8DCu5SX@$HTvUIuq7Z)@eotupmmi;>Z279jZ#XtT(-%IRk2x3Som+rxAqU5&WDAFK7r;=0hax!7ncE}g~Lm=9RN_bAG}nD|fy7h0har^|sq z2mE!|N|{+)a!2mW;?mpmP{>gJ%;L!#3hIkZ>&Dd=*Yp?W-J3JM*i>IUd1kQ@OkkeL z?NEoT4?#`@v4H(up5w@wlanDvAsPE2V-w;9`ZynXCKYwUE6Si4 z*a*L6e^)74S4+a~jS0KYDt6>oqD=nF*w}qIS7xWycNOP{{6T)#_aY#$ggAXbh}Z%*?@d)e?u(AdH&C}a#=r{ zRlGSb_iNA_s-UMeo=kD12&R1#{HNiZ_X9hNBSw&9OBL6`IM4T{$XZamDUZb(EAltK zhS&@qWodDV5lIPHmjh$@X6WbZL%t`#!}?-daS5eXu_lT*88G^LT={V@X4q*YFV`(QnDt~6<;U1^DW0MT(0N*(7Wi_DoCQIN z^ixsR9>it`&hx*GvRK^U`jN~w$h1P{Z8+DHxk8rNcryLHWo13`-2XZ9EJvOo>_E>n z(ccivKbzNC#217R5nqyiZZF=IlXpgL@t&Mn#rtyRBdrH=t*+-oE3_ScyHUog7?bJm z;pHDiXH51b{W3;!^4`lS-kCEKvX>yOFCNKRqFcr+t&AGfeLco&`WtciFJ&ns87Ety zecoNU#VX1;n6ngVJ&RV^ln2{v$VW6;R{;C>k83QhOGNj2DZ0;MK3Id;Q1pAuyGS=X z*X*@u{&faslla8_Stc4B(uCT4ff}6QGN~3cgBX zUygEn4$r}n^KQve?npsop1Duu6#V)Wod+T z173gT3c}C0C+89ju{i~g<|^;!VEBD7w*Y_qk#aMxg1L0waOn#uXk%`{T}8^iT;%>F zx8UI-<>9X++V@3R>m((b-w!$8 zA6GDPlCpOkt{)y(@XATb9mM)zVZol0lqc!*rNV-jPErPn3U;5Q{PrX+=BrKALmtR0 z*ifjvn|HpV+?`+WgF@w%e4Gvz6#Sx4xqsY!+^D3n<$|e-;&J|HobnPi=es%1E#s8G zk_8Urh94WJ{4)=yx93+rj!)g@<8*I+<$dFn2dEO83o36PhmJ!JqJ43EVSz6!@P!4w 
zu)r4<_`(8TSl|l_d|`nvEbxT|GAy9|9SrU7UTA;kB7^Y@AjJY$v9Y6pK9q+;qv>yy z$Ta;;6q%;KfkMXwo+SPrJ090$$9^AA*elb=Dh?kO>9}f6pCQtVL|QGX@^KxiFBPv?-A*PB7IDx z+eP}4NZ%IeVUdoTCdwD-MIx;hX_H77h_pkbt3MfoDVNTk&w zZ4&7Mk#>l5l}Oi#^d6BuDALD7x?Q9%iS%ue9v11i>7sm*UL?|Lkv55Rfk-<9VOkj52@Sj0dbLr*JeS zQx+l*QFW&lSS23%n_F63lVHTCJVlFVsd&Dk#l2L#K+)nyDn3rp;!Y}Fn22Agc#)#T zxm5h5M0`)h$1A%;KTX9?R(1)yrs5NneY*H5iZ))P@)s*gw~X-fJjI~+=;lt^!R`q4 zq=6vwlv9=GZqma~ON>kM)ja&=D{VYV#V0CVVjN4wCnAk}!v$FF%3W`Wrm`u`KRJGuoSmKf=w62M{#g12a=%=R)+6ExkqZO^-A^F=pun~9 zcfG*1_7cOBLa%ZWe@PuqaM*dGyp5Fb<#im;@N-cx;hWR&Hh~vi!THznMESPBOBQiJ zYvz{)K9Yu)U;t<3Uc>p@g`7@-mo#yJ?#0mYr!4q149HB5;HUdFblfQLVWB78E1~0Y zq&aAp{}k;4_1LjP;18zZFAMyEG<;Ox_ow0i68MkO@N?0=RPGPcaJtP&_kGT>A% zZCV4yMgsqVz|-g3zXDGBXnw2NXB)#a``4(Dqs`;B z(m!VWnf)}I{%4?qIhbAU)ambf^(|xkr3pQ?a_JeO9G!mJ#PH1i-U1v?NoT~R6+(V` zzYYj|qi8Q}z8eG1?uBcLi1Y#A=P8JXVGd~XTDm8#$N!HkcsVA_di;yB;NQ-IKb!?W z0Nj|SJCP@2LVZ5`&@q=^yif4V0e%|r;YJRyXGCz?EBFsCFHSmT(8{Qvf#g9 z_yomN&&zvV1c*NYr+Pgk`USs==K+8lH9|_`S@0uFzQia_C!pi#=~>C}35v0SGrEL6 zOSAAlngt&PejZkl4za!w(`cn=f_}Ls;MBgRCA?hrJP1ysz)4Q8m~WH{{WoTj^Q@3# zZ03yY`6HaRo}yoFCvYk^IGghud7`Yz!e4~Yqi44(v*0VT;NQxEKbi&Kl?DFfUBls@3BT}1q$=b7kufXOdbN<tkKDP>fZN3VU9S`C} zPoEcroH`L#wRvC;Dy7GN9&pk#Jq`yMKb%qY>x)IX^c?{`IrnD4f5Y(1b;pxg_&)-U z|1*w@Q7F?h&7QM?>*+Hf_)9!o6g?Y6#~*-`{fCA9wf^{q;7_;Psc1MVw@#Fcu+NTK z;Ckh@3VxH|$MlaKtAOj(YjYMke-m;x3pqw1=br+1q}kzQYLGLs{fchCxV<;^K_-ydE81;Cki$1h}4_ z^iZW9ei@dydid-tco;b8Go0q2K*-EY3=2)BL%X)(Ypp zEyA2CqcvRVoaYIeP0nT~-VO@}!XC@KIjt_6+fp8Mg(BrgWns?!-* zaUI@r%yPZ*IM=I^*Js&VX4czL$(|KXn^Wh#M|UU^j`VbNl)II7PeT7lx6|zl_&s68 z>1+=;JAHu_E}yeK5(tHzuAW}S9q0~HmufGcR$W`8&*bcYVVo{<6DM9^4XshwyROdm zp6>27$Rbh9_$jPTXVbj;W{cA@x6z5{;cRPe(9T7bTAG?#Ep5)W`k8hMI2x~+TiFn(7bp}15aKMj7kF0U7Vm>8zk*bzp zqoTKuik`B9t_ksdrSd_l+wX0_`3m&E{72%DThraeFKfl-do=RlHxd z(#zX3U10<%$|HdmOATIZX0Ih%yvUEZR_k;|x!8-h;&wzo0&uu zUL(JVkN)9tb=%y)&F(IEa3tjNM#2~{*r;J4Hw@CNs4SE${I*Yra!xTQ1V>O}sEDj3G&kzVd~vR6>u z!5%LT$D9tw952KEp6)FeX3H3&slT5-8cf 
zx;xb>uy%J~l_zQLigGr9=!NWB_TD!IY4Wd{Mpwke2>7|X*F{B86w8#A>J5^IWpO+1 z&U(QfHE3IaCa;oRVMR|zc0Wk1ZJKM=u;zl+=4s8Yl^#~TmLTsoHlNpmfjK;HP8*sZ z0}?!ynodkrDok(|Pj?XgH{=N-lm|liV1p&2xMbgLte|Fft_X)I@P`m$DRr-Oy1Q07 z=_?7zHmIad5cWiph-ekrlGUvDh0RqqKl}PbeWF*wPqmV-um) zfG=$Ert8?Y0Io(&*y~#1U1c^;!@17|H6v(g1jvp6s)f#7Khx$km$L~Xx2JuqUD${M!~}eJ!1wTDE?7R_mvfed6;t!AW{$>(sVnKRI?Cd2GlW z+gc;lt1xc~glxxI&?Ms~jTEg@ofZo`z8SsPZdh!*^94zRUWK}KV+h4ry^pCf(TSv)scIo;S^xst765Co_KYs}NM0dpRk)nR^S zuCSp$*~EuM;E6D0ZNfz}8{E<4pa`wWu@IGQ)st@TL{O|Si-6ed35OBfL{AXYk`!Oa z>@PgprFv^ZvboA0=oC_$FpBulsx%Rxo|-W^%7{JT%J6De5D_Ar5+Bm*k*QM^tAUtQ zrRbkA22)RCaWRtk0FN$DiWGCDG`7)lwzg5s;{zdd&6WJ16!QM_k?SrH{ zIt{Mr#!npzFe%7B62NJb!#yT4nBpII#LqT*=tYBsA7qIj)0zUI1+EZxsJYoBPiw$Z zuLnCM=>c7fYSSe~a>PF8h?23qqiheH^Yl~zj)*3Kd zGw6+2(=dg?$I!x9vxOR55ixP8srLoDT=b==;~HMS=kPY53I1_rm&fj=@?7f}lc~gv zE_GcjCM#1bo4h_BHnqa_{`Lh}*C1qj$Rn{ygB2rNHQV@>(kcwDdh;60eyi9fgw408 z4O0Zx>E(@18XN*^w2jX6N@4C?jqMchz(cfYiQZg$9yq7+>A2v=78>oKwN_)%f(eSY zvf%4;Hjmb=v$iGozuhWRD9nlKU;!`IE>ft4=`$mq?AynSAhmNntKoa_K{1+V^Im3i zEzs(Zb<$pIqFBB4U^nK)nFE+Sg0KNqCz{j7gEs$9-*%8|ow7kQi#cVJ$3b&LZHAsMGZDb3vt*w`A{$$lf36*xbbn8oN*%K$y!tD1 z*_0U*iDS-3vy4-GR?2LmGa>q1<5L^@=>I+IWZo0VxJy&B&>QKR=kl+_;s9$(GYeQ^ zYrdE6H)J$*VlO0pa;}XKvV9Ydp=Z~O9lt6sn~l&WqW-AFcRJCRjuZH++&+&hWPvd& zoHk5j@uQn4$8@FCiIG+rz5OlW2DGPzpr81xAuX@u&92l%O=eppXPGItN!WZ-wqa(@ zj8?3rZ7J4tROf1%_pv(#*y#6RzaLw+v|o_8^^z=4XRE-}Xjx!uus9nW^Vv|8oC16< zPqrN^)TUN1Egr$ViG5%4&V{a!pCUM|{8G1o;U;?Rqm57h+gnEIuAk~5W)nW-$!y-_ zw>X+8I*M&Hihw83Xx#SM%#>J1Wt*|4?8RWwG*5pgEG1l~)_@hEl!w=JM_enAMnXL8 z($4V3T2DE4HOg1e=GJB2c7;)0u5g!9-oD0i2xg>d3p$m4TS25}bj zMU-;3I4Q?zIlj57l+zSlDW^L!U<$B>Z@H&S+=J3*1-H(+X;)O-BmdtdcnpBOQzjleNW3Sa zy)U9YU!avC9uv?=r5tDYq+meHuf0#ArMtv)HQMt;+H**n0J_hQM;7snI6t)aPPB9n z9_^rm$|X@+emoAsjyn8HvbFr$`zczgy@$t`@sChPBNZ#o=$$w%zxE!BmY$ZTzs9Gf z^O2w4vD0YneHSg=BuXOvNl#6G0W0k|p=(-x?Y%xN)!yq%x1UyjzsNsR2-M!6(NgWb zLduUvcM|1m`Fnv;74g_KJ2qFPrj$oMNLTrw$CBjy+WRw+WR6}dUAzaI{${0j0~ma*WLrtQapy7a-`Rvo?D{&YweeKU#OdB 
z^J#_^Qj8+ilK;SooL_sdNK5aci_(!^{*Q4@&ad8Z)k@@aG~G83lNUwaOD zlgOXWpJq;_5CCB&O`u4%{8Q3!Ex(+mRgvY`rImj=7l6mflm`4XeYE&R{ZrGObMk-p fJiyb}@SK=@Xys}cJsYQ&|FNx{qa-au8masrv^e4* literal 0 HcmV?d00001 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/visu_interpolation.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/visu_interpolation.py" new file mode 100644 index 0000000..bc82820 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/3d_interpolation/visu_interpolation.py" @@ -0,0 +1,44 @@ +''' Visualize part segmentation ''' +import os +import sys +ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.append('/home/rqi/Projects/toolkits/visualization') +from show3d_balls import showpoints +import numpy as np +from tf_interpolate import three_nn, three_interpolate +import tensorflow as tf + + +pts2 = np.array([[0,0,1],[1,0,0],[0,1,0],[1,1,0]]).astype('float32') +xyz1 = np.random.random((100,3)).astype('float32') +xyz2 = np.array([[0,0,0],[1,0,0],[0,1,0],[1,1,1]]).astype('float32') + +def fun(xyz1,xyz2,pts2): + with tf.device('/cpu:0'): + points = tf.constant(np.expand_dims(pts2,0)) + xyz1 = tf.constant(np.expand_dims(xyz1,0)) + xyz2 = tf.constant(np.expand_dims(xyz2,0)) + dist, idx = three_nn(xyz1, xyz2) + #weight = tf.ones_like(dist)/3.0 + dist = tf.maximum(dist, 
1e-10) + norm = tf.reduce_sum((1.0/dist),axis=2,keep_dims=True) + norm = tf.tile(norm, [1,1,3]) + print (norm) + weight = (1.0/dist) / norm + interpolated_points = three_interpolate(points, idx, weight) + with tf.Session('') as sess: + tmp,pts1,d,w = sess.run([xyz1, interpolated_points, dist, weight]) + #print w + pts1 = pts1.squeeze() + return pts1 + +pts1 = fun(xyz1,xyz2,pts2) +all_pts = np.zeros((104,3)) +all_pts[0:100,:] = pts1 +all_pts[100:,:] = pts2 +all_xyz = np.zeros((104,3)) +all_xyz[0:100,:]=xyz1 +all_xyz[100:,:]=xyz2 +showpoints(xyz2, pts2, ballradius=8) +showpoints(xyz1, pts1, ballradius=8) +showpoints(all_xyz, all_pts, ballradius=8) -- Gitee From 3bdc6ac2f1b0ff59070e0479a544ad74412fe1a6 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:32:52 +0000 Subject: [PATCH 55/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20grouping?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/grouping/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 
54059a14677fa193961a7fb6973fa20c7179ba24 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:33:34 +0000 Subject: [PATCH 56/69] =?UTF-8?q?=E5=88=86=E7=BB=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/grouping/.gitignore" | 10 + .../tf_ops/grouping/tf_grouping.cpp" | 213 ++++++++++++++++++ .../tf_ops/grouping/tf_grouping.py" | 105 +++++++++ .../tf_ops/grouping/tf_grouping_compile.sh" | 11 + .../tf_ops/grouping/tf_grouping_g.cu" | 141 ++++++++++++ .../tf_ops/grouping/tf_grouping_g.cu.o" | Bin 0 -> 29608 bytes .../tf_ops/grouping/tf_grouping_op_test.py" | 28 +++ .../tf_ops/grouping/tf_grouping_so.so" | Bin 0 -> 84080 bytes 8 files changed, 508 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.gitignore" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.py" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_compile.sh" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu.o" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_op_test.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_so.so" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.gitignore" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.gitignore" new file mode 100644 index 0000000..2f08276 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/.gitignore" @@ -0,0 +1,10 @@ +a.out +query_ball_point +query_ball_point_block +query_ball_point_cuda +query_ball_point_grid +tf_grouping_g.cu.o +tf_grouping_so.so +selection_sort +selection_sort_cuda +selection_sort_const_cuda diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.cpp" new file mode 100644 index 0000000..924be5f --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.cpp" @@ -0,0 +1,213 @@ +#include +#include +#include // memset +#include // rand, RAND_MAX +#include // sqrtf +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include 
"tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/common_shape_fns.h" +#include + +#pragma GCC diagnostic ignored "-Wunused-result" + +using namespace tensorflow; + +REGISTER_OP("QueryBallPoint") + .Attr("radius: float") + .Attr("nsample: int") + .Input("xyz1: float32") + .Input("xyz2: float32") + .Output("idx: int32") + .Output("pts_cnt: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoint * 3 + c->WithRank(c->input(1), 3, &dims2); + int nsample; + TF_RETURN_IF_ERROR(c->GetAttr("nsample", &nsample)); + ::tensorflow::shape_inference::ShapeHandle output1 = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1), nsample}); + c->set_output(0, output1); + ::tensorflow::shape_inference::ShapeHandle output2 = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1)}); + c->set_output(1, output2); + return Status::OK(); + }); +REGISTER_OP("SelectionSort") + .Attr("k: int") + .Input("dist: float32") + .Output("outi: int32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + c->set_output(1, c->input(0)); + return Status::OK(); + }); +REGISTER_OP("GroupPoint") + .Input("points: float32") + .Input("idx: int32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ndataset * channels + c->WithRank(c->input(0), 3, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints * nsample + c->WithRank(c->input(1), 3, &dims2); + // batch_size * npoints * nsample * channels + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1), c->Dim(dims2, 2), c->Dim(dims1, 2)}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("GroupPointGrad") + .Input("points: float32") + .Input("idx: int32") + 
.Input("grad_out: float32") + .Output("grad_points: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); + }); + + +void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt); +class QueryBallPointGpuOp : public OpKernel { + public: + explicit QueryBallPointGpuOp(OpKernelConstruction* context) : OpKernel(context) { + OP_REQUIRES_OK(context, context->GetAttr("radius", &radius_)); + OP_REQUIRES(context, radius_ > 0, errors::InvalidArgument("QueryBallPoint expects positive radius")); + + OP_REQUIRES_OK(context, context->GetAttr("nsample", &nsample_)); + OP_REQUIRES(context, nsample_ > 0, errors::InvalidArgument("QueryBallPoint expects positive nsample")); + } + + void Compute(OpKernelContext* context) override { + const Tensor& xyz1_tensor = context->input(0); + OP_REQUIRES(context, xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3, errors::InvalidArgument("QueryBallPoint expects (batch_size, ndataset, 3) xyz1 shape.")); + int b = xyz1_tensor.shape().dim_size(0); + int n = xyz1_tensor.shape().dim_size(1); + + const Tensor& xyz2_tensor = context->input(1); + OP_REQUIRES(context, xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3, errors::InvalidArgument("QueryBallPoint expects (batch_size, npoint, 3) xyz2 shape.")); + int m = xyz2_tensor.shape().dim_size(1); + + Tensor *idx_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,m,nsample_}, &idx_tensor)); + Tensor *pts_cnt_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,m}, &pts_cnt_tensor)); + + auto xyz1_flat = xyz1_tensor.flat(); + const float *xyz1 = &(xyz1_flat(0)); + auto xyz2_flat = xyz2_tensor.flat(); + const float *xyz2 = &(xyz2_flat(0)); + auto idx_flat = idx_tensor->flat(); + int *idx = &(idx_flat(0)); + auto pts_cnt_flat = pts_cnt_tensor->flat(); + int *pts_cnt 
= &(pts_cnt_flat(0)); + queryBallPointLauncher(b,n,m,radius_,nsample_,xyz1,xyz2,idx,pts_cnt); + } + private: + float radius_; + int nsample_; +}; +REGISTER_KERNEL_BUILDER(Name("QueryBallPoint").Device(DEVICE_GPU), QueryBallPointGpuOp); + +void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out); +class SelectionSortGpuOp : public OpKernel { + public: + explicit SelectionSortGpuOp(OpKernelConstruction* context) : OpKernel(context) { + OP_REQUIRES_OK(context, context->GetAttr("k", &k_)); + OP_REQUIRES(context, k_ > 0, errors::InvalidArgument("SelectionSort expects positive k")); + } + + void Compute(OpKernelContext* context) override { + const Tensor& dist_tensor = context->input(0); + OP_REQUIRES(context, dist_tensor.dims()==3, errors::InvalidArgument("SelectionSort expects (b,m,n) dist shape.")); + int b = dist_tensor.shape().dim_size(0); + int m = dist_tensor.shape().dim_size(1); + int n = dist_tensor.shape().dim_size(2); + + Tensor *outi_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape{b,m,n}, &outi_tensor)); + Tensor *out_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape{b,m,n}, &out_tensor)); + + auto dist_flat = dist_tensor.flat(); + const float *dist = &(dist_flat(0)); + auto outi_flat = outi_tensor->flat(); + int *outi = &(outi_flat(0)); + auto out_flat = out_tensor->flat(); + float *out = &(out_flat(0)); + selectionSortLauncher(b,n,m,k_,dist,outi,out); + } + private: + int k_; +}; +REGISTER_KERNEL_BUILDER(Name("SelectionSort").Device(DEVICE_GPU), SelectionSortGpuOp); + + +void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out); +class GroupPointGpuOp: public OpKernel{ + public: + explicit GroupPointGpuOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, 
points_tensor.dims()==3, errors::InvalidArgument("GroupPoint expects (batch_size, num_points, channel) points shape")); + int b = points_tensor.shape().dim_size(0); + int n = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("GroupPoint expects (batch_size, npoints, nsample) idx shape")); + int m = idx_tensor.shape().dim_size(1); + int nsample = idx_tensor.shape().dim_size(2); + + Tensor * out_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,m,nsample,c}, &out_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto out_flat = out_tensor->flat(); + float *out = &(out_flat(0)); + groupPointLauncher(b,n,c,m,nsample,points,idx,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("GroupPoint").Device(DEVICE_GPU),GroupPointGpuOp); + +void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points); +class GroupPointGradGpuOp: public OpKernel{ + public: + explicit GroupPointGradGpuOp(OpKernelConstruction * context):OpKernel(context){} + + void Compute(OpKernelContext * context) override { + const Tensor& points_tensor=context->input(0); + OP_REQUIRES(context, points_tensor.dims()==3, errors::InvalidArgument("GroupPointGrad expects (batch_size, num_points, channel) points shape")); + int b = points_tensor.shape().dim_size(0); + int n = points_tensor.shape().dim_size(1); + int c = points_tensor.shape().dim_size(2); + + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==3 && idx_tensor.shape().dim_size(0)==b, errors::InvalidArgument("GroupPointGrad expects (batch_size, npoints, nsample) idx shape")); + int m = idx_tensor.shape().dim_size(1); + int nsample 
= idx_tensor.shape().dim_size(2); + + const Tensor& grad_out_tensor=context->input(2); + OP_REQUIRES(context,grad_out_tensor.dims()==4 && grad_out_tensor.shape().dim_size(0)==b && grad_out_tensor.shape().dim_size(1)==m && grad_out_tensor.shape().dim_size(2)==nsample && grad_out_tensor.shape().dim_size(3)==c, errors::InvalidArgument("GroupPointGrad expects (batch_size, npoints, nsample, channel) grad_out shape")); + + Tensor * grad_points_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0,TensorShape{b,n,c}, &grad_points_tensor)); + + auto points_flat = points_tensor.flat(); + const float *points = &(points_flat(0)); + auto idx_flat = idx_tensor.flat(); + const int *idx = &(idx_flat(0)); + auto grad_out_flat = grad_out_tensor.flat(); + const float *grad_out = &(grad_out_flat(0)); + auto grad_points_flat = grad_points_tensor->flat(); + float *grad_points = &(grad_points_flat(0)); + cudaMemset(grad_points, 0, sizeof(float)*b*n*c); + groupPointGradLauncher(b,n,c,m,nsample,grad_out,idx,grad_points); + } +}; +REGISTER_KERNEL_BUILDER(Name("GroupPointGrad").Device(DEVICE_GPU),GroupPointGradGpuOp); + + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.py" new file mode 100644 index 0000000..45c96f5 --- /dev/null +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping.py" @@ -0,0 +1,105 @@ +import tensorflow as tf +from tensorflow.python.framework import ops +import sys +import os +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so.so')) +def query_ball_point(radius, nsample, xyz1, xyz2): + ''' + Input: + radius: float32, ball search radius + nsample: int32, number of points selected in each ball region + xyz1: (batch_size, ndataset, 3) float32 array, input points + xyz2: (batch_size, npoint, 3) float32 array, query points + Output: + idx: (batch_size, npoint, nsample) int32 array, indices to input points + pts_cnt: (batch_size, npoint) int32 array, number of unique points in each local region + ''' + #return grouping_module.query_ball_point(radius, nsample, xyz1, xyz2) + return grouping_module.query_ball_point(xyz1, xyz2, radius, nsample) +ops.NoGradient('QueryBallPoint') +def select_top_k(k, dist): + ''' + Input: + k: int32, number of k SMALLEST elements selected + dist: (b,m,n) float32 array, distance matrix, m query points, n dataset points + Output: + idx: (b,m,n) int32 array, first k in n are indices to the top k + dist_out: (b,m,n) float32 array, first k in n are the top k + ''' + return grouping_module.selection_sort(dist, k) +ops.NoGradient('SelectionSort') +def group_point(points, idx): + ''' + Input: + points: (batch_size, ndataset, channel) float32 array, points to sample from + idx: (batch_size, npoint, nsample) int32 array, indices to points + Output: + out: (batch_size, npoint, nsample, channel) float32 array, values sampled from points + ''' + return grouping_module.group_point(points, idx) +@tf.RegisterGradient('GroupPoint') +def 
_group_point_grad(op, grad_out): + points = op.inputs[0] + idx = op.inputs[1] + return [grouping_module.group_point_grad(points, idx, grad_out), None] + +def knn_point(k, xyz1, xyz2): + ''' + Input: + k: int32, number of k in k-nn search + xyz1: (batch_size, ndataset, c) float32 array, input points + xyz2: (batch_size, npoint, c) float32 array, query points + Output: + val: (batch_size, npoint, k) float32 array, L2 distances + idx: (batch_size, npoint, k) int32 array, indices to input points + ''' + b = xyz1.get_shape()[0].value + n = xyz1.get_shape()[1].value + c = xyz1.get_shape()[2].value + m = xyz2.get_shape()[1].value + print (b, n, c, m) + print (xyz1, (b,1,n,c)) + xyz1 = tf.tile(tf.reshape(xyz1, (b,1,n,c)), [1,m,1,1]) + xyz2 = tf.tile(tf.reshape(xyz2, (b,m,1,c)), [1,1,n,1]) + dist = tf.reduce_sum((xyz1-xyz2)**2, -1) + print (dist, k) + outi, out = select_top_k(k, dist) + idx = tf.slice(outi, [0,0,0], [-1,-1,k]) + val = tf.slice(out, [0,0,0], [-1,-1,k]) + print (idx, val) + #val, idx = tf.nn.top_k(-dist, k=k) # ONLY SUPPORT CPU + return val, idx + +if __name__=='__main__': + knn=True + import numpy as np + import time + np.random.seed(100) + pts = np.random.random((32,512,64)).astype('float32') + tmp1 = np.random.random((32,512,3)).astype('float32') + tmp2 = np.random.random((32,128,3)).astype('float32') + with tf.device('/gpu:1'): + points = tf.constant(pts) + xyz1 = tf.constant(tmp1) + xyz2 = tf.constant(tmp2) + radius = 0.1 + nsample = 64 + if knn: + _, idx = knn_point(nsample, xyz1, xyz2) + grouped_points = group_point(points, idx) + else: + idx, _ = query_ball_point(radius, nsample, xyz1, xyz2) + grouped_points = group_point(points, idx) + #grouped_points_grad = tf.ones_like(grouped_points) + #points_grad = tf.gradients(grouped_points, points, grouped_points_grad) + with tf.Session('') as sess: + now = time.time() + for _ in range(100): + ret = sess.run(grouped_points) + print (time.time() - now) + print (ret.shape, ret.dtype) + print (ret) + + diff 
--git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_compile.sh" new file mode 100644 index 0000000..7bd30ad --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_compile.sh" @@ -0,0 +1,11 @@ +#/bin/bash10 +/usr/local/cuda-9.0/bin/nvcc tf_grouping_g.cu -o tf_grouping_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC + +# TF1.2 +#g++ -std=c++11 tf_grouping.cpp tf_grouping_g.cu.o -o tf_grouping_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.4 +#g++ -std=c++11 tf_grouping.cpp tf_grouping_g.cu.o -o tf_grouping_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I /usr/local/cuda-11.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + + +g++ -std=c++11 tf_grouping.cpp tf_grouping_g.cu.o -o tf_grouping_so.so -shared -fPIC -I /home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I 
/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu" new file mode 100644 index 0000000..578330d --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu" @@ -0,0 +1,141 @@ +// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3) +// output: idx (b,m,nsample), pts_cnt (b,m) +__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) { + int batch_index = blockIdx.x; + xyz1 += n*3*batch_index; + xyz2 += m*3*batch_index; + idx += m*nsample*batch_index; + pts_cnt += m*batch_index; // counting how many unique points selected in local region + + int index = threadIdx.x; + int stride = blockDim.x; + + for (int j=index;j>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt); + //cudaDeviceSynchronize(); +} +void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) { + selection_sort_gpu<<>>(b,n,m,k,dist,outi,out); + 
//cudaDeviceSynchronize(); +} +void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){ + group_point_gpu<<>>(b,n,c,m,nsample,points,idx,out); + //cudaDeviceSynchronize(); +} +void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){ + group_point_grad_gpu<<>>(b,n,c,m,nsample,grad_out,idx,grad_points); + //group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points); + //cudaDeviceSynchronize(); +} diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu.o" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_g.cu.o" new file mode 100644 index 0000000000000000000000000000000000000000..56b51c99510d206b9274b7ca1a2a4280b149649b GIT binary patch literal 29608 zcmeHw3wRvGm2P$Syfl)gM=x8FN1CxlvPbqKRu+Rrz#B+LmSkJ9l^9!= zWH7mGB4jJZ;Soa|mQ93Lplm|2c{n6t6K*!E*kBS8E)bIq-`)X?B-W6p5hhMHB;5a0 zcaKLSDRv&;{qEi`1v6E3>YP)j&N+4J(WCZ<*KJ%c3Ia2dz}B*hH3Q1n&fOQwabAqG z5?0Jw?pQ;$*Xc`k$63pPpT!Q}TJ3MS3txu^s(pvAtFHdG39ZTx-;@iQt38J|R=YMH zZmxE2M9KCV5e|osX3goR}H>ZzH(vF;5m;9`1VQop-1${+Mr8+q?9(md66K+E!4TrW@YJ>n;Ff zx2C8g-F342AZQqCd{5)wUUm4H#?6h_H6DI`^RhP&=k~N-e zuC6AtAoo#yoiYw3m!j&T>bL2u32)ZO8{HeCW!>^$B~H!k)#tJz+m*Gc1|6 z@o-+X-rax$oO~Jd&|T?ld;SOUE1qr769jk8w&&*wo;TZ`_qH59JIk8#@P6GgfjKOi z)Z1cnSTvq(wR+8J7wyFz{c`Q+TX)fZ#k&1UFWS!r`{m8BUoqLwm=pWlu-Z=gXqH>Y zEjQCETHot5LCPwi1Q|DJ^>A!xcQS2 z|NfNugUMji<_^swE%MDje=uyuT7SOzF@ZT*{N23DL?#Gv6nFAvPJ_|671O!$VijxQ 
z-08#G`Wm$A%_93Ydv6Bn_-d^Zk7;?Oxst_Mj7j(|q>xe9zq5a!Z=iE4N=AOmjar@O z?fpA7HGXES_u?m3tCnajY{gAg~z<)LSI%jFRmuAdPH_b$k1^ecL-1CLF~j zmOm32H5H#i=f&}k>!RgC{zB))agFQz-r(?y@*D7PL(W3yML7rcjgrZ0V}{mDg&F07 zj52Tx2?!@$22Q#R97B4-Ntc0>E(6EV2;rp5z)6>ZV`!Oh(q-VJ%XS;~i;TquACt;_ z%(PIWwlEK4=N6~yW`_HRjJk>Jno;NWIaiRz7a6wX8FhidC>eEh$?MXhE10Rm!~&?A zTsC$FHoYCJO_k^Yf|(#6C?Q_}@&(fTt;{#0|I3s2qI`d@yy%_F+kyOc_)Dg|38zBa39%_x}}rdm!r(UEdyZ+`p#3YT&OL_^Sr~ObxvA&WHPKuddF# z9{Dpl{+0Ys)qpiI*u0otSPKpFUAF^#J4e-jvtG#_ujYYi1olhj@6JHz~Ibr*>%d=*)Qzv2fV;3xO@mcv=X6sqnmIn{4aXzkC3qY$;KS96!JZvNT;uo~&LsxbA+c(B`(l#spjcf+ z$5I=cx(4;TJ(8<{2wD=!Mm|2%R2S(1`Y324_{}GV5J3V}lc`2N&h)%9@xTtFaOF7kPyXf;Q~RF7^P*gghmY$?(4QP4 z|Jlm*JDJqv9v*3g7&L!|c+OIt9Sb zbas%X?kOWaB0I(PcjyFW-}K2U$yLbtC6XV}y0RJ)cq{#(&=+eb@DS2)t?Vo`E zC)c+Uzd7_okG|%WX9G7M*8huo>fn54%LBd7mS+Pu-w@`{EW7n`KC|sckufdLliYk{ z|9R~;$ob5c2livOwEcB|`U7@5!ue#$lVSfF;&&N#8|8d-c{1$J{Rwuf<$NwJ&vD|D zwttpf;Io4Bxr85O*ngb(&EZE1=QEEyR+@d-L0~^&%#&w1pV{(+IiDWB#on{-W#2M#{aZ z?;svxT^5+<=kH+M*5e=I9mT~m?4M5{gn22>AppdU`G)!A0X_C%e>kpPw0|e?5#Lfz zbb>Cuz9d|;;#vpVeb`qxk!$uBY5zuK6yI*s;@kKWJifsnAwSkH*R1)`0l4o%VqBa5 zS$c;2akL*4$Dc6bn$BmYJn8xWfWE$I`<;w9l9tDaYcu4r4o#B;Rg)L(&*jSYL&!sL zlE;W^oKJfG&#$LeqMJiMJgyx&A(8|X|7XZ`8S?ZJUz-24enT!JzRjs8%ny>^XUHMC z)(@hdx8dALxDUlot=;g;v_FnNQ2{*i$HkOmeWiT?_D8f1-(9-Y@V~`u5C+fmzX0$q z%l|~a{&#w~|7ESOiBwniy2|?>POZ!EKdc{V|J$qipT54P<6&wzdtK#xZsz7cf&KG3 z^q(1j_F}wPr;{hRy=Sejx?ENqABM31&saY+`E+^2)VetM<|SOye7-}^ydC>758%Eh zv3ooN`=#~GPH{i=V&6mh#eTG!@G!pz|LYwI`^nqhMdw+ZpX1nH^7HdJ=AUtXPVd)T z>uEx^Bu2D-BhDSD(s@wZr$LTV;*0$+0tnXc)H>cSr5eGX_IuxP@!WN)gYrajYX?6^ zrRe-d`)Ps8_2l%nNfqbUglmYOzu>ofsU7nLWOSaZ(eh*$<(LNtwLCe@;TM|55&kBs zJ~6VL*AdqgKJEuhe1D8_vu81GY2=|%7H2KmeFBe%gZ~Tg3nGy*&sp+P#s-m}c?a+* zt$xYeng}$-v42kAU!(*Q&=2}J?H6%w@KA0_Y4;Z1)LuHrEE;n$<4`z{P@%Sd?UG} z5a?PaUex5+0Pm&ylf;O2ew(V$&R^R3?LN=2YZB;m?kxBn`5ng7VFq8+&2&F<;RMd_ zIKL2FJMR?a5S;25dVFl;G{#By1ykC%Qd1Zg`0Do)*#DeMoEw7vP6E&5Nw}s)H9wn# zKJJ5`NpbY=Gas8GIs6k9CKK|~u@v$3cn;C|1M8=hN~{JP{(fHX=lqO*H2xl}Yu*^g 
zOKSaWVAF460mA-I>rZr3+wOT7Ob9!S$CId7k%k2))z#Kbd-dW`9W< z5Bf`bb^Oz#G%kbxWa=~@--Q!ZYLhxtWsFZseFb#5=YXAYKL-72e8y6JakAr>YZB|e z)r0#q&Ob`}=l(sMDGZy8Qb@pvn`p3vI2JWh2#(N9m}{%K^&gF4QY zrbgs8^RdUN?)Rny>;rLMk(vrnyZNQZDfbU2UyW1!EQs-6m3hk(RQC_3UTr0~`K2c) z_YaRubnyDI6O@B)s+ZTdJWjd)0w?GF4|4di$0-M&sUy7p(&Lm;IT$7cu$xAmDizM;!S< z%qOl7$WL|BBguad_M#m6Ck4>@n`NieL@(h%4}|A~9+H$pE`pOD`22$ZBLJL1{FV5< z3H$~Aiuk^mjU%*Q9Jl5#>2W7Uv~hC(Ww5_Kelf-6%*z-*U%!p<8~hV4&CY52W$6WS zbNfJUZb#TRpv%3HkEcx=7wpH^FW4{sJJ?T5-fKd;4e=WDm9JABuajL(^_5|K-;&{rj&0etiXXzJBrb>f-v<33)TuugLBN5+>bxWmoMBujD#NK{8OQ?a6YcZ&!R{}B9Mk5qXc{o;OaB!Ku=;=z1XC=SBjsNc`;^Z5ETGC6bo zN>Vr>yEpUoN%sS~PmcqR`}Ah=E9_(5h-vFlHPw%%*ALe^548`wYADBj`MLnFCu=A_ zIy|y2&g&yJlw)7Ju9fP>`h|Ti)j`+XL2&d>Iq;f$c|BE4`O#t5x z)IOZ5rkvzBLvq;72i^wfpcF8IR|)*RMCZ{V-3n z)~^J}xqSwa<9UXc>X09+56(}}n-leeu$Kop)+^0EkelixH{~REfa;0Vd9DXz{et`i zKZ@}WBFFeOdm+wi<4@{#8`11GJfhj_;`){703FUJUdqXiy;RT81I;_!=W2E|^pIY^ zUSzG*4*$aVGuN;5xZ`L~j~n|8O@7*c(fVbKAJ6+Zo<4r)!-+ciuLn8#FXs1cF&@O^}_>sS?k0D0_m%CEO`yDx$@CmK zF>-p4#y^}g(LU%P&K*YxKAes@{bU*gp_=q=*{Q9`-c{ctGyWaEiBv%9%VJXUr8~$`K4S?09mTuYY)c zw3qmnou^Tgd=)0xneWpH?h{8Ox|fL|uA$D)w@d;a=%Idk{zUr{4j;sTX`iC~en5Xt zI?<`^BZkjZ5&xq&*MWbz2hY!&2J!q#8o3th64qD!T#DZ}z}(EHolB3=Ik*{hJU^R2 zM)YHx-rCfQ9ZX-EenRihpf~u<$49$AI8lY?y4>HS)GOfYC^z&qmO8-uk+wXKoYrN; zqwyTfp8~!vZ#V}9^5>5PzmwKyX2JMz?%>ba#N-rTk0DPSa${fo9CA7jYUB9q1W(w% zoY&5o+CCoZ-Gm4Dln3-2biCfG=N*)v)#-uPtHTHN{D_{9>iKa!AJ_9|DM$Qd6YyZr zpV8sxC^z(t{XbvtFs^a_+`LlL1NJ4@uh9KF?*GqB@beqlU35(if<6!9^&m&w*2bAg z&OJUZf73bYKLC6hZ(Pq?^}IvRdnt!papZKK;{HJX+Z1>R{-?!j!+#+M;gK9p=*ZPzcu{X@W(`Qa#Y)& z(mf0JXCCix|94_yTCDnnt|!bx;!`H39^rg&Z-MydS%mZ8;7>F99pmx8(6gZzrw8oc zjrc1i?;U0E(}b&Pochz&k-JL~Z=a=ji#Y4SI5CeFBZK{MebRJ}pC3lgfjPCz&~H{gdAS}N7^S}mu!H6z97sT0!I zv@9)SmN>@WTF#>XnNB~NK~H}(kWKH<wBgI9|K2?GtMxw`$fS49 zLH~D}@@Dn_Go5}sqyK_A=pO-$M98+m7ibrQWayvDbB%vDi07g&CS;^uZG5~u*ZAG& zcrN;bgp8C)KiBvt=b=AO$Vi62Pi=z>et(AibJ45dODI?~XQV-FhA55e zd}unrB%U|{GtvagbJ54W8q&DVnCM}WvbY9Le?J7`x#-z44JDHv8=%r``m1&QWy?Pv 
z(C7#Cc}V@kqzkj@iO!6V@psB7&PWgIH6u6dK{6Aa;b$|5P`Q1%1~KwXx@=jqX*25= z=>(w#4XqTFkv1Za^O9xe-)Va|F#T0|WxB`g&fJPJ1GC0ejs*XWx#l z0Z;#p?LpK!w{P$3>hEvw@A+VtClab>?}U2#d$#WC^3-+h9_YKpbGd(~=CjxFzr*y@ z?F*_AwxFOr#I9%$_}8*px3ZQkTcECG?rwbC!CTp~g`u^qK7y|bcNmTC2z^EI^=3}Y zUTgm!_PV~V9iF;g{E@b&rgyaxTe}Le_jX}(P4{XgTF9JR3)%X<)sbK!n-(G=Pfg$U z)xpr(Rd&05*Y=Mw;UmI_Yx-2h?|I(_v0_ErstwGo;!}z*@hK+X#XRodU5vSd0G5RS z3<~ZD%I@$6K%-PbmAPY7LY282Q0kAht+L;^=cX;fv<19twheUlY_Ho7*uGtLyY|%e z?-#rw|Az!tAm&xD5`PsdRW>naxQG=FT+Mp>y9Rpec66!ix}2I`#qVaq)$F%o9A(ee zzD`e5Q=namt+IEpg2?-rd@GY`qF`}eQiH4P+in`@tQ*)@5v%Q0KPnVNn%H`X z9NHLCi-c*@R`Gm7ioq z!COSrHZjM=ps*^&wy~w1+sj#T_&Fv%#N458G3E{XYgz6)!qSNU-vqxB9$>|h4+-)S zw2)fzkFdhntITu+<|0^(u#d2%4gPv@u@d{8BwZj;EF81;0Dtq^hoppV? z%UN}U|1cBGOn!l}8YS>iRv&9~<33oqgO$bZU_nKN9#}KWy_pGL6-{0ySlG=CtL*g0 zo2cBwO#dQEn9%<#) zVsA^3#q_@=SL7|pESr`@{ZC7w&fVL8CE?Mh;9~dn!k+QPHNCNR_F#CG9UfJ;Z+DNA znci<&99h}O#OrJ&p&cgsXC!y@02J2XSD4spTF~%*wn|-P_p@pxu!4!dVoQ`j9jjF8 zSTR0Qk;PhUaRO2@u*SSNsJLKW1j9Ge_c!1@6j+H5&WA4c3m>EtE{Le9(IyEd8En&2T zKF35i3Pm;Q%G)i4`yLg{tHr|5qwuL)MccPoNo>8v9sVtPdE8nN`mITP+brEN^nA`QiCyJ0Y1f{ zSJD4Mv8KVl%xdpp;tt8(uuwFMH(M$K3&k>&-LZwDNI;=Da+sMHioVE754`9#ONH{A zoSKk-F%wkDE}9mECzwwSzF-qpo5i(E?N!3Bnrf7YDwKy_HB~Fo``NPS$^+J=Vf9M8 zSZQ-8g7`}tTh4MnDlS$1=X2b_HzctrSS^-Cj+)png`(Z|Tx*N4B;>cS3VzH=N=v`rlpK@|-=kykslAZQ{67u4B25hn;eSmAaJ(t8ybu zSGj}u%Dqioq|hIofR>~S)nM-`VhXvf>5JFepV=wM`Y$!u3!t@>Q1L1 zqac?%MV=Qrg?c3r2PD@ixRu|tQrSd>cUbB2Xxl*E0_9_QJ~i|~Q@t8KEn6*ibNkb;$o7w!AO6h}DGQOfKaY`LfNK;||-LtWYV-l_RoADzdrNo8>(8Q0Elg!MB(w zF11RnPPek$VXjwW%bcdN;4()}0~4RM$Zk-49!^%}%qvtrU@cL+PXDd}uT$KfyFhIe zT;W&bg|R(?JLqvTp_8o$t^6-pyw_S5D{|J*9CnAk>##Ty*1RVj@{gQW3VXpHFjkb~ z+qHeSwYsKnk)t9gOX5Gs#ewe&mY?KVzA4K;$&<1X&!-Ux?ftz1?A!-y;a_hT9!dZ7}d2 zlu!)@18uJt$S>tv*%w9m5Y((Fw>wJfg6i!K+f(`C7NI_lT#eBEQoCsi-WyFeQ4zqve|w$D{uISZ2ixokkXF( zNq>0dPs-$19Bg6u%c8?vP#O4rPD!8#)3XOo_=>|`s|EtOSKzf2|LMq;gG`iv>afbI z*b=w0iY;}k^cBRHiJ4ukSk@u9cpGK*(-Z+ts(o6c|~~zvK6AN%C5>#ZhpNQQRQ5GzqLq|Ci7)Qwp6ME 
z4!bJbm14JwHtBqRjw(|zu^_h8>590RXw55)U0~pWy@V>8+#yxYbBC!EM5#2W0AG>K zu8<-Zp^)=vNl{=OTNG+pu(08fAdi=r%wMgv|G-hGzL;ZuEnnDJDgWz2bBS_wWp1c1HMZ_(+>GA+01&)&7A{#Zp7d6tz-5+hg&JbWP3ss&WiRjc|l-xY3~2b zUxJ+-lgnfawVF$UWpdswS-O$sY;}k~b+|(j!BX7dl;!6fQc37JhwO7;{sLGMdRTUR zGJkomE7VrJ3|(?{CS7CN%ebT9n9iW{SV5R@SWJ+1UuPsMcs;h&Q_W%+n7cAp;Q#I znccy&g0LbK+$1jvscmrer$j;CBwK7BtXL3x924d>!M;f@4*u2&sHr&kK$&$DTx65% z!pE&_lBI>}Qi%$toJDqH#sD|>Etd7#r=fDTDc7M?%JnSYp;pRKR_0cs%dD;a5ZF9cvUN@|L4MiZ7ukbjT$?BosM%NJDWfQ$KgG`cMcZ zcW{vd;jj^~|8!szLWKzwA}G{H+V0J_nE!XK6z;Cac$HdMYRHtqQR?NA4-JSPOG3_f>iu8a~K`PhiT7<(vE=bu7Oy*yVG4 z-Cx(x;1sS<0)bEEF|k1~eKM~kh$k9NdGZr^%)G==5?Y4C$jLmZICNiLNq8C7s}=Hx zs?CM|Q#Eo!g;XC_1;5$cP$9o7&s488Tq9a))Ifvm_*I^%0+gRvB${u{li$lsCF=KO z+k2TWh%IhiY*kX0_by@5iKP|6?+ZoB3UNhrY_6vAn(8i^mKkj}B6v`RgL4k; zz|wF_K8+}NN))l2><~&^4QujkpRBY_IZeMc<=i5emzpitVoE;EEbhpjB8#{>-yQkl zVpB~pvN~TA@HUF?*wvQ@e^>4bAI-1XbJM%S(p$3R7R+TUa(R~d{k^Sh-bA?2rne4ne=;e2sPLqorW;#!9&-D@igu65J|+g}iu1y}CLsSPS8i>s9|js+_|P`WVk?-qHn z#k?R?Y|iVniL2}tkvA*|t#z1v53}Xrl^?d3M#J_+YOZCO63a!bjef=sS^3atw@$VOiFR{hZm1;x0Zz6MR>&U5!4H zEA^L4TO|9=dg=4!xx-Q6eJr<-RR*7_Emj&!%foLdR|7BfqoB=NW;+D0!EC4vG~B=z zU|v+>W4?Y-?yqv)ksFv$7QKO4p3Y@27vw)@lI|?RJZd{sWN|8wnJp=CLHEA}yxk4o zi^TB;x!ya3lAJ+iT`!8K0=CCYC0N-^)h3^+9>@{jGMRi#{(vapFu9cFex|sr;R9mM zsoJ9GM$sMJjNQTFl8Wf*1-`Jdnbn5_--ueT2rF&7i*1z+>xA;iH=;Qkgwh6+AUw)k zy`rTg=4GaBIkjrAMUd8D7QV?WI2wcXo2)GKCd;|Dv?$tvv&3e=!vx>V$|9SY{H2A= z^uCfS!jZPkC02L%E|FoLy&ki-2tGyO>>GNOXp2yz1ijcLeb^+J73&H$R2V6a-C~jo zBjvF%Q@%@(w)*mI<>E0>dKzm-^bpI<8449eMVC8#1Y5!tQbp{Dkh4N^hj+??w9W1g z_pCw?n9iw-0s$w)#dMC%IU+=(4gP+iP;r||8$Ko!#tunxyJ#thcvwa3q0+L*XC*g2 zf~43fM<{rP$-gk+>}4(L-XZ1;OEod&SseLZXWsdxA9sxFSai-{jNglE_l zy{dXb5>`aRpU?Fv!MCK`-xnorI8as8u-x1bRod=lrelKqMI37*AgPZ; z@5Cke&zu#JJE5~XaqEJ*>HDVoVB0Q-P#8TW*Ed8fa5_3#T+;C61r-g)7hqGG>*&oh z-?w~$TCqU-^h$Oi&(`OtZ;16FuJLnNVdq4lLae!dUcuuk9%g*wH$t#$TR7*9{zIJ83H|(!JCifeXo;z zndvKut?PwMfKlF%NUZeuHgDaxdtjd@To>A;85 z-_uu$x(W@w9etV%hcrX-Dz%i1U_PEbi@u=uXgl`?{e&Kb_@zh$Nok~Z?hxT 
z=m<1Ad`%7ys6gH1uz?N9@eSgK#D8T19(|DGZ*)1H*P7CDw4%;6iu?|0yObOqV#2Yr z+0pyAj-8E;jyc638CL5Cm`jH1^)^EnB*QKp_Lr1lT<3X~#+9Xq)EqLL`=e#}8%J;J z{CXf8H{%%%r4)TNI-Cc@Bv}hK^R06P+8xeTUR_6q3~atswT0W5Sk0A4d=q=)ow?d1 z{-kMkiwS9EI@kOCG&s%n zndvyM&X)gC9qyZn&c3C?Xpeiv=CrD5!)dEmbWoZd@p(C|10 z7@lFIVW_6lX21zQK#qrGcmN*x%!z+k$B*myJnn(+YxCfr((%ve_&k;X--0i}O)!~Lq^H_rRD|Pr0dJu|a@HwKxNA(B$2A@ZD_;DR>@Oepx$8|&=SHQ0Ve7PQU zS2y@vp~K@k+~Bhva6`vjApMKp-#8ciU58y9e3>NZ~NvsZ0e*E5hvq-FVG1 zUb)<}dq?|@I(lbjcb8JPt+$tTcMfdr*^Qf@PHZGLsuAstnwxjy5o1sPKv!Q=&+g8? zTQ(2$?7E2~1T=*8odbz|AZ)=?mR()ECDh*DKhV>TMEu{a(>LzY2$gVqJNjzwx~XSd z*L8SrHLKg0P=fqB#J|J*JHpyGY~0k;xUqfH`t{eYyRQAZ#-@$y+F7<-5I_?ReTUKY zcFq_Q?AzLYxsPS>itL$hK_HFf#r!V;qT-v5`U&xkh z29vOoEyQJPTC*j&3=*!ai{fm+1#pJ4W{4DuW{Z?{0hayVb3-rmI`K){6UGIrL9_YL<9=<^xVmM4S_zM9gH)F?ITa0)Nw_!#hx zdEiF8&&EG85B|gRz{lr-zc3H{{5)`{9*?u-aqDpU`N|@EjB)X|VWCd=t_=96GWg&U zI*orK1D?spPKrdz?l&+Gyk#DE$2{=;^T6+(2mbka;HTz+|44_EemwXX`uUFx{7n69 z)*Xn(o2j3hGT@nf9?O7d`qi0v;J=;+?xYTpvi0nn2flV5_fsX^@Ju~CngP$`W9+M`-_i{JO2)h^%Yfq{D^G-v-*Tkw@*n|_D67K9 zu$Pk2Z%qbVN5|fr@wa0e`1;Q^IUm{!8*pPUMELZRJOjQEWlGuliD_WYpZYTJ`85IR z1gp(}x1dak_|tD$4g5_6M9TKV4`|?7d~VX=#HSb^gU{dTaN^_1fd7LI&z9%YI-K~> zA7B`K{#l0;pUimpqz=#K^Ftj@eCT8F$&B;F%Yf_X*irM}IU3dU$%^2*3-dOQj$!cE ztNf2>sBFLu`?Y4_8|Q`XS@5{-f8WZ2pV8rl{04tx|NpZrd}IG_@H6m@{eM#DXTV2E zfk?esaAW`fcoy8){~K}{d;+@P>wmwI)`PMCH`XJ<&*eSpUv>S)zxrbQD=_JQ7pAXk z7k=r+chB2t%U-v&zn|W5Pg`s3#`S*@X5a2zJs;@WbqlNO+e2vd&&eotF;oq5Ksx)d z&(%RayL$%OJNx=NiG*-=-=ytruI<{vx50JW_S~oycJCSJs@rh&<`v+u?E^Y`*Ut8C z{4*0>nWOlV%N|gYigop+`NZ|o_p=%dzcLVMPLK)X20+gTb@80K^HL0v5(Rc zdDhXmp&clME;!*zGivG51=C=t(WdZ$ir^k)!c*A+AnPx6b*WYD+1I}>_QegnE1 z1)|SV6YIcJqPbGG{8s{rU}_e!LH}SD+>qbk)2`EJ_wRhKMwrT?&$b_-&1JtM@7D;IhIzkv@Fo{YMY-w5bj{rh%l_(ng}za1Zg&VU~Pj6}@t z|JQo|1|Cp&GU$!^hftWS|M(pJ=KgKlzS9Z;YRsv$_C;u@~BS z#%!cqBwFKX?~qMT`;Tnf0mRO9(l=wD%xcUYe*ti^P&nPg+k;nHSB@s literal 0 HcmV?d00001 diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_op_test.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_op_test.py" new file mode 100644 index 0000000..939eb1f --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_op_test.py" @@ -0,0 +1,28 @@ +import tensorflow as tf +import numpy as np +from tf_grouping import query_ball_point, group_point + +class GroupPointTest(tf.test.TestCase): + def test(self): + pass + + def test_grad(self): + with tf.device('/gpu:0'): + points = tf.constant(np.random.random((1,128,16)).astype('float32')) + print points + xyz1 = tf.constant(np.random.random((1,128,3)).astype('float32')) + xyz2 = tf.constant(np.random.random((1,8,3)).astype('float32')) + radius = 0.3 + nsample = 32 + idx, pts_cnt = query_ball_point(radius, nsample, xyz1, xyz2) + grouped_points = group_point(points, idx) + print (grouped_points) + + with self.test_session(): + print ("---- Going to compute gradient error") + err = tf.test.compute_gradient_error(points, (1,128,16), grouped_points, (1,8,32,16)) + print (err) + self.assertLess(err, 1e-4) + +if __name__=='__main__': + tf.test.main() diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_so.so" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/grouping/tf_grouping_so.so" new file mode 100644 index 0000000000000000000000000000000000000000..c10efbe728472286c4855488f0c42ffc81f88ff3 GIT binary patch literal 84080 zcmeFa4S1Br)i*x7n~;D=76c6#$V~_uG=|-L5kPd45ZGwI2&g<7OhU3iG$bL}@S&)Y zWR-P|MvK}FTu^Stl- zfB)C@++54-IdkUBnRCvZGc)&{y>BThSzt66n0}eqMT{WLE-*G((O?VG*jOHmV>YI- zWKJ6^X(AOr`&?l45}Y}LxQ)Q4`zsEj)c6;@+!`nFU9!~M8G)vDrQeA9(r+~WMK2i< z&?U=w0kIgA^m$1RBfLzNbb3j&bd?@=)HRYZS3ysT;9%_2->9|?fq?N9i(=>1A zW54|AJvlR5epg%PxP5f@Ta_1odf&hw9{%9(;89KCnEu^`(h##y|bx z>#1*FbJfR7?tiuXgCEvp9Ui}Z(_fx`_`_Qk+~I0@xc<4P7w>I-@r(POJpT31AN%E- z>y{_QKJtPswRLp2qdjLP)D`jaz=_92sL|l8k?cK4<0{9vBJ~>=1z$fQ zGX0V$a&C;$?;Hpkso$@n^!siUecl0mM&h&kw8;3LDE>SU#Xp~i|3u>dY7{viiZbtP zXGi9<8kr>${~Hq{`-wM-oQI;!>zF9^>Wnh)(xS-uP82=(qxgS%6#Dz4=zn$;IhRDy z!qVPEoMGtvV z;=`0Ee*2Rs^1mEqylGM7ER90{U=;uVB+C5lgZz=^i+5CH{OTxq;p!-Ucv=)b508n= zzdZ{5M^VOg2853!e@PTRKaDakDx&D;hnO!@8HBwTio%8F{K`Q-O<|*%)*#?JB)$fI zLimF%0`V2>P1khDPxuB}bmF=icBN|t;`UTU!{kqNT|ZjTchw3wRTS*cQXWmp zqmNyK{KUWOIzhNi((eHQ;n^mExDNS)uCMs~9nE&iMXiYv9j-Qfh~7*Ka9o%U{CZ>( z=^65G6)@5hzw*(J=qn^WhRLtDPnZ451$wUJzZVUO-cl*(G9-Sg9Ix$qfqzyMYytE~ z^gUtx$4NfN$_1f5-d{?2y29vpBCnAhyYCV7n&dx4>a$1U5vKS>B?;z?|h zhe`fYoouq~*Cz3rq&INCg+JUT5FUw7$GFJuiQfoH-R_6DJQJ8E{Q)4qFzq1ENcLR5 zM95;fv|~@JK#&}CeJ00clYXL)tC7bg*jL*9hk~4Kho6vr&2qfDob#j|duX$T3*GT+ zlk~T6dnH7%*9e{mp*P82_n+&f9Xl!n-yevAl}UfAxLqK0|NM;{FO%}?d=_Ael3i@| zf?gJk7I1{u!u(;399O9v7p=ePdJ+cZ^wO`=1e)C|`Lu-D_s^&&J2r&P-$rTo7CB#Z 
zeZIl{a02W3O5>c#u5^7a`FDr$e-e5#vq{Wunkoq8|34WoJEE+Tq!m0;Y4yhYav z8J8NG1$?KRNI#KrAUy8vlks+ESiCKh{3})pK97ikd8I$>4D*NSP&mnBTPFxJWE?&u z`wjPp15%%6x1c{F`5%D2NN+u1cE2;qd|5B;TM_1mSyBAWC-J4y5A}JmK+4H#g&kxI3Ys#xORuY5C+L{|cc=ckd#p$SP zs&8CXTfZUivW80?jdhONS@})*O`Gbfiq=OZv096piZ(W&ZOwXzq?>(tv!iiSL1k@i zX?;zdb74dCWexK)dF8Szt&wV@vX{*kM{G6U`duAbcx82MsSrMIsiW3W<*cc%TUy^3 zE>na`E>Z-1pwmcoqR++FoP~|`%?*)8A6~<|2}cI{7*e5kFk9)Mv9dZ`#Sv2h)eA(gBrtI=1js~lxoV=~c*;v1+Xvw8ZD~qd&z_@P1TFyR_4;5w>l`dXdo@vpY zs<6He?VGtN(D;hF4K;PuVBXZc7M@tXdgI3O21jF4J^7|{Q~7!ev6lU3TFRG}JJvP; z-{@$-gs5+O>MX|R;xY@%Ih1fv*C`8RRzs8wU{_pHC8sD zzv7aW)}kU3_R=Z_`Dz_?LzrZ$?Fy|$7?#z#)Y({5x4N{(QAKiuPNZRXYvtXBwa&xb z!8EL_wa!{pdA*}>jic)N%Nj&~#WgiWMWsudmR!2593M+Li5;zdrd76Gk z*Nl+95oG0c^^I#QYs+ikJC(dMkS)ytkNFLkVT5ZPYuDDVcTic?u!#n|w8@%@X;r1q z%i_|iqN1gi@@3^qt#B?r$(K4aEvxDqH&ixO2RN5ID_4v5JT}ZG9uU##C|bKV7#o&4 zt&q>Trm=oQd2RjbnyPX~V`F_|1b@xt6;0Nh;=1*i`_=i4s}U0FoW+-bxTuJxTkcY4 zrL(ySLlL%G>ddRDZz^9|+2p{~r9i*z%B9ZPfYA?XimTAQwQ5aeW4W`jvc?HS5zW$N z%Ye@lWSH!j!1&)$xi%6#A3vYkr{tVXi5b(-L1}~|sbzp+)Dp6WvutK0Atpg)PZ@J~=^+G4QyG2sv5>RG|p-SrFW#x!QMN4xL zk>Ged5+iswZK!O(6l>!CqRXdGWyOkte6P&zS<*teOP!5{l}_ZH#qiF&{Mv>!m5ZC# z4vD&;BPS67I)^(|c42+BBXYbxrBQRLY8{o0MHp#jc`=*~AMkew&(E!FYN}aXN4Zdr z?fV&5SAn|xnw{@-HZHBLsUic?V9RGMhWJQXLxyU#E_67ll9~wGVC$jdEnP)Yt9^Z6 z21AlW@5S$RRFv$7TzCmn^G zDSCrQ4m3o7;SuR;OJ(W}Nln&ZZn~_IujDH0oCv)e!zCVOOY1ON2D=#A!btnk8fcVu?7R*c5(QguG&F5|nftn39fwYBKHDZj4zN=k-^IED3V z8!DYO*x4pt@__C)iB0uwQ|Q6miWG-p3Rrqd~8Do8c9pKJl`?WVapS7lRS&urWuQ}@I|Mx zA9Ff3I)|9TniEEmQ&R^YBCXO*m{Ay3ZI+ zbCJxWxt}_NhKOy=;j3m$pz``=Cm#V%L7@Vc(k4~+bhvb8=DLtWjFQUcx~esf#+sU% zRW+rTuEKOKt-+cud|Fwvm0{D4dG>tTE@Qh}-*{QG^RiVXHFf$qNl2g#Q}*{uc|!*M z{St75qm1w4HksBY^=w5^6=|xpMlL(T4>P_`7uLMhffE&#KkwjDy@yE08^iCVs=2yy zkz;L>17~Znf3U-aIAMw2X^0iWc5>F-VtrGGm_pMmkCsDAD(kKn8Ntfuj7%;YOP6L2 zb*Ld3W(d#ObUeTr(JD2UHX$|088g20X=m8FHH3XOU;gr-gFQm@ zX0L^p>aP~9R3E`Vc8X7yKc42FS@e0u&8`t=@r9Zk~`nA9$T7- zoQCw6!;h$j9u8TvujdETIOL;%y2(0A51UK)0eE9&qcxKpt5_bLp*!ezc3?z9&0V^H 
zED$*I{;tEyVT@AW5iC9Rj$m2wFlep>O*x(HRIaR9Z>8g%s#Fg^fyBx>aSP7IjgJAKc8rG30z%ORo+C9l`|6fatAhQl@Mel<|vMd8j-jhb=7QPVPSa|dRSOe zTu@k^nUNj%mYIWx<;y{woiUpgE?=HsP+V@!$PzT7Lcc4>AD1`c>2cgP#Ixi^ zM)#ZPW5hq)n&r4Sd>7A==y#(Dq89o^6m-S#=g~)U$jI9gPpYHa#<3hjbcBoNcu^~G zp*&UoR3DE=0sde7N3zjl3I=vBzlY112DtPbdctBP+YKCDRDYFH-}Aye+!2rAI8kq6 zU9#Q=#Q2&T+=(8+UY5A#Ni}#vE0(<>;cfTa5A&E<4`8|s_}(h(2zK8x0}7010}}W8 zujomQIQE5voipx*5n&_w;DrA77sAer?OaS;@OKmPc>^7PtVG}shT+*Ffqyd$-yr#D z@;L;Zj}GUB;p63va{c)V+~#KV?2G=Kg-`M)ynOV7cdS1bLHIm{UVok;9Irp06OPyK zvxnnPD*e{+9_4@XzC6)ipx_gu;4M+`_9%F>g1=hfpQhj|6ueErZ&2{d75r8OU!mY@ z6#NHVyw;-NA5-Y_6#NPWUplC9h;j=8THso);O`7z z@i#;Y9*+bCt~Cl?j}27Wpy2VST;STM;PGfz;A&Cu`m=FVxmCeW3Q#lFq2QYo`~wR9 zE(PDE;2jG7aRq;^g5Rm&XDayT6nu++Y5tz8;1luG6J4}!pgR516g(1K;7U{Q@=-KiXHoF- zQ8A9sQ}Ft#hAM0do_til>#@uB{6G^Z*wAtl+UA30w~-_*(*4#<~>z%?kc;1^=Le->KmBXWgj! zIR$^OLf@_6@rY#LdP%|S&u~-aK?UC!pl0k%1&>EI16PlN$0Lt{%ctP6L<(HT6ns(u z%h*W;KUu*uL(p%hDEN23cgFhTNM1`3f`*VcPjWy1^=9a&rkt>AqMzE!~=Q}DMc_>&5Lj)G^#p#RTR@bL8$FIDh675wE2{y7D| zNx^q3c#ndANx?5s@COzAQU(8}f?u!TdldXK1@BYvS19;n3jTHle^S9;soM+4|4cXg2xfu z&EW?Lrmca0Cx`DPn3f>^E)L&8FfBd&9UR_5@R^X=~xParkP2Y3bp&aCiy9Nd#*gzJy>}YWU3@E+Cke7=Fg#IRqyYeC!(< zeypdIQ%byDV6&z9Da>pO5uKu!!Hwj z0l{Vt?qm{Ox$V_$Lm6Ko^6hr{O)OexTR zki#hiQ;PFkH3x{7Lm{OHru&slngD z;VlGH==Zm9_y&S0wEG)4+(k?8CYY8YehY_}5L`yE z#^Fl{UO}*#!vzFiOEBZ`9D>UUKK2E-Kfx6Q_i%XS?=-fmV{p1&J}BWQW%(~&f}UUY z)_Kc;V9rskFAW|`cyXb5V5DcExxIAIeU;hOeM_u=7iQg_j-^OQ=4!hqW;#uEyL*0W zytUiz8gwqSdltsq-Cx<;H~Q>z=Y9QKjXBQ;SEJ-bzHI3-ac>RE@Z`9 zKg>gxQ$%V@O#{N(?b}FfJ@cN1DE9UVlZd!IXFe~=Qq4X$rV$YFm=o0b)kH18U6ghR z_gfOEJ;w-=U6h&#_o%>qg{c*w`&QsSQ*j>%+)=_&+M~AT&{*1Y-X|QTGQy1$xEqnm z2%LuCj-hq_UIX~9uOGG~bghu?4`@l?FNEy7h!N#v!i^Li{(^A&02~7MGs01>CAvq5 zWqZyGgwqGrE^zlF{}ah}-rKx0r4Z#(h$Es`2U-$UngDMnMwEyN_o*1o7dGJZQTGVk zBDKRL(cuKpP_qj}hmSp{0esi@(Md7t^`eJCa3iYUiP4;?cGxPIZ&f?27r1{Cj#dEF z_8NivJK-qH6Yhu9O?%Ektbz!9ocA_wiWH(o2w`t8OO5w^D7tTkXV>C7_)>2B^W3x%4?Gu{NEv&j5li~NY=CHf>`!>lz-Xo;wk*s@9 z5SNdrmpIArS41Hh?tr<$Fjp{aAcnN4A%04Q 
z>&>I=x9+96e9ekIzOv6r3;Re5>v*H1zAsP?3=H@Oj>rn3@1wq+Kt(V@_}_-Xf*|5n z`@A%W;xlpOoL+*=X7}X${TU5EO-27;IQsA9BI}>?pF=!F?|ba-*d%IGP7<{4)$Hx3 zUntrwlIzwWqN6T$Ev$#66=)FW~@zm|EgVs5bJnMeVLcH-L=B+qDs z<9mVkk>B|2(DMG+Cjc9$c?;Z_eX<8Eu?ivb75-JA5U|~|*kbqGzZE2Q_er~Fw%t9) z?ip!!UlMOWKn)or$ z>d7Xa*bc~TZ|yeQ-6q;}+1+o#Go|l6^(i$t>Z|^2a4-VD3qtrM3x4t6!LLm68!!3g zUzvYJ{<15I-Cr;L6QwgqX;E_A{M0lgxC6%r2TR=VOR0ZPzTt`O{S~BcuT3= zCGEdXqUP?SawNV2^i&MnwoiC%4r7bl$9^vo$VVj@&IIm_7pAfV*BwyRCkf_OYMzi0 zdj@h~!fn3+Qt)*j1#8lu?_(Nk`-C4$ZlvQ9Qsq(KLe6b-su}Fw2HMj~4P4y`cl{fX z-BXg9hS@(_E+yVfaIGa#TlZS7S&?6szasxyat4v{;h5*4gA%uY7p*Bv+(&#r!gxzO zsi})x{mx$()3QOXJz9Q=J+X^KB{Cz#8~6d*=g;MO@QsDm0KSd2xgG2-k#sLZ^L7vP zW_SHNp{<-EfTu`jc>!m6H%X0g{R$lX^T;;?D-tlgLo%c_EOq<^7!LeG`ZenPke^!L z^D*!L8c9w|Tq3^&E8QXSHa6$bRD0O0e!bzT=I zP7Wo$0`;(hFNTO|q6L;+13kg7kNZA7M&pEu$gi=Ci}g+7k@t}AN$!z{e7i|ph*myW)|~C(zU%6~`MLnYh{cI%}HRxyievW1HFW>?Hi7(7qq8jy<`^cbKWCjl?p5PQ=X0PXF zf8TdHc_{G?+FCLDuYKQqMBS=ppC--yYqX@S`=2B|hSth$v~OU5$Poj{OPXIDc!O;I zx1j$czvgto!FTWB}9rv?Cz7!hZ z=lNIVU-^4FouV*U?EZ%0^(Wv<2|(dEKXoIvAD#sk`vJ^<)N$9*O4)o42;%9M3|58F}fb@9PhVkxns&wB;d+2Ke<&X5Tv@)#brzGpaS;3$i+% z^WPa-ZSg&f>RniPLKeP`yoF2s1D-q)bT=wN*I0Z2s}5WxZeN1yS#G?&M5u@0{BoWz z?FaV5n-Eft`0gR&A-6!GpC`D^BP^h_Kau)<*(4cd(S0TE&wV2}X_IE3+k_QzZE9jd z+cfIJ^|=!Ziwt|~es~nBJ?l^U-^0cy=ub+2Q$zZD2V^C1McLop0p4G+dw+@hTi^X$ z@1nmFH_~51+qI+|>aWC|8`9slX#G9SndsxYGqk^DA^n{*w7=MB{S`**FF&NexxBxC zKKnnAvDYOgpBVvwp5BfTjy>3nT8bJwgnB;u##~1C4BP* zZTbEI{{FcDI}l*(MDJV`O8Y9`BN6SBM7x`V%Tg2V?KdX+ZYSkHJpYd|2z$GZZqU)6 z0xk1lATIfDfCH`S2*3kB;_JU)>rB1%Ke(7W)3bGS0^0j7;A+}K?m3Ps5WrsMCDv<4 zeS^Ff`$vH)ai8#gC{d3AH88NMdcc-$3bFvew_%XA=!TlkOEK=lG~+L#(pmrQ^;&5d|yhAX|z6ZJw?+9@k8Sg;C*y2 z;s=i~HX8JUK4Ps?qz7Y!h%h%&b(NF4LO^*QYS=~LRz{3PyfYCo+{PfAD*|kI6w3|i z438z~O`f#zw4($6EjvEI+0$hFa{=*NyA6Ysm2 zIs*>D>_9ON01}k$O5BK532k@l-Icg63Yn($d`_t2vWE0`Uxfbt_I_x85Bmw$!vgQG zK3ac8L;8D>x7No;VT$6b)MGVuMWaIe`PP>k(or=H?CfAiq4DR-kofZ_%Avk9Ik!EY zBCNBT;fFWIyG_1(4%3EY1su3670&&3LfZh%bWb4&_*vr4N3qUYMRPhAiuV6S#;3=D 
zX3m?{_{67R!rhM|Dp9zMgSh_tc!;_&vBZ7c_dZt~0@OItDnb?^U)7PdglwHx%K2WS z@=dt=W^yn0`@U!4(*AV-!sFP$tvbJ3b$*mObY!iL>;W>l$qVf5H@mbeFf%ZUb~ zG;`nOk*i3ezTqX0Z+&qaLfUuf$3`&gZ7d?>=;mzl(<8iOpBq zuZ_@tYFPVXy?upfPxX?w|AGkZ4Pos^>g_er{)w(&`+q})L*z%jD*vCknjwEr|KK1$ z53svW&?=J-1WJGtoBns9$ZJ;E-Mjq{0H^Cq?#=D)&+P7h(3$v<#V%H$Sm|b?lYCEh zDs7m2zw6@~^Bsb~o@PYF1HQ*3s#92%Y`KoQMo6SnaD8v=YaR({%(N)m+{I9y9;`y)d-Y0qZG2M*wXanL^dSi)`Hz+qn7 z-Q!bzN$-))D99~}U*ztwd&Z|0yZ03v4iwLLbCKaA=LoyUSnM8AJlpHkO5D5o-s7m< z4KFBh$E5oH-6K_A;)y}Szh2fpKQ?~RjQ2}szvmoV;y!>lM$zd?kQKYT7vZqZ^|lk~ z=#bBZ!Irq!LXc*#y2%VTN(@=sw?RtAUO+Iw3g*Jk2=Fh!5ahWVKMMtzY@fY9;r0t? zY_89n|Bh{Uk#(@h`fvY5MB8DXeZqM?f)DIUdwxu#gtnW&%0A;O>*plLjimI}uMEf} z_I7(>^OS_&7P0)T^KnEqcluvD5#cYq25Kb-=lf{LkAtnh+1>x5Gmo1Np|$(Pf!Fo< zhj{ZNG$K}LwWlLr-=-D2lT!2d&reEZ1HZzd=H7FWb`sE9weEazzDR36Vc^fPeSL^- z(_@GE5;S3VKc6Oo_ZnrhS@aSnbRUhw6WfN)_ZL0DFt`In9i+7VML$9oFY2Q2C?Dq~ zogN-2+9`3*@zU*jj(B@wE5M_~Q-OmH8wT;B-R+_~|8$b&D{=pXi{rjCmX`-y%TYzm zixK60cEcgx9i$RZ?Ac&q{k+7pMl1Fdq1`VkxZs;jU>p1GkD(%uEzRzZ1NTJK_?C)h z{Z!L_V`+(JBOKa>)g1LtPZ|_^7W(XZzn<8i6X5|@gX|t(HwM491ix<8zqWQKf~Z3W z54axCzq(y~ES}h_h-Yi}8ua)aDj{f*z}SG{@_S-W6Bxu**Ba3HZsao{w(~)fyNUK2 zYYw;`M@=G>BGLvY1hW>RAu-&poz%4A-NC^{SR=Mwg;gajrR#wJ1MuEW%Y^p;lSN}&z`)}-==6TM@ z?IMVJ`p3JRuc+i}n>T($+W~%V;EBCBD1p?}1p=dJ>TH2gH01`15C!m2%e9k}^B=+G zkNq7{!srUMFN;D3ncQ=qnag}5I@`v*jvNYTavR;RP($|NK8h-hS4meDjiCQE)J@Q1 zWp89@-+k|L1qij^cm?@ZI)x2R$DQ0am3!!YTuB~24`1Lr6VD|+-}JY`75IY(NP*%! 
z*~R&aMdOVe{}XZ!&x)6jN|VGfv&WSNK@wa)qG{glc=H-@(wtu|?qE>z$GtR|at^w> zwmhmSPlKiTlRNtT3F){PM>{5;XM@)ISA*|9jD1D~z1w{w5+b)%`|>mNbFB->!X zd2jwuqy71Dh#b21oI5DIClAW=X8@q}SnIsU1^t6V=sy8Zd;4$5KYcOOrWfDzbLiH2 z4b(1l-JEduyI53{hrU6>dKEyB-p1+A36*IvNYKYc)JGS!naA6(;L`RjzK;*U{4_Ec zgai12Tf^u^VSv6B|ALdb_rt|#2E(TwyB|K)euIxY)7*Iv5hx-yr6=|+P7>93I6Wa`5{a z@l&yAnL|QR`+d&c^b7({ese~kV%KHV2S!~(qViE6vU{%d`DSpNAM(A2(wBtRs{FL^ z@!ie`haJyHA+;2LFuwMjWMV%a!4^w6I%|h?L$)=6GyD%ozV@8!sSR%RbCG^Znr_ee zA>l5UxO)jF&Y`aoHt}5y2?st8ipS&Gw}bNjtlhmKeskO>&Sl&U~jf4i{Cp5=_!@1OHc z>;^7O3mP-NF8_>ge{cP;%Mj+rFH4$c>2ZE3iuN!Bk{i*jeQ@Wk(fUTZObT3NzPxARW?h` z(}_r#Lx*9AfyaFBzK-IdfgcAwj_+4Lh5r7Ii6qt|5gvr|9Tt+=-S1&LW_N$!bCP8} zHztaQp3q#t1Fb&fTe(h8J>;>06AjFN-yg68_pJhZv7VCp=G#(e zJeoX*Up=)vAHOj?6NJihKFRYBu$S`i^MG#y=KOV>oJrDF~usL-(5mwl7 z`=OL3C$>)|^ld+(HJ^t#q0UbCC-U&%g!MJ<;usTL(U%S-w`@+m#hB3cDs_Te@OG@r z|4!erp~b-oZm7w70lZsVNVlcF^NBz9w(-Q@Mh|9)fxPc~74t!|{s$WAtRLj83lWx} zvX3}5iPGSEk?if>g9V3QYcaaWD z7rEcdzrwSO_`FFYgW%>*@)OLr(bF=%Lw_9{Tz)lA`4Bje@)KM?B$<)+fekr>vB5cx z`rd>J{F8ai#(JPE)s8s$;n`?`P*?UhQh&}lXA$_;E_#lcZWtAzCptH`(|ONb%9;6Q z4{eM1eIc~|7OnlY;0$LL=RG?AS0w+hiBWsb{xb#tt0e!!e?cJJPWc(~<&k_TSi=!a zAq)-fLxWw{Na|Uzlz%OC)1lWlQ~e5Ae-`R>X%pJKv^)%+^1plsByL+wk-Bx>D*%|Y zVi%y`12CCXhdeUNmw|cdp9U~Ep4NE}@HSIJ+kDH{pw3jA?@N;YD6K&C2apHe4Ss#t z_W_1V4rr%!Gk+4sOyWX&EzsUT|3EKyLew^YK-xHuuss8>1h9A{;;3&tVP6*a6O~{g zBZVE)4D0kGks`*`7?X$?Z<3d|_fcp#;46G35E_osjtDNExTxw|-}!_WVTJA#lvI5} zRY;v>m;z%^Ng$>@wxgKPa>5@9On5o3F+(<|meQ^A83ASXuxb z%Bgq;2`Oj!?erbfz^ry-gVA|hQP~#}dmM$!N&Z$~l79%jNzQ*pJDu|)&bbKl6BGGe zWs-X#30Vt~2vb$o1Sax!J}vk}#xxJF?dC!(ga2Z)kC|NT_Id80iCc;pZNWJ5z&Oq4 zhe#1;@iipWkXbD91o?{@p2YtMiO-uuK1T3)@K%E5yxx`w9-wXePtteJ;xh<*Yu6^O zvm?H>FVpbEyiRD_NRA(zDP6bI+}=iN0*4fg#(yQ}PU~gQ=v46g7S9Clx%1|XG;Ai8N>ZDz zfk^WK2B)7g6Cd|gVDNHQ?ZaI{jPV815@y%uREEsaT9O?UUeqXabYJs9!rn&M{j?qr zp7r7|Ur*k2MjJ8}DKH~>{|y3)sW<^M3^QI##phlMOvOW(ioVAQ7E|yLpO?P-bqdUJ zx)FSvWCSNoa8<%>IHN^XANguQSv(uqOM#WC|ETZEAV>H+<<< z;2OjeKFHuUNR63>EejrpvRC0Cw=J~=sNyOlXR#vTL4YqnXf5$4Vv5Hzu(~fvHS@X1 
zbNN4MX`g0&&37$)3Kl3wHaC&8%XvPFzI%AeMR&?uSQJwCsI-|4N(j^h zW@`4I2M0^9nD72_K2`}9h?7|CehX7DZ9dLve+W6n3?#!Sd6Yj8b?aWTUw9ss^XsVz zWVl1VR6f+4j}i!cYu9$PpuBnd0SK(;&F#wEn%_oq>jjWe>-}i$-waPhK0}80+yo}kHoM&=Qi07$xHqGWIr76ewi=RK;Qtut{t*Ajt3%rmXD_i5}f6` zS}7l{CuHlqKLQw%53i;=OumbKQ!r!vmjYDt;a1)zY!;p)+t_>`&{)m_7;zRp?E4o) z51ng_#D!KGpcU>NlnXx+nn?m~pm*S}1FwiU6P$0KiTQREDKOf6E2jz17t5bvItAvN zeP3|C@m29GLV$v7L(XpYP1i~IN{=SY8Il(kcnL1rK#IBh^g6qdr zMKV*>dRe9Msurrk@c^&7hF8&|YBnY?o{Ua#RiK{cJZ9)0Q(Hhs#;%Y#wj{NIl6fi6 zXb-y}dz1JU`frWTOYD%xm(%P<2L6KbI3fgJuIKq~A`|{G^=>rA9RC${ggL$(uK#QL zuFvsrjK|_pUQOs(PvBgT0x&H>3=8buK>Nj36qx2m>;L!<|7)( z_>zbAv1qVspQQeojB+G(LR%j#g@f_s>RTwj(4Ahymofm%xuRA6l^#8Ao-QNBWxj4G zKCmxoooC^+VfO>{b#|%mUZSRTb^@K>1%u05!LMDuo47vto>MNicaW@5;TaedCGvqD z-9IVH9H#L;2b~POE#ph@?<44-qQvtn#vS+JlNh^_(-9KrfcApgn>AKE`yFQ*Jt7oG z`x6ttLpJ+pg6kx;Y~2%&`sO3Wp3YR_5x9ZA8=@9_+ER}ZlI|^0lX;lYi#$)I_5evO z-lP)jXK?o19zp(V!ub<*fd5MHCsr>>R^5_ShGZr8Yl^%+YOcylO#=MJNq%(GnM9+Y zOQM|@g%6 zn1`)gIvug5V>_3Im0aozF&LrswNDbpV)clel2k%TvjpGbeP@Fc{wgdEhj4j&Fdxt^ zjjt$qBj49AP<8WrgiqZS4?Y6lFYtdRyvF~0RN!9`_@@bek4*Vp(V3WeL=k$hr%t&NFY{4I1-peI6Q2 z0s?;IzXs~;2-=6%CyU&Fqb+*A`;9A>y1&BT*||TKmVf=B-m7nD=5=Y zup(Yc1zlh&>#h&8@9(S0?KBH++G;paj{L{!|YYZv61by zMAmV8dFGkc!zOn<2b+ZK2mN0GHEeq*@$dW0p99=Inv>~VP0SsdMI!R?`EwhhO%!{0 zCTzm&w|?$F4knbB@f=Po{nC-R0`JDdn;PcKQC~f%)iu}FYIXHajlThq;it!Inw#*4 zE9Lwb$w8k{)zHA|nkv^e;03qWvqE|)rbe&)bX3pL0xua#o1wALm(WU9b2Pl*w$iC{ zn4{qb-r2@YH&|swRwhB2L8zvBBPT*dgR`l;s?Mo{A@7rA*9%tFc;7~_G2S;*qmwTT zyuXA6ztWpU*lPNLb&v+zaSZ?Q`KBN;bUgfxuPj6cfl`>$dw0V+6ExBep&T^_UgKNk zY|2!01NHdh|de-PzRh|L;SHnK@=4q7i>6*Eweyw9>Rn^SO zy2`5hy6VcTnU1>kO*5UVtQmP3mYMjq^_dNuoNMaqvNCdKLO90-4S3rt-ed}zzz??1 ztf{N2ZLW5xD0*QVZ_dA<`zm^ITgICIEByw3Kz?S}TVeG6L!B2MuU!PCXqz{a_mhIc zUmi1O&h=N+U0+wfp$@MG(-{84=4QkBpC zQ)$gYqeEamrmak0n_f3Vqfiw>u7nDOSd|rH7Evi(!#h>$@P_aih!zN;O@a{#jVl;} zqVUm~$iOi}LzGoG1lv=5jKX6WvJ8vQcgXTzX|HHTkL$|Fgnmx--j~SlY!Pkge`}<7 zs|2gr&{wP|mDE1s%Tt0hcxlKf-n1cT=@lodDXr6qPJ68Vp(dmqkEfWG}F* 
z70Pb&;oY65sT4(^>)ZvbS55T0-uUJpzC$?Cc;y^kw}$!m@gh80|jLN9&Pk7WI{dvg=XYu z!)*5bC)6;elI{Yh2yu%0o+=uKC~`Q&x(0_c<;oi>@ggF`(C_2l!=jZzh@0coVLW7z z!{x6IbU1BJJ>PWeoqAeQfc^h;0uS5G^(-*VS)q95_h|8dX{K|Fv(({^JypD&t-NV1 z-ayy14le*}tSeuYm08|YTfYHsJ6^-%TBsWhD;$ap9r^!dK%3Z@A(d}%{Kz4AD^k0O z;bwo>^*qOq3d1rs4mXrcfwvw7tMBG`+|&!Zj&M9~7KUA$aX;C_=o~lnqF=O0VPn|? zBLv7Zt6TE>nByf8=yW{Y`W|Nv%Gn|p zN-@*o1Mh>08K-<7t9%EIE&e{SQyxpnU&b zdC@zUw*uu>_=_qpxzGQfQqCp|cx9+$*gu!gEQQ9$iWQ$EG|4VP_4(yys3?O$Oc`n3MZ zMoQmlrn24fteaKzChftB1IvxF*J<&Scl+++1>MJYr}RI>IQ=y5v>4WXSK^UaRsjOr zH1F$QGd5;o|2rm>EDiWBq$I+FZp_4mmV)krmfa~kl5XYlp`DaoV~B#+Cg+^UqSevVf@|4`%-pv693U!U%#b+>id6!BoBEDJ5zZ-{f-zKZ(5%> z75zM=xvDDxvTX}{3fOMr@oBF91nO^FVbXNopQ|&K_up4G9sNC}6?RVJ{q=b@@PA5! zzBvCQy~hh$w(L%Ebg}$ib5O80Q_|%{9ACDM ze*f2Q>b!h5!6yoP3VI5TO)K=S%NO(uh<=;VdxGmPIG)erfsc2UokQbkGnSnod&a|l zXx^9NDqBE!wEKk0c$aszfXl8RzN1~8=V&O~rlCI8c2}aSa{rS>@0duw#JFdStz^H#vOV11rtN(+kL-QkJ+S+B4ff;nVVZZU zh1<`-x*-Vc_Y+=$RJM3cSdN+1Xhxu82 z?+P(5;|g{buwL|^G?n*1&09H^;!XeL5q4q#e}^1TlE0I@X&6t>dFbD?qL=FJn7`cx z$BZe)q7N|8oam-`^F`d4Y~*~vPs9%c>mvU{y@($>5q}C=Qc%y^^}iF#=}B+c z9)Yfq#}OUxN}US!tPOFE(=R+$*pj)$nBuCQO7gWCQ&VU&AJ6(P2Yi=iN*YTI?1g3d ze0(we4$=ekF?I#vH|*Ym2x3X;^XBt$2IgJTDmiY$c+jQwuP{_l>GD>hKtD;hasNEA z2lKz6JH?fh&+*K=j`thmb(H}@{Zj+@yXa4s!VK-i4qb3%8}v{9mS*%eh7vQg)wQTw1g_e>=|w{PJL@T<1_x!>q=eTO_2&PU2~ zU<+pe{RiZtV++LnQ_5o_J}00&NC5xQ^(5qiJap(1e=2!4koI@O|H~GX62Ay~qFY~4 z?A$``-jN|J{NF4r|=`i z{@uhcf*)BqpD6Og(d@$t0_zEVo*d+ShRTz}`Gm<6uz%T*`PRev47D5be$bC#|6VNM zrhSLqj&VLx9wXwc0r|YJ>|^{XHS#YOpS_S@nSb+$zcJ}RK9$qFFCrcyFB_Ql??L2k z8UGOPC@xOK`gu1(*fffB2mm&HzF|ITXpeQ+cgMA${VPF__~zYLiFSefQkXO(u2rBu z4eJUsN@0H^t>35_#kX5UeCyuF;~V@D@*{sG4VfPmz^9!k>=yGsOwW+thWa*R_dY$Y zNj{ezM%cNT;YBQc?eJP=y8qn3C#cKdMc%M z5%k03+U>iIBmu?$5V?jSPXqC#`A_*9a_R9cqMk56Nd7ck4r9`S7Qmwy9@_)=q4+84 zb-xVwWB0zvpd){rN+t3utqZU|qC9+8;<>v2O=T@Gxaxlv&<)G~j6DBWYTWfBKs@xi!t*NUvw@p`H`dQZ=wFRL>o8vA>9XD2-b3=M zlq(L~hb>tDEBQmnC*?7Ei)`RKx-jWDpYPB!ug7{!13v8lR*yZfUqIh%KljsVSoe^A 
zu^vq&I?V5uF9%InPu^Ng`&sOtZCGFO{c|_wpT2(%tk;qj(1f%Xc8YZ)_8kCOTVI)2 zr$LTH;*0e!0toWAw}{u9y!qfy>%A9~c$u`ng37|Ol@)v+<)!@_t)~rKt^>!b`ZBS9 zElk?N_h0babySb}f@ZXzn=Z;S2bGuyZK5o5ar}vbZiK(yl>0iXIgGew<>P)RhOdt? zZgvpkHg!JQ#ca$j&J%b%Z22qjCydmLX-CLQ8EZk=GYEXYfKOS*6|gUFfU1dTrGuO zt&8;6yTV9*o46y%KzvK%c4Pl^{5ayz)Sa;{_hO%%U%~a9Q~*8StM#ui+Ngi{gE-f~ z{s}*6fqky=Yw$^<^N$u|Bl2J1yd^+~0At%x)`a%s;Kz^g{f#Nfl#O=w-gkr?3*o(V zep1*e_HX@@#r{j|-|p93Nqr!vedpM3$nP+oidgUk982dTCw621j{OVa#eQe(NWv40 zq1$$JzK(Iyc|pGzm$x6|0$+JPf%VU^!ed*YzdfLf(F&9LyM&+hK_BWmB=sTh$7wiE4iz3UyRc3D(mO*)|j+U)L*-o;B;z#ybtG3o&6eM>?>pPQO3t^ z-%D`%G_L{cK%7^2`z=%-`|e&U(_LkUZ3G`dWBf~ry>=hL=`Qc#Qo_f+yN}9rS7&bp zhqv#h67BjMIDGA1D$`H!=DhzFj^DnQO7Q9L;PAV9sigk8IDG9sDyg5H1k?Em$&daB zPA@F`Y!*s8`t7EA=z&V=_aKL_-Ag6p>hB@=NH0!r5I-?qf~ntQgopp|_=R)F3czU? zH{lUiOr-?ld~+F#SL2+)oZm&mRKVdJLSC9wfq~F6WC`aq|3FzP3Yo>af z|8RT7zPp>?bl77mN}M|h`@~)=>;m~MM33{QV_g5+guJC|fAM@3%wLs|SIu9QVfo8Y!SmN-lNsYf{#uFnqvkIo;@<;^ z^IN(^{%S>hdw|Z}h%fTj)IeSm`AeL~)B3~sjFHa2K{rW5eCPb3f8w8J@>ay7Kg4xQ z3-O)ieUJ0$^IAG$Te`F(wE9KjLuG2}1y!Y)e}Z{K5qUb>Lytd#`Q`7u<0{BOm&iTF+@zT0+qpS6)b^mDQu z-fYg-mt&flB;H0P5v6&fKvG>$2dgRe`l50m346XPKLBLqzg z^ds%C!>d95DVpAo$=gEuBs++4c*QvM{N+lbFvGT)`21D!SCZ6+&Ns<`bGZcMFTo%8 zU$6q8^$+3i)l5@9MZY-T>$D*LP0%o3trQ1gZ@>@m^E{rvI{QNNR~dy9vip3VpQInq zdAbdFoTtwxzrs4^<2;d%QVHG`$R9~X8r8d!rc;UY@*)d|%cfJg&DB|C<8bG6DzUCD zDkWIYUs&f7jCS)Y2#@}$1l{}w4trCn+~!IuYTl1E2%+s*^Rfy)?J}oG5KVurf zkRRCx`zPql47d&U(oiB_3Hv~9f=O;FNp1_lh2BrO9`yVL`3b)b<8MKU@e6w)&WrJv zNxO9lySX}ry@L6xumbI{Kbb})*|C9OMGrLZaGopdsOuq+zusY`R1g2c_|^Os7`F}e zfpKG^m^7v(Q~{J7u8>E!sK4>Mr$UkxSsFXsy@)`;{a2aDU&x;r=a{o8uH$Oa?;F#;ITm?g zM<>s3xPOGzv(be7p+R0&+WU}!^p&`y>~ap{zEBs%MZmcSEzel|$inHe4hhpS7=zbsaEbf=X zFcnzek7nJIv>j!9o|Vqpg6#+LM`4oKU+pOS0k7}FI+^Yx7k0khLgRONV`v@JhJ8l| z;az3)!&+ zTee{LbUcM^zv4eQzU+03;~Wd?tOh*EVl116eqK4vOS?X(Y`4;c5)dQnjO9lU*9K?7DW zje+*PvOm4O&Mz7taelBn1^0EiznQ%6f$x|}y1sUJTX{dGYu`Xgc^UDjdnD%1Se};` z9)kk;{YS#zN%@(L!1%H6;P=^#W&J!KLmnIC#=7`bl(Zid%Gzp#G!RO~awIv)A1 
zR|7qj7FnVlhf8HyLFEzI9&`;7-y+KnS$4^Cr!2c=d5}uPPu2?$M*AL#KSrglZ>;}$ zzQee>`F-;#LJwG%V7)@;?>PVO>E-)3vb!;vYu$e#2hounW|Um-6t~0WLH`)Raq6Gw zhUh)KKj@+5TkKo(dAg8}KXCrIqw@#kH&`#O<9@6Av+j?DWqn;@eM;vn+@E>8!};It z-s8rU`=p*Q4~fr2qxTuk2j>=uf7;pD54H>_{qErLf1I|k0lNpR-}U%wD!aFf!A}d5 zQo5->kw@-IM7%vn@fLAb!#FXIrlNrTaePv6jPD=1j(|VT59w+#cE0{K&PQg8`F_`= z{zv$^SsKY9`qyJk?L{H}{jOtqu?|Kg1HACxqo%Q^Um7-l2sw95Q zjQ(2%zY~bx6#vjpqk6?{!+@rX#+}7DA;$L~pH-HiUOhpu5Z^!8Y z`qMxUI?Y0KJ^#s;Bfe6*G&!CQlo8KGLG$0bAT!0mA(u^JE)T0DdiA1b+rvnB^*s_f zm%Ko^!rS9_OG)2Du7hy`MZbMT7tvtffCnqYl_<*@qNKSMa^=xoCtS~pA|PX!IQrjO z4r%|PY~Q7{r@t8pZ%=RD#Wke;og^%-ouc6V(}FL&J^dIUt|9F&lz%j!wl_y;f0Iz& zkoF(S_T9uC7nx%$KH>d8100DEZiA<26@yFHKYfoh{yH>{)P6h><7yD&_FTO`=Z<{8Fx+P<350gx^33TB9N6|P^dv=~6QQKnylo;NAvD9C<{M{DO zzD3SM>K`UOE4)3mi^WI(J7s(gxgM90Uh4KBnW>%bXCaLMY&~CK^ipjXE^ByO6%MvD z80@GhrKpUn1f`9C8xi_<+8Rd&{+n`X;4A-f#wY_jo|LiP(a3)UBR9h`D#KaXxY~i= zHCS7o1*o#Ry3x_p#D8l+%gvt22D59LYF5@dv<&=kL*piGxPPaXHY$Vv9j2DioRyi& z&Kz5w%`Pstq+i5lBwH_H=bf2(5ldc$Pjc2~cHUXp7qOYS_-}G@4l0v#>AzX{?~{>v zqh^e{aa2a5W3`sifIrgKrZ-$@&AVtWV((VN71LK;Xq`2VnOBZu3mPxX%^Js!8*;O? z>5bJFW@TSAchsm+wbges!)=CJrZ;9<)3s|B8Yf?{V(vngoQY2&{xiPJ655%ToYl@) zau$#ivw_SqB(^gvI6eZrr&l>EYpOFg z0@qxNUv0@~+Gv=Toqn@{jWv#*%qFC#ute)+%$#!u8|Pfi8t@|)4H>H)nQYm}=?&KO zWM){*zB1bIU0c~$sTCAh%B^{GM^&(~x!16S&CE1?7Fc|0%FLQOs%pKnGQ(M)vAVW? 
zWo7LogVkzDUt=)NV}%=PsvQ@z!fLBEeF1CDo;wOA&1lMGXQk({nO5t6u-I`~Qz8E* zHp0-)W>(MA@a0dev^p2s`kJL=&Y5AD=2$nwFg|mG;S5WmVOp(o5}THl-e+27oja<& zu^@)kGsD?zMnmQuhOxN?Yym{hUSmj_wZ?E(?)`={vc?z;6Ipf^4JkRZ&_In-*{DLp zL;w@BI@s8p4mRo$%tvO5H=LI}=RtNx)+S?2m2qSegThi6t77L?R!?H%b6#b}M_F?A z9~qmLlRkr;HfT6EH~mkBbZd^2jnBQ=kkEk|Qp@QbY+T-97SjQ95iXCYJJ`9i(`Oo| zTJyd!nJzUND`MGCV+}Ldlod(nGKsOxF9y2%tD`+v6<)7esg^8 zdXT)=grmv(w$o5X%zYWNf= zA?BP}>3=b0SJqX3Zo;ikLlV2c0rs>_o!*dF&K}8`I|?3^(Og$!W--^rOwFB>&y36B zCuFaV8TC_B@~l=UY<9Yp8B1f%oP8aen>lw>I!m=$E?~yb**R8A2Ag8dVB_&IoiQSA z{0K8J6P)v6r)Fh-W=PFkksCAWH|z}SVK&QZnX^A8D?2+~V|mjXa}UMjWanV`2|Y2) zc*Hb0J2hrv=6f+0XIkez3O(##BN~{om5qFYO|(A2l5?M6v2b$GKf!3~OlPN|CI>ay zPcUOLzML^VbI#Tgf_)2A{+rfHNh=FFU4mX?|IX1w9TSmQ-3v%#8kIA*#vH`6dF z`*2LEb=CvyyjgQvcN-J{bovu}u*nVIu?LfnW^v8Nvzo$&93_|)w5tJwLC zS!c89IdG;V>qn<2#G6MZ=fs=YIP1jI5^@t_OlQO=XKqLsjUF=0#^kKen9+D{oT=2D zY(0NW?99x(^UN_5v(6hcayBy_9FdTW7Ei*-Qp}^rS+9?q@W0yo`uI4C`|jD@+q=`- z?pZIkWJ@|3`9;QhxxM$ZO|UGTEMZwvE!h}q`+Sy8@|BbBxH}n{rU}8s7%+tL;!x6p z35nx}Cc#ewuG6NC6NpO)6gMw~wxl6VVml;l9RiewfS&K{&dPUtd$!4w&-2fd`F!qv zv)`Th&2MIY^PAty-p(n_j@CrB*};#M&3AWl^}Zj;3xY|`>1lEhE=t_u1-# zRgR@}3_HEgNmi-LR{jr?{5^+_nmx}8gjAKZB*vy}OO~b=Nev!Z;Qvgnc72PpzFuzq zvMj$|F7sO*6|3esm9La@)xK6z`ooGUG%nzk3@-KP9uOW@z8%t}wE0M{w zwxoLe#?of*_GPmCoeD0bL_(Ei^Of~wD-`9*3hq(f>Ai}x@`-Zk0iiB%73R5#dDb=V z!1jfx`Bj|EAFfyc>I@u^MEP2dbA}E`!U~V;TEuYa@wm4CY@YmY6*lq(UcMd8tSASh z+IElofMkEZg5SY)c-;T1(%RrM@jzfWdM@bt*4h=bK7X$|# z{H@mc!G6Bllj1tOtG*$Nr2}MPXuoVbQ{`16*q?0mt$Mvq{*go$_@3k?abBbA{gN72 z9K$mXCH#?8y29;pm0gV^fX+x|vIj?z|6H=ktI0B_vYIS+y6Kk(U()SPp!1zR3iwwO zS(fCLm;hzD-4~Q)d970*n=SI2+(P*(CntB4dZ+u%Ml~Wg%AC{NO)PS=+=Oinxdo(| zm)k(vc-bx2H+st|I^2G@T!!zr7V*N5D`Z8sHoCJ?saq~ps-14!6W**SamzH?H9xrA zQSYxOysf-8_}>IP;7hn=i_`0t%bh+N^B`91QIKAd#d@zIS0Pezyr#+(CX2ki^B06} z=j4+$7V%4sr7uX8?jMxc&Q);zjq=|u5Nniy#=53OoW6G? 
zQRtFNn=$q<6m2ot={W}3ya9aIc#lbPYlW!Zg0C~s&@t}2iYu4DFIno{-5HhtKRFb-9Gw z8O~Z2yn=|WQu%SNI@CqDf0rbeWO=bt-a&Zr%9`fT_W!s-?rjzwUaM1nLgEMAtxdt5 z#MFm$_f6?4Me)BSbp-ImRe5i(mp{!_NpadjYcWmzheSHOUc{W9MH0+mC$KXT zoDdrMcSQV%bojU5S78I#gS-QL6Nuv)Gzj zEYFveztzB3m&oxttF5xcdZ@8E6uOyk4`RqXUSVnVx*xBo^u$`Er(4@YAqRK0;&Oee zobVye@^E>L2TwHCmCN5KC*m@x#(O0eBhQoz)!swpHNGn`U$w~}St3@po?0r08iWp? zn`;%tP=nl8Zt1}BXuNf)+ZB?fx63UJ$ob$RUi@&md_A$$xW6UauO}@YxVY`X)!StG z#$`nK`tkRBO|)Z-$>*_{3q z>@@Y1JKSDGs{>zLL!z@Nbn`Jj$bGPS@)dHrRa5ipd4vhgMj=v|Rq{MbMxIN#*RFz|^p}g4W1$isvz% zOmH>zp{pwF4>#I=;;_7HDcQ@3%SG!jhUAOH>hve8to)h^r~eC!ElWNAH5HnGAEWji zUj0JPdkgD*4^%8o?%CuM-jW3;C)Tx<{Gyy+ORf%hSFf!vm7hiX2Z_9`zC?PsywMXR z;wE94SNS<7AF5wrT@LThvy7M1gcF`??eKeEtY2x%5bJu;>CO-fU*CpRuBE}vPs6XD zhS<}@BBa|E`fnA4CA{W{Mg_6rdld@;&sbJ?-48p+Zr*YuU+cc3qQmdMqk>-+3S|UD zKP2(OefB!fhoq&R$SeGno>j?`6&~f8>LrQ~%YwEWY8Uulw#tjG;(TwlSRS?Wt4kaF zzqG=%wqx`iB`bZaK3ZBE@Rcrdmszh=f@QF^fg_U8=Ji&&`#snWc(r6%K>3k{YcH*M z5epB}5V+Qg1=V-RLhlAU|H=9Mzmdg(Rc^2FQteV#>&@H>#dW`6`JX2KbMvk9s+R`b z*yntY$XMxdSBZ^*mrCkfNv^^FsWRKQSCoCf)b<#$FZJ|!BJRLnl?j=J!VaNycZcxA z!m^JCxIZLim88-0(u!)Ovv#5Hm&yRrdB5ke+v@DMLu)V^R=7gfk@*-GjrfRL7nQwT z=Ja1jxVpe~#QI_x`Qf~Zf3gS%t1ynX-(F>PD1RebU*+eIf7R7I9{Q$#Elf%3F0Q8J z7Gmq+`BN_Y-&krevsso{THNk^CHz|!OAC>2;5jTNmy@z1)pem8_>xmAssjDIGq4%n z!Qz^Rz<1`i_>|40!{<5?uwCs_wog^t8$)4kq5njnWIb0KvT)pSQlH|jHNj?L87*1i z_VjYXRTzc8B33Mo!TMLE&igA;GF)2~*on2oX5c=GZzgsA%|!m<0%G~YnyY>O?VD?C zPT$=;!8rR_u(X$JQ52PZD5V5?xuuGy8D7#yEu5&>+T7kse|2!LMX2;I3_fnDsON-T zEfw~K{3E>ZBIb_3?WC;aR&P~+uXpgQp-*s?!P^Bn!dvJ0n@B_O)3tT}&k0U^I6*0;6sF)M zBLAlaYcE^X_yk|_abamtIgKUX&xkKbJ!|eovM@E?FfYh+9YOb5ICE>UqB%?I@R6^( zk~Ay9vluLA30D(3OB!*}th8c}v4@m=k<@t4lG2@mFGv@f;1o)^6*&BZ9ZnzRw2CzN z!3lBsK?f&Hi&y)7+wq*GMmbB`756#P?8dTs<H zTIX+vK0dh!1z@8@e)>b)a<^xfU2;`a`afMEFYXYkl;_(@S{qw}p>L9s=S5%0gBEuB zpM}@s4>dZh^8(u}jiE=xy-R*y2!2kyy85!^wXm^o#-nHus|P6 zY{RDoe-MVQQUk?O_$&d2=1>F0Zo}u~`0Q-62B|-kc?j`Wfl(P~py@u3&pXqzv-FuB z4fGk^Nqj8e*91y>CC5H~SfadX`d9I><6dt1^T4W$q^JM6P-{rvMBAcB`Zc&m>zB)a 
z2$;7>`ULI;BeS#1Ap;G1-ME{F`qkFAeQNQ-CbCHHgI1(jg3p`yvypyK%LXV(r{vhr zcS^PUgnp?hRw~tYOO8&--Ua||H3@qxJf?R_S?|TYI!%A=QEbqi!$iL@Mn>)9r>w@pWxR? zwf6{Nsp)RZI;rKL*e$vCTRWxTKHGX}h(A>-1v@2Ir_{1eYC=lnfb4x`>fspFCLO(L zcJ?u_BG=^CcaPRHNE|NPDcPSn^}-NshW!Ez-x?(U4a%>F-MbC;!tzaMtlG9+71rKQ z2=`d-77tpx_t}IVY&b=*rTYk1ig6h(>r*f3cw2*de0Xej_OC!K|BC#O>>gT|y9G#k z&;p6~)4J@l_DMVW*Gttpbmr9Inlg3V-iG{dLSN269?L&oiqC_EeAbs~{SQ8`euVYo zlNujm{~P;}>Z=89*!@x1M;gCFZwFPs*GtC){twWOXc}mEcNrb;$1H?=7wOfTjK$O;TQlJcAfpgSlB?3}~KLk!R}%$V2OdZENyf=)(}= zmeu)nqC8!g3$oyOC*o#u?8=p+Ta$y%wWmKoo}FM_57}GyVVpu9%U8hwb-u`0?Uzn- z!ml{#6)r5j#_a&@1hT_)^oC(72`%Fml=04f%;D%0%ct|oz!)m92Ys}R`)L^)QO2f= zmVr^BmGKjlu>}6WAJH=QUb2jSa29Ukq;rt}Jhx4ysL#dg(F?Y&yJu&wr)|}9d1b8S z!FwIb*m2P^dT2eUE_@zr%P|j4(J~tI%2;pGR@X=;IU&Xsmz(m$Vw$wTm;52+NiL7) zN6dLb(=sniB7$$AJmU&s0BVYQqKhkVq?J1-v^(Pq12Ae0Q&{72G{`tQTCQf z_AW4@=^vo!;UnsNMt~iq>4AL`@k@+_0V&1zNi*HjEl{ADPAS!GHiv-?m4p81$@>6& zG))dy8ggK2Q01t{y_N?r$U*ze1#AhhyJ+5vkI4Z{JYCZ6ZYi}z+TAJbG!=u&a2t5; z1Wz*==*x&Bhw^233fK^^-=z$8)cGKIo~8A|yqVh$OK@)|e09A)XgdUfZ3A}sGF&62 zhKjcX?c;;s`ObA%3sD)=xnvvlIGW&>ge6x*atx`LVd{*yHec$pcg_#~3HYvstjy)7 z+{x@LS1zGK^T;#Vrjahv$#3i6np;wSCmDT$o1)0FS3$PdeJ zgHEEd#W$_wFGcZRtmJ!E{ImgC+gg8$tY6^8(^m2W{!zs5v53c!mu7q0B4XkDfGD1^ zl5bhHvVyIzz=l2|h|k!`cZGAv@QjBHr#863x?DzRz%dm|g2@mlK^DKWrnb;ne22&2atJ;mtDjGkfiZAPtKtb9h7G3sZuhtVyJjx)N4(fy1bV)O{3#~D4v z=*x_rVf1ZAt=+7AMwc<_XS9dWEsTycx`)yIj2>e22&2atJ;mtDjGkfiZAPtORz9Q4 z81*yS!{`=9#~Izj=zc~IF?xj2Kz@DLQZ*%Vou*66O64t%}uGr1-yASbJh&6B&lDS;Z<|?mY1i~Q9SturyeXkwXEcKMm^A*U z4R|lZQ}!tg-@KF;s~15WJ=<-gH@-^uWP1O5cVHyH5e z8Gelcf1Tld2D}vdrs@w<1YD|NMG-yRp(ZFC?{UWG^gb7P;4Ul>QafBXt?vvKib#^*J*eq(INPZ{21SkJw~aN{~pvTAbb<5ch81{E)~ zGu^;xyXoslz1$%+eWAT7CVx^*KaV6#?4DVD|3poHp?&mn?@;kVd-bS_7uvm}z^VL) znLU>nqbGsWrzu9e{dJX3q1}6l;Vp*#{W);;nYS(=qoU0mzq$ziQQ%iV&Rce944N39 zPci&8hF3BCL=is!UIbqZg{1t+UX1~L?ny%gxS9NWi{J-Uyqq{buH{#sdm;MQMbiHU zc$1bi@0+gos60D&Y4Bt0?pENHz)$Yi0I0DV_Nq9$$RURqAHwXaUYEaR_-o9r>g!$^&mdEcSpIgDAfxd`f{-f0=Rgts3n~u%XY-wIZ~p^0E%)#rX+=E1 
zN@~ODXSBXSLw(mXTwkw2z1467aI<GyWBP_mHGCNn%HKHu+OeT)CeN}Wcv!^?_W?E(N&hk6 zX8nG=Nc!_Cj`7I)SJ%TjD5_aI_cDB~!5%)wa0hEoxIJq4CUCR*{+OjFEIl(b#0p2n zEWI1JS-lcP(*GH7Du0t=mXMR0ecdB0?V z({hJcz3$i8lc#`FKa;SA)W^lkEWOeGzpJKanRAHZG%q&;+)Vy|D1yIL1g~r|=d+~< z{?Q`%bHHgk82zXpsp+xaVeO#%lh(yr`qQ@%HPGKR($E6jtX}aV_z}j(IKEy})5Cu5 z)A*{tM?`cb78hpxZ!UtLQ*o>-**vSu?`UTFwnO8O=~xYIz|HvI#L^!#jIZw(NnZ;? zVa8`~5&SXWwEd0kNg~$|Y)%XfD6VX53hynCCz3ZQzRd1uDi(=PjmOflsnM9?@6)4Q z$*F8?CL0dBBHl>P=7I3AJMvreaOd+F&Uho6VkyNH*%%p%XHv;bEWByMaI|kUOiyge zwksa9qTH^*)P`7kDwcq7nQVGmeF-}p_UYUeQwfV@^XF&VFq~DqW3ln*bRruW!zR7>~zLQNh};5O@xQgChhSIjVs*uxuR3qE_ZlFYv7=Z%6m}`7Z~JZQ!bfJxsWff;DKQrCFPqFsu$-3C1R|*gKo-p zL#{af5mih5s)CO7jrUI~;c$3hIN~1}*_(=G`bHvKM_iF`BCIHC^|bUfg=Zq0P+Xnq zbad}xU>X4(QMZ=GHRniy@PhhWaAM8%3%|0 z?#_gkQ9(1HtnSzzDE0{KGZ{^1Gui3!@%B+N1|!aTCp#G#O(dsc84`(%B_rAqZIQ8T zGM$M;r)S7$axz6N+*o_S9}1c$ij1S%BT+hzBI?0=WIP?6j77$#CnxtJOU{+5T6sw$ zk)BPR8^e+CKz9UIIWn@bOOLbWAMEKF4v$1eI@k4wk)r#$fzFM6T?DpuhALZR{lMl( zxRo2#%yQw8w{+~+`}0(B&P99zF3p7$J3|k9>I4kI@vc0Tp8UB z3zAJoVLcN!n_|8O0qLj;OYeIu(6Gqv0?l z7>Pgv=9dg--LCOu`le`lEE0$P!}0b@igsRPA~lUl#AzkeL(T{t##;5( zu1&;u(MxzZnPhvYy+BpHqseqkaczo?cO~%yhuGMheq7XRGo5m&QkzY=L>8>F7+97C|SPLSDIZ{N;=xZETuN z>8QcT7U+O4S2uP|j~7>r{Mu4mU^?pD>X7td-W*fSEYuDY1wC|BohS5u#wac5?EIcL zSeqc!1Kw*ip#(NYZ-}W4IGEC`OkW}%hC7+rv|$7q2ge5E6BYDg*2)*caQe`kzEO3< zk$Sr3A&Tq+1p`{wC+2~>UC{g3WD0gO9ZR92$#irg7B*?9Kgb)ds(JbV#v!tcp67~V zUa4UR9laqky88yqWbwoWZ7Z~3uI})bzOHa&{m^F3KIM!8ziI?Cimz{K4^(ZeGd(d4 zhd0}I4VsR*iqun|&V;*DBN25N4#%?7DLtY2XdF%^GU2$f0hFNH^+r>bGs>wlVs4?@ zKNH;*-=iq#y3s@w_0D3D&?Qz9?Fw7nxvnpww5to8;cQ4#-gpw`tI(c>z0~o7UgT;y zY@ep(N~Ex(hLa(zsp35a?H-`P%9D^Bl-E=$wBa797ro01+*@UN0!^Ww`Y<^ah zIfo{hqN}0{rEJ2i9LFKT>Xf4zx=`-zYtzvbyn?>|U3A%?4uu@uQQWy{{e#n4OmgOv zeqMu47Hd$0TXETI7q-fwMHe%1eKYMw-zq^k9aw{yp%plj%O=d~VnWxGKb9CW&d~WSsCaKs*ZHszRNTEvKzBGahZySy#odRo(Z@_GR1R$zn$yL?&K&9) zq7#+Q2OiCEq}W2+*t&C;+pRFGy%7gg!%{MpXSzEwY&xMBxhVxhrdykG@_f^|G8JEc za)OnI^+Gg(-lg7~%GP9reE_ozn^jC=)WtYLjr9n1X0vH^h0O+GBrpKy8Dmr%aHf;1 
z>hGF3?FFVWN`Rm%D(!raO6yy&m|&bcJp@Lyh#AbCTd;H zOtLyPZj4RhEO65j&F#3RvLpFhWEX}PiEN&_DUKb6O|cB-Wy~R6U09{49`A5AhzT!( zsW0ZGgV@Ajzut@WyutJF<+)!5H~Y8uyk@XHROor3na%Wc^E6&7qZ z9jR_5Ed*O}2&P->l><8O$RNFc0$0Qa_#9+4yD>$6- z!)uFAOqr}@)qReF=Fjsc;Ext_CJY`ToNw$7T+&I*?diEmp~yrO=t-uxMAO>Xbi33w zHpW>Zp)Xq0QB$zO&Ml%~moi706sk=5x@Xv@y~zIQrG1?|rJge-iqe+}&!p&H_8!%) zstZhQo9Lph=^TeZXQnf=cWRUxWy8MQ#Rr37eL3fO7i>HfI^uavS}@`JyVaHDoTF@} z_-0N)fbD=_(&%|b(|>l^G$q;^LKrYE)e%xr$hYubV?HXlM-Z&Me+d|1-fxy1UV z(0b&0;kn+uQrvyNu%O7>fn)mQ@%F?M2|QQGbWV+Jp({bC2Hm#8CO7tX)cv_WZC8H} zTs|{58|y2dy3N^_*g66?RkabhkErPUp4_WHqB2$rt24U!j5JUX-8D8_&22(TkG9}r zsqqwpKExRIW4#Y2Sziom8+vr5X4rMS{LywiR~_9)e4+3Gb1`DMv+NWTVK)Yk6^(5?y6sCb_<@i z2d|B1cW;VL-2ev>d$)>ez1XAD8Tz!@SmC+46s!{2nl!%y^wqCfvzRZ2FXN#>j%pRC zt1qs4L+WDnlCyj< zRj4u-ys^Be>(npv;n89KlIik1KrEdu>JgeN2I@2SG>K~MwbAqxHT-mIqVT~|Zkrc4 z@CCylMEA#4-KWanr<URxBd+Bn_D*J_yFjyPjqcWCQ^{p`cMbxt$q}rLuT%NFP0_e6^?M8aq8sSyDm3T@^Yy z86QRdWR}VRgzhV$L~Zdh9GjreJxDvAu@L-#V^Y0hze4-%2|eF}zBzEO}RiwB!11oB^Td*WZ)Y=>c{_VEOdl`RZ(fh?L=@=hxq-o}+j_f(BYHhmW2g zKL=LBT6|O9dVc-AYn|%nYN(0vMS0|gUR=;KF7*6!?`ONTRM)T!y8L=Noo+@ldS-@x zt-r^u(~}w-Reo9rU490WF+?b?=hxr&)~S9@hhDy(U#~y?88FSi4j&ry_ri6mpA$m! 
z(>%ufGr(vS@mii5^!LYgil2Mu1zJ{a$lxM3zy6-NPMdVbER+5_ew}{AkY9fvU8gO% zg(0rXuT%P3S8n`{+E`o_n-bAyiR*{ zK^d1OMs@NjL~`@%=OE~GXp^2&qXu^5h#|lJex^?8nfbZFDF0K2{QCR%I(>%@&YWN@ z|L<`xSAP9HeVqn$#brEb+T3s&7kEoT9m5pY&)qrsF1=!BdumbjTQ6UyuL7keLN7*t z@BbB+Ur(pA(64oR4mYSwdVc+Vlh;`OltExUpU&?sM5v1E`So)IhTdZ-So`bkuIJbF z@Kz+tEnh!xU}#p$e~huwiJo7l?;G;dU)O6v6TC?mjG=lyo!XErw|xElf+qIjjxHz5 zJ~#gK>k31D{hR|A%dguhJ-uG8eofC2pqMVE{{B?&W#(^Y<-2r-jOzKf7;suw8gi*& zh^Y4uv+`eKFX%J^9l}SKU#}On54!AXM)q~fLal^nmgJ%r%GFW&?uS|auB)~Dt-7Er IgW;0=ALb;nc>n+a literal 0 HcmV?d00001 -- Gitee From 46aa4ed0b5096a383c7addae70b27c428cd53c28 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:33:49 +0000 Subject: [PATCH 57/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20sampling?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tf_ops/sampling/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From dbd8642f3032c45ddbd121fcf189a5764a586cd8 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> 
Date: Fri, 28 Oct 2022 11:34:13 +0000 Subject: [PATCH 58/69] =?UTF-8?q?=E9=87=87=E6=A0=B7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../tf_ops/sampling/.gitignore" | 2 + .../tf_ops/sampling/tf_sampling.cpp" | 182 +++++++++++++++ .../tf_ops/sampling/tf_sampling.py" | 91 ++++++++ .../tf_ops/sampling/tf_sampling_compile.sh" | 17 ++ .../tf_ops/sampling/tf_sampling_g.cu" | 212 ++++++++++++++++++ .../tf_ops/sampling/tf_sampling_g.cu.o" | Bin 0 -> 30992 bytes .../tf_ops/sampling/tf_sampling_so.so" | Bin 0 -> 84480 bytes 7 files changed, 504 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.gitignore" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_compile.sh" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu.o" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_so.so" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.gitignore" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.gitignore" new file mode 100644 index 0000000..9d22eb4 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/.gitignore" @@ -0,0 +1,2 @@ +*.o +*.so diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.cpp" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.cpp" new file mode 100644 index 0000000..d92d3a7 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.cpp" @@ -0,0 +1,182 @@ +/* Furthest point sampling + * Original author: Haoqiang Fan + * Modified by Charles R. Qi + * All Rights Reserved. 2017. 
+ */ +#pragma GCC diagnostic ignored "-Wunused-result" + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/common_shape_fns.h" +#include + +using namespace tensorflow; + +REGISTER_OP("ProbSample") + .Input("inp: float32") + .Input("inpr: float32") + .Output("out: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ncategory + c->WithRank(c->input(0), 2, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints + c->WithRank(c->input(1), 2, &dims2); + // batch_size * npoints + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims2, 0), c->Dim(dims2, 1)}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("FarthestPointSample") + .Attr("npoint: int") + .Input("inp: float32") + .Output("out: int32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * npoint * 3 + c->WithRank(c->input(0), 3, &dims1); + int npoint; + TF_RETURN_IF_ERROR(c->GetAttr("npoint", &npoint)); + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), npoint}); + c->set_output(0, output); + return Status::OK(); + }); +REGISTER_OP("GatherPoint") + .Input("inp: float32") + .Input("idx: int32") + .Output("out: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + ::tensorflow::shape_inference::ShapeHandle dims1; // batch_size * ndataset * 3 + c->WithRank(c->input(0), 3, &dims1); + ::tensorflow::shape_inference::ShapeHandle dims2; // batch_size * npoints + + c->WithRank(c->input(1), 2, &dims2); + // batch_size * npoints * 3 + ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims2, 1), c->Dim(dims1, 2)}); + c->set_output(0, 
output); + return Status::OK(); + }); +REGISTER_OP("GatherPointGrad") + .Input("inp: float32") + .Input("idx: int32") + .Input("out_g: float32") + .Output("inp_g: float32") + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); + }); + +void probsampleLauncher(int b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out); +class ProbSampleGpuOp: public OpKernel{ + public: + explicit ProbSampleGpuOp(OpKernelConstruction* context):OpKernel(context){} + void Compute(OpKernelContext * context)override{ + const Tensor& inp_tensor=context->input(0); + const Tensor& inpr_tensor=context->input(1); + auto inp_flat=inp_tensor.flat(); + auto inpr_flat=inpr_tensor.flat(); + const float * inp=&(inp_flat(0)); + const float * inpr=&(inpr_flat(0)); + OP_REQUIRES(context,inp_tensor.dims()==2,errors::InvalidArgument("ProbSample expects (batch_size,num_choices) inp shape")); + int b=inp_tensor.shape().dim_size(0); + int n=inp_tensor.shape().dim_size(1); + OP_REQUIRES(context,inpr_tensor.dims()==2 && inpr_tensor.shape().dim_size(0)==b,errors::InvalidArgument("ProbSample expects (batch_size,num_points) inpr shape")); + int m=inpr_tensor.shape().dim_size(1); + Tensor * out_tensor=NULL; + OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor)); + auto out_flat=out_tensor->flat(); + int * out=&(out_flat(0)); + Tensor temp_tensor; + OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum::value,TensorShape{b,n},&temp_tensor)); + auto temp_flat=temp_tensor.flat(); + float * temp=&(temp_flat(0)); + probsampleLauncher(b,n,m,inp,inpr,temp,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("ProbSample").Device(DEVICE_GPU), ProbSampleGpuOp); + +void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out); +class FarthestPointSampleGpuOp: public OpKernel{ + public: + explicit FarthestPointSampleGpuOp(OpKernelConstruction* context):OpKernel(context) { 
+ OP_REQUIRES_OK(context, context->GetAttr("npoint", &npoint_)); + OP_REQUIRES(context, npoint_ > 0, errors::InvalidArgument("FarthestPointSample expects positive npoint")); + } + void Compute(OpKernelContext * context)override{ + int m = npoint_; + + const Tensor& inp_tensor=context->input(0); + OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("FarthestPointSample expects (batch_size,num_points,3) inp shape")); + int b=inp_tensor.shape().dim_size(0); + int n=inp_tensor.shape().dim_size(1); + auto inp_flat=inp_tensor.flat(); + const float * inp=&(inp_flat(0)); + Tensor * out_tensor; + OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m},&out_tensor)); + auto out_flat=out_tensor->flat(); + int * out=&(out_flat(0)); + Tensor temp_tensor; + OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum::value,TensorShape{32,n},&temp_tensor)); + auto temp_flat=temp_tensor.flat(); + float * temp=&(temp_flat(0)); + farthestpointsamplingLauncher(b,n,m,inp,temp,out); + } + private: + int npoint_; +}; +REGISTER_KERNEL_BUILDER(Name("FarthestPointSample").Device(DEVICE_GPU),FarthestPointSampleGpuOp); + +void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out); +class GatherPointGpuOp: public OpKernel{ + public: + explicit GatherPointGpuOp(OpKernelConstruction * context):OpKernel(context){} + void Compute(OpKernelContext * context)override{ + const Tensor& inp_tensor=context->input(0); + OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPoint expects (batch_size,num_points,3) inp shape")); + int b=inp_tensor.shape().dim_size(0); + int n=inp_tensor.shape().dim_size(1); + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPoint expects (batch_size,num_result) idx shape")); + int m=idx_tensor.shape().dim_size(1); + auto 
inp_flat=inp_tensor.flat(); + const float * inp=&(inp_flat(0)); + auto idx_flat=idx_tensor.flat(); + const int * idx=&(idx_flat(0)); + Tensor * out_tensor=NULL; + OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m,3},&out_tensor)); + auto out_flat=out_tensor->flat(); + float * out=&(out_flat(0)); + gatherpointLauncher(b,n,m,inp,idx,out); + } +}; +REGISTER_KERNEL_BUILDER(Name("GatherPoint").Device(DEVICE_GPU),GatherPointGpuOp); + +void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g); +class GatherPointGradGpuOp: public OpKernel{ + public: + explicit GatherPointGradGpuOp(OpKernelConstruction * context):OpKernel(context){} + void Compute(OpKernelContext * context)override{ + const Tensor& inp_tensor=context->input(0); + OP_REQUIRES(context,inp_tensor.dims()==3 && inp_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_points,3) inp")); + int b=inp_tensor.shape().dim_size(0); + int n=inp_tensor.shape().dim_size(1); + const Tensor& idx_tensor=context->input(1); + OP_REQUIRES(context,idx_tensor.dims()==2 && idx_tensor.shape().dim_size(0)==b,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result) idx shape")); + int m=idx_tensor.shape().dim_size(1); + auto inp_flat=inp_tensor.flat(); + const float * inp=&(inp_flat(0)); + auto idx_flat=idx_tensor.flat(); + const int * idx=&(idx_flat(0)); + const Tensor& out_g_tensor=context->input(2); + OP_REQUIRES(context,out_g_tensor.dims()==3 && out_g_tensor.shape().dim_size(0)==b && out_g_tensor.shape().dim_size(1)==m && out_g_tensor.shape().dim_size(2)==3,errors::InvalidArgument("GatherPointGradGpuOp expects (batch_size,num_result,3) out_g shape")); + auto out_g_flat=out_g_tensor.flat(); + const float * out_g=&(out_g_flat(0)); + Tensor * inp_g_tensor=NULL; + OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&inp_g_tensor)); + auto inp_g_flat=inp_g_tensor->flat(); + float * 
inp_g=&(inp_g_flat(0)); + cudaMemset(inp_g,0,b*n*3*4); + scatteraddpointLauncher(b,n,m,out_g,idx,inp_g); + } +}; +REGISTER_KERNEL_BUILDER(Name("GatherPointGrad").Device(DEVICE_GPU),GatherPointGradGpuOp); + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.py" new file mode 100644 index 0000000..efa8428 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling.py" @@ -0,0 +1,91 @@ +''' Furthest point sampling +Original author: Haoqiang Fan +Modified by Charles R. Qi +All Rights Reserved. 2017. 
+''' +import tensorflow as tf +from tensorflow.python.framework import ops +import sys +import os +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +sampling_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_sampling_so.so')) +def prob_sample(inp,inpr): + ''' +input: + batch_size * ncategory float32 + batch_size * npoints float32 +returns: + batch_size * npoints int32 + ''' + return sampling_module.prob_sample(inp,inpr) + +ops.NoGradient('ProbSample') +# TF1.0 API requires set shape in C++ +# @tf.RegisterShape('ProbSample') +# def _prob_sample_shape(op): +# shape1=op.inputs[0].get_shape().with_rank(2) +# shape2=op.inputs[1].get_shape().with_rank(2) +# return [tf.TensorShape([shape2.dims[0],shape2.dims[1]])] +def gather_point(inp,idx): + ''' +input: + batch_size * ndataset * 3 float32 + batch_size * npoints int32 +returns: + batch_size * npoints * 3 float32 + ''' + return sampling_module.gather_point(inp,idx) +#@tf.RegisterShape('GatherPoint') +# def _gather_point_shape(op): +# shape1=op.inputs[0].get_shape().with_rank(3) +# shape2=op.inputs[1].get_shape().with_rank(2) +# return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[2]])] +@tf.RegisterGradient('GatherPoint') + +def _gather_point_grad(op,out_g): + inp=op.inputs[0] + idx=op.inputs[1] + return [sampling_module.gather_point_grad(inp,idx,out_g),None] +def farthest_point_sample(npoint,inp): + ''' +input: + int32 + batch_size * ndataset * 3 float32 +returns: + batch_size * npoint int32 + ''' + return sampling_module.farthest_point_sample(inp, npoint) +ops.NoGradient('FarthestPointSample') + + +if __name__=='__main__': + import numpy as np + np.random.seed(100) + triangles=np.random.rand(1,5,3,3).astype('float32') + with tf.device('/gpu:1'): + inp=tf.constant(triangles) + tria=inp[:,:,0,:] + trib=inp[:,:,1,:] + tric=inp[:,:,2,:] + areas=tf.sqrt(tf.reduce_sum(tf.cross(trib-tria,tric-tria)**2,2)+1e-9) + randomnumbers=tf.random_uniform((1,8192)) + 
triids=prob_sample(areas,randomnumbers) + tria_sample=gather_point(tria,triids) + trib_sample=gather_point(trib,triids) + tric_sample=gather_point(tric,triids) + us=tf.random_uniform((1,8192)) + vs=tf.random_uniform((1,8192)) + uplusv=1-tf.abs(us+vs-1) + uminusv=us-vs + us=(uplusv+uminusv)*0.5 + vs=(uplusv-uminusv)*0.5 + pt_sample=tria_sample+(trib_sample-tria_sample)*tf.expand_dims(us,-1)+(tric_sample-tria_sample)*tf.expand_dims(vs,-1) + print('pt_sample: ', pt_sample) + reduced_sample=gather_point(pt_sample,farthest_point_sample(1024,pt_sample)) + print(reduced_sample) + with tf.Session('') as sess: + ret=sess.run(reduced_sample) + print(ret.shape,ret.dtype) + import cPickle as pickle + pickle.dump(ret,open('1.pkl','wb'),-1) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_compile.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_compile.sh" new file mode 100644 index 0000000..ad14627 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_compile.sh" @@ -0,0 +1,17 @@ +#/bin/bash +/usr/local/cuda-9.0/bin/nvcc tf_sampling_g.cu -o tf_sampling_g.cu.o -c -O2 -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC + +# TF1.2 +#g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I /usr/local/lib/python2.7/dist-packages/tensorflow/include -I 
/usr/local/cuda-8.0/include -lcudart -L /usr/local/cuda-8.0/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.2 +#g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I /home/chendiane/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/include -I /usr/local/cuda-10.1/include -lcudart -L /usr/local/cuda-10.1/lib64/ -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +# TF1.4 +#g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I /home/chendiane/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/include -I /usr/local/cuda-8.0/include -I /usr/local/lib/python2.7/dist-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-8.0/lib64/ -L/usr/local/lib/python2.7/dist-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 +# TF1.4 + +g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include -I /usr/local/cuda-9.0/include -I/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-9.0/lib64/ -L/home/cc/anaconda3/envs/tf1.8.0/lib/python3.6/site-packages/tensorflow -l:libtensorflow_framework.so -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + +#.so结尾的要用l:lib......framework.so +#g++ -std=c++11 tf_sampling.cpp tf_sampling_g.cu.o -o tf_sampling_so.so -shared -fPIC -I /home/chendiane/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/include -I /usr/local/cuda-10.1/include -I /home/chendiane/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow/include/external/nsync/public -lcudart -L /usr/local/cuda-10.1/lib64/ -L/home/chendiane/anaconda3/envs/tf/lib/python3.7/site-packages/tensorflow -ltensorflow_framework -O2 -D_GLIBCXX_USE_CXX11_ABI=0 diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu" new file mode 100644 index 0000000..6e28bc7 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu" @@ -0,0 +1,212 @@ +/* Furthest point sampling GPU implementation + * Original author: Haoqiang Fan + * Modified by Charles R. Qi + * All Rights Reserved. 2017. 
+ */ + +__global__ void cumsumKernel(int b,int n,const float * __restrict__ inp,float * __restrict__ out){ + const int BlockSize=2048; + const int paddingLevel=5; + __shared__ float buffer4[BlockSize*4]; + __shared__ float buffer[BlockSize+(BlockSize>>paddingLevel)]; + for (int i=blockIdx.x;i>2; + for (int k=threadIdx.x*4;k>2)+(k>>(2+paddingLevel))]=v4; + }else{ + float v=0; + for (int k2=k;k2>2)+(k>>(2+paddingLevel))]=v; + } + } + int u=0; + for (;(2<>(u+1));k+=blockDim.x){ + int i1=(((k<<1)+2)<>paddingLevel; + i2+=i2>>paddingLevel; + buffer[i1]+=buffer[i2]; + } + } + u--; + for (;u>=0;u--){ + __syncthreads(); + for (int k=threadIdx.x;k>(u+1));k+=blockDim.x){ + int i1=(((k<<1)+3)<>paddingLevel; + i2+=i2>>paddingLevel; + buffer[i1]+=buffer[i2]; + } + } + __syncthreads(); + for (int k=threadIdx.x*4;k>2)-1)+(((k>>2)-1)>>paddingLevel); + buffer4[k]+=buffer[k2]; + buffer4[k+1]+=buffer[k2]; + buffer4[k+2]+=buffer[k2]; + buffer4[k+3]+=buffer[k2]; + } + } + __syncthreads(); + for (int k=threadIdx.x;k>paddingLevel)]+runningsum2; + float r2=runningsum+t; + runningsum2=t-(r2-runningsum); + runningsum=r2; + __syncthreads(); + } + } +} + +__global__ void binarysearchKernel(int b,int n,int m,const float * __restrict__ dataset,const float * __restrict__ query, int * __restrict__ result){ + int base=1; + while (base=1;k>>=1) + if (r>=k && dataset[i*n+r-k]>=q) + r-=k; + result[i*m+j]=r; + } + } +} +__global__ void farthestpointsamplingKernel(int b,int n,int m,const float * __restrict__ dataset,float * __restrict__ temp,int * __restrict__ idxs){ + if (m<=0) + return; + const int BlockSize=512; + __shared__ float dists[BlockSize]; + __shared__ int dists_i[BlockSize]; + const int BufferSize=3072; + __shared__ float buf[BufferSize*3]; + for (int i=blockIdx.x;ibest){ + best=d2; + besti=k; + } + } + dists[threadIdx.x]=best; + dists_i[threadIdx.x]=besti; + for (int u=0;(1<>(u+1))){ + int i1=(threadIdx.x*2)<>>(b,n,inp,out); +} +//require b*n working space +void probsampleLauncher(int 
b,int n,int m,const float * inp_p,const float * inp_r,float * temp,int * out){ + cumsumKernel<<<32,512>>>(b,n,inp_p,temp); + binarysearchKernel<<>>(b,n,m,temp,inp_r,out); +} +//require 32*n working space +void farthestpointsamplingLauncher(int b,int n,int m,const float * inp,float * temp,int * out){ + farthestpointsamplingKernel<<<32,512>>>(b,n,m,inp,temp,out); +} +void gatherpointLauncher(int b,int n,int m,const float * inp,const int * idx,float * out){ + gatherpointKernel<<>>(b,n,m,inp,idx,out); +} +void scatteraddpointLauncher(int b,int n,int m,const float * out_g,const int * idx,float * inp_g){ + scatteraddpointKernel<<>>(b,n,m,out_g,idx,inp_g); +} + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu.o" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_g.cu.o" new file mode 100644 index 0000000000000000000000000000000000000000..e98acb0e5c087e1ab73f116e64c9998e24844f43 GIT binary patch literal 30992 zcmeHw3wT^rweULooOvdh$@@9UoIIPf)8w2p@3gd&q-_E%l0JBpCM0Q_lt`PVlcYR+ z5Yh$=2)8IG>VT!aYA>KXRJfvID0&gpD{|G}$DlE-BKRr=`Mt0I z|JeOz_StK%wf5R;t-bczXHHJ9U%O!)69h1d0IOiOWk3NA4b5K1_+51P@{G5Yu zvbEbfn>w00pK^QG0L;qMwB_OpC%(OT!^Ah*CccFuD|1vjv877ldQqQWOtEEi73&_q zk0d(qb%n-z>d+$keU{)-O24FhL-0oko@rKXM^&cLc_(!iK3<6Ux>0t|7{xt4eCEs< zLXaPq4TKF7J7)~z9^Xs|DR<%=Uus4GzcSf_JDRE8bZOHi=TCf>%+xgT12WUZ>laS^ z3+jx_lrq^l8Ht;7K}-E&!^EFvEH$4Oj)0&UbK7xh=p?gzxI=K>hL#F8+_Isn`@)G+ z8zz3SX5!@;&a*zybVXAa`oSpjf_LnXN-)`bA^Trw*xy*D6R+d?B>ST$7%OtlGDQ=a ztwxJ<5&P#|IPt%^tzKHQ;Ckh@GRgOjz5WyJg@KkLZaS^9ckG3m-hWMe{l8vc|55fr 
zbGQHQ;>QKZgb!@O1{MZ_5X1V)2(@O54YQZB*(PQv3zzFtd5_>{85Q7Ma_+3C5QvJ}CL&Ndmp6z&L)YtAwwRzn&bmhR#YoLB$e|`Tz-(IL6?!9KX zzUy*L9oo?|Jlq@a*|~FYZ~wsX`ri0J@9zHo{`U2K?d$v7`(|j)9ee1?o_O!hbI0GY zcVK9^XJA-4M-p9jHN2~5_{!e+Tg!b8m@_h;1Fl)#w?a4c9PaChqp(B6T-u?YJ%hXZ z2X@WTc3*q{9A&=^k{Ri>w?Sl{#yQg&x{qw{ALxl+JJj0~-*KgR8k?1_x${uZ0d+?H zb3r!C{g(KKyxNYDJwqdV3`xnm=eV}ID8j64i=r43Y3RUE9Z6j&=VwIvVX9} zBs23oSE8z}u5+FQ;_evP+0(Uq?~b0`UHg0DU20g-lrF!&Yhdp{7uQ?Y&ip2fSLxJUm3xclcyzI~(*Ifqpqy3I~-uDV1|e5(j)`1V`j&07C0S(>eW zF39GYy%qjq;mt@o77l(!Zn9`}B!n4HU7Uv@>EgkdW28J`K{g9`Y$&RsSD8H)?cUcL z4|H|;wYSYmTZk2M3Y_Ic3(9B84Ch{6icJyo()neI!=3ZNumV#@5wgyL|1*cDodwUr z99o3zv*5!Ves*^IRgv)1&%!UjyEr1`oCW_Vhi9Ax|2c=}o&_(n5TE?B;7uIvIt$LM zgzr8JPHA;^=B(aE=?spg80+lJS^XIB7dd=Z4>9CR<#n8IcINE7VyG7?Pow2xsORab~U&$B>?IX08!u<{EK~nG?>;HR8-%BaWdp!kM{7 zoSAEuYJUE}qn!y(Um1Js@-l#2> ztOltxHIG^ZNgg|m1XrRBq%sLIQ5XqjLhAo^nFi;{MCZBxYCVyTevKc=lg{--N7796 zPtujF>&o%pAXgiX*^M8v-(^@UB#X&@wCZC*A^KF|j881=GqXPSw|xeELmyAZdJ3JK z#%m|%eW$#mfp;|Ujt1V*!2eVY#85E%|5V}rf0N60;P|vQ#wt&CLMGnGu9ZA+2hPz~ znH0bhbCpoOi)B>uu)7E@CE*~|M?GVQA!e0ZCr`7}&7&2S$s1(Cw|QU+Q{nE{QgpGC zFYryb_GBlMk*@ZrM?k!6dAd`eExPdOvF6d^W2Y-y60vSeOo*LG#s)2;H_PJ*LWhG# zJWZ5jX0{}nVyXV7_Ei6N-hb2Ban?LKU2)X&aHb_D$usz#mQjx1(R7ILPnHS_64ah7 z6{i5_X}PDF=zXoDr%#-2nW`91bdx*>k9Hi#d1cF$$=gxR+X^lB#0c&iPj<`HZ~0k_ zY9H!l^35}1?2WPG)2A88uazPc(-_eM&msS|l>TljM7-`lBi@Ut{!y%_&S3kbj{l#a zf69j!*MGv$zi8-pe`WDnW_`|qA>ht{cP<@5=L`rl6ayTj1`c!KNwpkascB-i^v z-A;Gt_PFE4)7$_u>Uosx@i*)cTgVQmf8tPK_&?eO?GCkQKM5we{Yb7+DIw_Vq6f_D zqII-+G~PT}aqJ6c+{g7;nJj-3;I?8~C&;(^4etJ| z!dNdylif6ayRkl6JSx}Yi4K+f@tw(wNS-^85At=Dv_5p2^j~qZlu^vAMZUny_AeD= zf=da;<)40mal21ObA3lP1 z13JW_UYZc^w!-m_3Ty)EA182{RnU6)t4aTc9#Ebb$%FEuWUdlpo+n=bnfTdSMvpg- zG8}(9@`d)Jo_o__(mE3#T0G4$eI28J@pa7U#yw3)4hM1lwoIKKs}Mb!9VVe$Oe$RN z%A|_pp`4x@a2oIo`G;9P8lUX7YZFd@$1fG_?JAa%cvzTpwZB*nF}#W$?MTTrUCMA- zLE({(4{T48_Y~5&YK3_v$=G(IWHyQmG2- z+UXbQ7eXjJc{hoTaW`41z+@T9l_eWj|=gy1jL<8RBDO74WmKZ=klAsJa&gZv)TTjo7|S^kk*lSNw7d^+Pgn 
zAx=kuK_M0OqWmr=hYXRVang!q8^YT^F=BHmY`_+i8s!@qCB&8PVk!}Sk* zT+vgm`=N?8`d_jc*}yXt2j!F>PT~HD`(ny}T2D2Pj-mV~?-n`zbg4)ND`Xw$kC+Fc zJti62L-hQD*573!`W3AsjI-odwU0HQ8e?&-!th(PFx@8@2;X&pa6pYlRPC7Kb$WJNN4!ZQBM!9 z)6Cg;kZ2y&;{lI{H&OJ){R{O`i{sVanT%ltcQDTwpKmHJ;p30Kg5!&c9({kNIF4}K zys_~8<0b50Y@asv>-n|qRb2kc?EW!*H*LRAm|Fk06^coj>mTX(y2Uuk?M8V4o||%QDd!c1J&;c#3f#m1iAI z%9)l?tIU$hoA@|eT1!HWksfb~-To@vtqH6#)CGLgVUpYQ-4AIx5Ai)95Da6ca-L1%op*d0QGm& za|7zxF5|oo(Y#W6ru~nPZ_Yntb9@jJvhHVOPwsz{|BmC$_b(AqyyfYX8Fgn&yl7tzPT-2WsTCmZtw ziOazfK)3Z~;xWpN{AfJ{92AeppKEaZH;@=-K>34=hKZq{sWk4%P7CLQc(Ee9<%|7P zjvY-rjPev>oEBo(uBH5o@{gnUlwG|7Lm z6up=9_kv)IJBe|6CXYsZoWHARy3>-0c!h|^@mrPVSo0+6?{H=l!rR!CfI9`)r+<;! zU&X@~KHrw^a+&-K_cO};+fGc$3hkdaA@LZ+|9ew$yT!AX<_+Di(~mdpC(A9gU)GLy_X=1)1Ie}YBTf5wa-6rB zXGFrb9rYi_1z7Rg3ql9si-}AM{B5j*eo4W3U>w1^u*H*-|3oq+H~Aysx1KuCdLpRkUNF~qaC zf<~oc+_Qw{a}eVa($l_OEmM2kQ={-YQ4`~JvZkHa9W~ulhfg2C1u%;A)q|=2@=>ax z_a|$nus%56(b`V^(DBg`4)3TrMR3fk zcpLZgQi8WmHa|4Y$8U{c`x%yyY5z?h_q1kGyCr!F(_m;DPqZr3M!wy=_N*PG`i%o) zW81~&E78uQ)P{}`^1F6O^j^&ChoKB znBe2dwGUAZ9UtLzsE0{vsSa*tQ=4qmN*eR}1 z+|9W=~F*cv?G$kLG%w9YdiW&CbiGf_N)GSXMeC{k#pWqAANo(nT5R(9lf0y-VXSLWk1~gQ)+xMBu8Z+Hv5v+; z{hdC3`ZW4~vZ+&h~J7;y$nu>2SX(M<}jWBflF@Z~&gangKOH{-25cq&KQb zZ!)zFdek5CL471QVmL>AB+6r0r{nxF0hvPk?{6n~OX82*-=L$fhuY{*hF8K!7pKVqj^p_2Xk4lZ?o|=)Dx!S|Ga1~c_<6Ai_dhg0=Kne#`5U58Ki@A5I6FKkksRXG zB#+z1IiVH3cNEXBlgK)Zr;S@n@4O5x?!})0n-SK3yRuRUTJo z=JCQQiob0rM=DLsS!e^Ayg%BRHwUpVR9py*@=X=B+d9nd~T* zzQX5+`7bR1%71D8WS?%*Kk`E|u1^_jnpY;Zv8FcF3e_||t^gW8ro-Fyx?8UYsmAd~ zvBvdKj(WeX&^Vu@^v5btf5hLcKaRK9xX3R?_;mk-@&}ZA&ixbI51X*RP3*Ap{nXeG z=kLQeVemTbA0z*x@i9LjyJ5Ualq*jy5j4!9oH`%AS)oTF$GU< zY^VNjcy32E!py#1nS@fn{I4a6W@xj@qDR7f3)+x=5*3L4r|5oKh3-XqqCAfK4=otF z|8n?$q1}x5QJMS&;$!|#`D1GR;QGS#jCnaadW_Mz75U&gCp*_xCQjyJeHqS^YFv*G zw^5#Gqx6l5Gv{B7>oIfQCVC#Gb3r%ei+C=e{7yOyF{ow9CeFVVvyT|YZV7())>|fO>VwCpvmiuTEK{{Bn}c zFYyDY!its-+-6C?$$h7|eiIwHej(9=Go`$VxA}cQvtFN`<_f_0!S6>Dqu%*BPE0(+ z`J z_dTKg2Pyhzj&_Ks&te(nSV?+3N^xaU;^%YHgK_>c{0!HX!q=64-)a7QC690r`TLCd 
z-PGKT$EPEB|CY|Dc3JGeb%c5l6StGS+s2daSizI{`$8;r{z=*WL~=Wx=MUrkH;R9l z|0QoAKf(KdJ=`yF-E)6B+99C)PL#h1<)XZt>*Wr#6ThE}_h0$?GvsH9DXvG>@gW#R zzdJ#3xy>Wvd7R6I_XAK);v;i?-o)Z40O~J!n2!&hlpMyq;hr(FV>_O!sjh{)wOVS^ z_{ULyc>jj(AmKU({k$)Le%_a##Qyo`{gae+1K$7BYu@@hzR;%t{qw96(;?rS*Gc&1 zLWjk9D7@P)rLbzPW^8_MZVrH+c*FUhRTTY^!*A*X5>5tw$SJ6hvr@r(5L5yIFQK?fN!>SE0xQj(12Ecyq?npo~gReX+Q!1?rfDf!N#kCCug%=B5F?rPQaa9Ffc`@%<()PDZvCM<^Z2Ce`Qt}YiaCqkc(BjtAhg^C=)a6Gn!+AO9-sUx@xkgp9>ZzmWbk9C#u67%6KW{X+VCc@g?!q^x=L3+eB@sF;Pu z=XWEJ-tbrAwUGXfEJ9zHM-;e`5_7#QM6Y2Vq0C!mFseV=I;zu?{m>;cVpyBY@3FBE zy|KW|^k`C7%uI~eA4lR8y>2kQf0RyXSYo51pAL`#bAcyJ|XZ>(bd{^(VJhZ3F zk1hOj!g%k{P}flZb-i*Z;0(Z-%KoAL?Yn#B`rd)z__gvPemm_c-h0y}RW4LHvtDxq5J=-@j@_4&2%2 z*W~KHm7#DpWNy!fOX4dvP04{%VkjV2$9JyO0;^UyoeuhYf(zig^*y`$cMZq^xqf@( zMzEZzdOLpsu+6W9Ly^uM;P+}fKxo<)2?TneV)xEwLHLAleRVt>^2yu2E2J;)ToD3q zIE0Cy8UofTc!PND)k65PiX-42@^`Lq?%BKlZg@RYjRt*k^^W15{+;#LtZ)wX4iDDv z>a8F0YmVwc)mH&;*pTfCXAE2N_IQun+^lp35O6;1(2#OqcpX$;xm#uxeqRZ^#?%$g z9s7rS>WBB%@7le0d(ZBNpgiDv6ckj#7O2n~$DnfeFn(#iCLv3GKjdrghoa#7VY?bd z>6IS?dw>lEqj=e;2w*P;HKh1t5Qkwb(C8c**v!+n4~5Ye1otbzyulkls8FL1LP_vJP}OL7 zOc1sROG1qrR7S!UwiCq}=-u@?>K>MbS9}C=l`EifdElGikRho0edmEN1mFkzbwVJj z`aTWI{43tYssl8zEaV4d;ok5HC+fR?WS~D2a>E~D4dKSenUtj-gxa0K_rW?<3;MPR zpO4_gM|Rg=-M_O}hBZ6YsBa161fsy+1z}C`T@2-F^e3XY57=XDqaRJSZ7Hl?7U;x) znd|pLcIQ?Q3!pN9bBAVdD@z`>{R zr73ufUIpHW7o>7klVNEnxSv&Ok*is4xbb1BIQT8e`A4xP==+Gse#r8IKV+He)66<5 z`PG20MU+a!%HDnJfL$+?1lC&WL&3Ec@eh`2Mg1jPht3)rf)#-k&JI*v2NbCt;MF?7 zR;YA9J_1Vp*b%`F1bVU4f*usbE(EPc)4eQ8W#N~^hN%D7Vp?UyR}Iczi8WDQJCsHC zh&kE`u_*d(5xxV=0}czbtVGnn3K749jX0@38`}zSHMo-uDx#quu_AR)vcRI0vp;2adfs^*KL`qd!;#IR+lstpS@n(sQH$Uh*6 z@3DHK14wvGTFvYtTZNbFC03y*gF>Me!IOYk5FHfi{Ee4Gp1Mz1ttrYr!RqznS5?5* z3Tbg+Nzm7b`~z_T-6Ss52Rl7hc8yS{1w7V(qWOLZPLH)g@ed0zB;j^+?-y8J;0cR; zzmTVX1}yv0LBHqB8hJ>{jgDEe0;5uK^nQ!|b*KuWzooqn-q7o?DqGu*YecbHiH|UC6ivtKnSE-EWrymh@vbT$=%ed^o*W%a+(S!4t|v$yAGFwWtd04q>&bDi(|2 zZf6qvHdsYVRMQ&20abon-f2&RH}EvXP~9o}JWU}9zw&};ooArf{|r=xe07la47eXo 
zkFf$*j(ULwXZs8yM)1qZ@f@c!>s0Mp(fK*6*d$bG>OC!TE9}9Zfj@4GOI;~IxV`_0sWa`6*#Z6!@5j&XI_JI z0?-RD2jMz!|CkA0|A0e~ZpzLJAIM0%Kqw9uxb2OQ7d|hWU6PxxZg*L>3!doBx$J=G z`icd1<+62P-QcS9`&%LGR~nb2&wFe?iDGE&iPsNZJFr7mJghS6tA#?fJU1)wppX|? z1$K9CuIkP$RiDVqjh0YcbUX@dy|p5ueF?1N&MH6FMQTI3_&8*%In3r`?BgKHP!#wM ztWg7+uM&!vXS%8b{)^E17eP$`x3Kh!Xn|Bh(eg3@kwU&fC=M0@s;v>6=dnm2;@b<( zTe7nQ7m1~TTe2JQma(`h98`TnhQC<=WV0ztnIwUiViL1LVx{K#qBENaz# z{vSEtuLc9lmIpdzjOmJu;>lngOcQPaK?Ya{?$3$>+X6xjI9?OPe`2By-K7MUC}_L% zXT?IT3{_+=v~I>ps-I_dTH|L0Pw)X22!?z`Ahn5(F(-4_#3krS+41YA z@lV#g=w}M8>k)(tr6`bw>n%;n&5r(_*>7Z?a7YXUBgEv#VEwtRIv8CkMbu!#D~j)B zIl+E#UMhJ5mrB+e&3~y>61-F@z&c;cNXzwKYN_)hK)AVx?E%NmtSt4~EOZ%AEq2Ur z3dFL|Wm0v>-ww_{3*it~b~~`GQdwxLlpWGoDX+ImR7XM))Vz~AekBsX8{R6d4u`p# z@iG$De0>7-hoe#{2M|N|RtYKsT9l((a{My5wo15drnI_M67mt%ndW5olTOpNN@6DS z`mrhqph|;}i`MHis{Fw(vdU1SC|Vj}Y0&qkEnh20tMUiJP#12zRLBj6#p1x5g8RE! zG4U~6CSPMq!@*w3ekT-$FZ5Uxu`(Eaq{Q)o9Pu{MeXR$?uNKvW!;d-hwa1)!fg_e> zk;WW%wHE%oM?7D!-r|5WHv6zx9gg@Q?Jg#sWTlZw5k6lkmV@&%BKu^ixEYqJkvxI@ zR9cRAcyTd9+UiIofG+3t2Si|h%3iIh!HwuN_mrHcMgki}Oa^eyE#Pj%3?gzVc$eKL zG78ER2%tKsc)?wqBZ-%~YXjjsVM$QE6T2`ni??Oe1=T1NXi?yim>oj^HuC+r;{`Ej zOXBg2+~A{Ws{%^nyF^su(je}$g`r_@sj@>VK>lKpXj_sl{l26zpf(;6omYsqZ>F(o z^u$NfmIeYhmN%&3FM65qZ7(YW=d!|DHL^_dMS^?1%Ol|>a-*h(Tc9@TTME)|ZL0$Q z#t6#^M3{3!nebtU@JKmSf~z^7ecU1RLt1k_?!0R&$~ARLusxM0d^Jye0aNKO;B5&T zxU+GC#>Er5)4?urAs~IDb%|do1M!A(>p!72x>!-b;t(C>uCJmp1W)9vQcYBO-ccO+ zvRnF#oUyd1A*y{@TE)at3u;@dkwCmh@O{>I$cavJ>6{Z$tCNR=nwKU`J4XXMXi*}h_^ ztM1#LRTWS*p>+B07`wuj<}GEf<`=0|H7z(z1ksUX`^Vu(C=z!t(p#Wm$a>*mJbs zg0QJj-aQzUwm7Qe{@}|6<%;iTV1F3*E%jL_@?V!-8&Jv%9d~A0RumQ~msbSTuJAXd z2Bq=7N?T!MWg2`%5RX<$BYD346?#gM#p0K-KX4TqewoAh@AgIBWl(LcZ}0Ov(A{!fD;)0CQ0`S%!?KL^bY0^0 z0>L_Vr{!ynbG?z^DCU7rCL&>JD@GsgI9Ahv1BTFo7Y z<_+{Bhd4BJ^$)}PQh%;%?>2uZ5(+H!gRrC6T`QKk4mcYEN*l=TZ#khMpyHKR+w2tF z1p28{@CFDHs0naLL$LFooD8ajBK0%QZ0(ojwqGvwhL1VZtD@mTtL>OGqZNeWvW(9- z;jaZzzY^~8vm>QNfiK%EUvc8m7@tq@uW?#$0a3NFaa)zv^_7yMf%Q%}1kN_+O06r{ z>Rb{CT>|a`r`2t(v$65nl)oWiEkT;Km 
zY+?jcp05E6=Rc^anm-(gM#JF;?JP?z5sHIhC{S}VtQ~fiAz(yvt*WRA`xK!_8CP<& zZ`gCSqpH{k_Rj_#1tOjgy-*j|zJ{u?r8#!8B|-lsLP_`!49{9L^=s-S%=bQ-AxZaS z*w^}Od4lk-c=)~}whD>Mzql+_)^N`aAa)vvwtYQv?h;;qN0`9_OmIM5NWD!ciJrGi zxJM~gvmsZlEMhO=orJsaDBS~&tsb_jDE)%E27KiR+aUh7fqlp83B4Ol6JQ`syl2~3$nELtytZ>F7il-v6Z4Xc+?8wZ`9?g|3!6Kbvzfu^(>=A zWuwe~jVq{isUN~j(`UrA&~%pm;mK>=K2q1AE18|B7op z5(HnuHU2dy3vMofkz(-<&HZvU&bt)czba+be$5NZBAvq^y=aA}w2VU~Vta#EdtSAO zx0kpF8-)Vp88zb~=2ah5oppZgL6vf6&(A0ODSNf+ZCqmKvbMz&wCW&1}UeI*{ttXgj9Uae|RzYM}(MX?6*gAXhA)!7Ww z@Zdd)tabe^)BTc1U=LVxgGEAqq|ch)_mm=CgQ>uZ+p?{H^5^;5ME{`buL5B;2;Ud2 z_$q*Muzp7LhWi~NKHJE?U+htbt?Zx$&I9SwYTBV-f&WktqYvb2tHJhg(76Xwy-`>i zQ2JEJkt9_pivDvx9?%O|gJ0?L3M{AEF=EOtoc7QB76TztN? zK?&!SS=mOsqw@iu6m4ME=4vg9Q<$_CjC{I*{ocDY9Q|}dj`kKy7Thzo{m!m+tqbNcoK$>Rv{ff801!9QHc4Q3WNRLnp^D!5Tc*{G<7 zNazA|s|&)2<}3(})c4)oP|>G7;Vq8rYmn-N^hcTaY6CQY?J(p=H!|m8$QxPFj5o-O zqWO*`Q9QQ0Z(?}&OY2a9%lVA!!52FmTJ)=1r9T*7fJ(Z zOQa>NSos`F{}l+mY2Iy5h|SrZ@$`J{LCnPR>{VLuYv2ifvbZoVlKOLOwI2b|JBAlt~v^0a;W`caWMa-7_6FG$bPmNq(mb3vB>)f{(6j>8*z zS+YtOyYL>HXzgmUp0vA_a=g0`JmkbR*II7Dn;$`t{q>u@_Wt~8&G$as_&>jXwZAL)F_dpeDDm&XrRD)=Je(8k$Pq>%ZGBxHrdr<6SOId6 zj1`FN7Qt~Q)3&x+xVpgkNxyYPMf$Nkys_}lI8g)F})vVj;{yUQN~_;mp3rDfjPEF zd4YF`uui6MTt`Ew1o#*xAwKvcR4!3A;aAj{U z3yXFyeypyC1-H#EyB-(ZY4$SLQ-a%#)wczA4pu)9+yz+uOmLUjbKJJg87N(e9dF;1 z*w5fRO6-{Hme?&`1ls8f^Ue}Gf$$beiTyKTk=K337AezyRGT(n7jiEQ4Da1j2DWR@ zw;Zy&YwV3>YK=V&-%0X{&3z|W@&XswU9a1619;-nCQ2;m9r*43K#3Laqh4v}D4_>R z#P1Z?ZVa&d%N-|uX*b)wp_}cPN#1NP!B2h=0r)bIIUYEj#+!Z4>1}WT zZ~Znk#G!J|@Ayig6W=-K6yt4s^fJlRdjS{u)kmk_+;;1Ebop!gYP&UqSGVwI_}&fl zuWjOK`g#^$blmjM_ypFOwuz_OCg>+Lo#w#T<+JJ9CVrkGo~28T@ZW`P68~9~Bz*4M zUZUB|Ef2SBUq`f{~S{miri?@#q43Egc`e41XT-8Q+Q>{w@WKB`Sn9e9jOut=H z5w6<=!Zn#fNxIE`2pMN0j2;-GqCYZ$54qqwS~vvYi&QtcGH+x}uEGQ223Py_PFEoU znq2lKd`5%lJ9J!b47?8DXHbXjqq^CZc`Ivn6@E}$-r!kM|bQYR4f3rUfem-c>nB+P9A0SUR^82;U2Y;sK zetUp*kU7z)wQF5Umn*ZKH`kJ-m5bkIv(07KMLw&d;UswF%vl`>5c#kDXZMKMvrrMw zdden+CGsZ*=2GA<$XJY(H@8oO`A=n_N)Uyao8hmIe 
z(zvp|WDI%Ab-1F#4L)mh_@sfa&+kSZ{+#Yw2A}(N_>>Me_&ksBh3xh6B5(ocvk*QE z?-{(NZ@5dr5)9z+mM*9)4|Warbs2A+?%GvPZ)EN3RqJ;Q4nkkgFuu%rP0w%-CU6_H zQ0jfH7Z2cbHTYK6-gq;A)APl{{k!*b1SN&Au4lMq1PR;l8IIk(ya(TgIW&wfT1_Er z(&?LarwG+xR~L@8rgwk;j@~W!{&A?^(1M3C`VRB&2>-@g92;6R1-e={T-4mOq3fb` z>o%|5(zT_jdBfT+m@gfQkrEIk@Q1s)c5?P8$H;bkN9}^|&rH4CybnkTn|p^x2Akr$ zM)u%iNyuVB0*<+%XJlZ5mrJKGdgEsPEqe$0`ge`Q zds}*T6K>#a5&w3|oO%`eO!oY@kx7`yV0`a(ZtC7yfs72C-Z9>Fc@QlzS5J%F;58@k z?MSP?7e2-19BFVoExO>_&*zFq{7FIRe{*H~+b?R?MdGij$JxKto0rw7>80`W1kiZ| z(ZT0RU_Oj4{I>bIQoJ=jDaAkU)%y!d!{423tR$1K&xNCB4&#$}(1ULZ#+&6BHh94> z$Fm4}7@kU#4ySPF!H)qSTm*jWBJlebfgAhJQtZbEmuC1sZo)4!!Jjn2@rL3IKJ8yr zNX~Nn7;-k7;AZ~x{3@0C^LlO(_^%g%TS#FG@Tph?-m(aMlMW|6RN}{w|56jaSr0uX zxLFTBHo?nHe122Q+MtcNTU+^mNQ6WpwaBPO_+|DQ~7vmO*ZUzx9;wngB*i@?VgfgfE2 z{)i4IJ@`y|c+7-v*29w~xLFTTJq0KE&3f2mf}8mp89U+Qt@j!Ie-E#z5F8&snt|^# zt;Y&8oDi{4RgE9Rj^!pj*P7tpGs(Zq1lQT%*(sIx8GMWr7WLPg@QvJ%@N-P?0=%X& zU!Rc_nDZxI27i7-3fly0Fu~jKnhNo!drAiWMFhk$-*2xeVKIXXhv<{!o=Xo7Yd`MS@JjVHh_?YAE>pK2?K6r%S zh4|3N;A7R{gs+(3c*zTO8)tC>7=B1_JyZ}FWd5DvP5g~Q_uQx$;~IX@&7rgK=k)gQ zJouE}eqbKFTbJADH~1Upw%6w2kLp6L)aeX-kl4{n^>4$gy*lA^GDdmh|4xABQ$ zzro))x8>^N8*t;?)-exmoZAk~gB$0zf1C$5&TYp2OWkIB;s4)a{I5&K|H3r$|1!mU zcjNQ-{M56Pj#~BGhlc2N|8yk6QY)CC{^72kc)W)=(1-*3Q%U3I-d+5JQ@>;Hp47#_-r?T*)^}aJoS$cO z^xiAG`tbi-=_RfI{Y6F=WI8?E%`lAVSG|UV>utkd4Md8u=z1H1q21PK4{6c+v0}oMnG3bppoxc_uKU2TqX3*34^kbPp zZ@`ZtjK){+V=jt5KIIJNqN|hB5S;b_17^m%LZ>&w=z2cxlR9j^|99*3ZSxdk$Zzod z3SKNU|5|Qoo?)hUQ2#$5rH~t+QF~>qVOc8$O)00ol6A>X8kM#Ix c{CzYB;@AB7lS~WIPsURe-!cgGdOrRC0iZ7eng9R* literal 0 HcmV?d00001 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_so.so" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/tf_ops/sampling/tf_sampling_so.so" new file mode 100644 index 0000000000000000000000000000000000000000..ccfa2d53220cb160338d1f327ff49d3978624eca GIT binary patch literal 84480 zcmeEv3w%`7wfCNxB!q}0Ak=_^9Fl-RW0;w|L3EN3GSL8m0HOpZLo$$PNaADy_^Khv zsMBc_6|LH6QTnj6msI*yW5M>y;|Ts&$lvX{@Y}jbT=%u<4vO zUZaW99yp~UwU%KNUBoQ`Kf69Bh*~A0eR2#3S)XXNaeAO>ob5}s-}a@#KWHr@0=h(- zs6s3bE&Uu6%NX8f2s*7LTDq#a9;MW6O#X z7AReb`ML?j>pJ|{@#DaceANM{$Im+atd}|(@VN;;&G>1>j|)F-_@T?KHf)nsAbuXk&m;Kx zE`I3h#n1Qf^Ed&xzOOdy2WWpJK7aGJ->g~j>-@J33+@>D&F3Za9@nk7?Wai-8oXIK z-*4aCJA0t(kvrRdbJxM=@84U}X!mV0O*%h&)gK4aep$Kd;V}iPGgrOres^#|_tjNv z=C;k>`Td`-nsL>2Tb^%fe$#QqiMJOFO#NW|gogLu_-OpJq{C~zx9P@9KOcWt%L_dp zkMIBH(x&eo9!Q&|YguypFTeluH=fOZyJOxRLpM)rZ;ju5^@`mGi{k6%-CVZhx9)8# z|Dcaww(wfZ?=Bnv-33R6K3wk1dG`-LxpB|*oz_W5-qh`QDSyI@xBLTXpVpjwF8O}n zR}-Jlf4j8oPapj7mH55;uDD}paHZwd>k}q4WZk^$zKYD8?_cxXBQM?d!s?7)AHDBZ z+n1BKn0DQDaLVDtXFhl%<1eeP{&?AgjXCdD8tk|I<-|a5`L{eBi(l&6^N$CfI{w8^ zzW?*XH?A_iuzbE|HlfAHCSx*V6C+U^GQoMH;V+&L9se%~#%T1XfQ*K38Xq0+jKODD z40()@R*542@~P4JKaG46O`bI|=FLIkh(^B*bEZX^_xc$6S%idut`S!X5=u1uMVK_& zycW<$!z<@U=W}rkJv7Hf=W}mrbUqJYpNKZ^udz8r!@oKux?L!rDkHSO*p?XUI`xg{ z{BMYXevDqtV}cPIUZG3_t%ohTr}keie=X z^)ciuI4?T=Tm(`yK8LXBMZ=GSeWUs92QlQ_9K$Z-z&slNeKG9&(-?7@7DN8O$I#DB zG2-H~81z4kVV6f^tm|luINBV;PqxP3a|C*hrk}TC=q))$o|zxRAE-!)#(ys2J(_*D z#<1`0G4ys(jJ)w|3_bW`@TrZVpTZdVWEX^wwyx)5$p2Q1^%lmEvo;3(Q|CsXcW;b% z{|n@gCg)F*qT_Wj)^&XhJI+sz&Zi~@|F2@m-y9=vTo8j#QjGYW6GIO^9*^VLc2QWy z(?)^oS7_7NIHph$j0!*e?s75^=@VZTQ_&R~;S;B$pz0P<@& z{DJ6ehw<0L-w1ErqS8_RrE44FaRwXD`tDa@gcZLs1-?VzFA(^-<5WHh738=w)P{Wo zy%B%qdKIxv;JeWheydm)!hv5OX!^NRg*E+b6Z+wY1av8CH@lwa?QyJCC_t!{-GsV} z_;+kj3G*?YE(-*n2>i~wR2bVOzn+-G_D2v9pPjWF&9VibA=sDjePUg| zRJ)m7$gc=NZWQ=5!KalDIJh)DRB(HZXI7z4h4e&Xp69M7Ng;LeYFF^qC^&)#zQ| 
zPxJPRc{TaRA?^rYE9`=0^2-)OpE|K#tMFSz&`%Kj`viXt{{r7HU>70(n}U891fY4h zx2uRMli(za7_h)7Pr< zc}Ljm@)-HBT-cEfv*QN%Ey-yUctvH$p6BbjfNdVOt^n#fns@73m9QK8D_w43-{JYQ zPRMBzate23O@dFqLmmIP+OV6&zF-m#rTNv*WBAW$D3I*fC-R|Y$A4hGgs-ARjq5qJ zVXI@rZ)praJSFglg#NYonk?k$xItyC+3P1lZ^Qke2zDerr+lgK0fj;yX1A~3(9&#g zb30sayPeq^n;YHCz77quS1ey-uXnnf>l@qLPS^59rA;l(&gG7^O-{8hdXK$svx68o zni_8g;kAp+CbzS>t;Myjsby2)lGdx7u4ZRbL2+Ag+fB`NWgBCXn9UV!Wt&?uwsE6V z(B)ORTGm!OHncW5%Uj!*v@Wplj^!)O!+T<~6+;$}7_q<{YZ4RhsBfy`!dh|{LRk&Y zHg{D^W3xM&G_grYtk}Jp2xp*BWm#;_FLyu&7Z+ieDn@j1U5vnLlSN@ACb5|0NnDQl z$cdtN^A$!PC=?uh+yXIabz81|nX}bwvXl3cn#;;a*sJOobv$CaAPuq?B=+U;s=USHMdtRp!h18daN zn)$TDMi;`55tfc7bHO6V4bIXAXWb1;TGja~8XL>Xs;b+TUA5efACsNLjy2w57GoDV z+uEQnv2v_HjH+&FYAZu-iIhGxe~j@33tL<(9WFK4BNK`yd5n=oi=CU)b%@}W9p@}# zLje+POf*ex*br4y)h$Kv<;LbVr^{Udl4aE`%aLkhs302SrsyV!VB91zu6E~dbk@OT zE2<*2WzK7BaI`uh%R1!s<~nE5g2oM$U=bgPu<9IpS(&*2!n&M}rlyuUP3S9mPua%B zMTnZ_cBJ^tAhS2OxHdSN?2Ygp2cHaN)p_8tpmhmWxWTz$L(4`dwRNpG(Sobn%oc=I zofelBRdr=$)h7FLd$k$P#e=-sZ85EDacy$A>O-9EZpV6cJkJe9!~-I_on;#~gmXi+ z+YI^K4X&0=_NJEgjdgaX%hlqF;;;F?Jfe0dC*f)hw06A4Gx#x?Q%4_fheOWUA`Rn zLX`}`jsV90PRE95^nCq1w9m>pmo{pIrQ4hr;tj5A+t-1y+0JK%d|!KJT2wTaQP;K@ zeg-dbG&vFAno6Y6Sgb10h|!^yVzzv{y+zfD=5EncxeSy!>QV)D#d15cQCW39G7=n* zXJRDpwoQ&!gjgH*7fn7blqD;M>HsOa3xtL8tKF_rha3COVt8j^aZ_u9V{!Y25m^^> z&f`v%TiR0ZjGnL0YSg^CCa1$yhLu|E6>v8EfWK4q{Cr1STjTm>+6%?nzMgS4 z70BgSQL)?Ys%~nmBLmW6?FEY=KDMk8OEsIzoo?!+Au4Tn^vLyAjmYuQw`HVl_vVGz zN84S_s~g=7S36wIa2wh#8o5A1#Utd2&dzKKvpYMF(6$pT=WgD7_GAu{sL4FHuEpgv zo0d7(mA15FYpoB}sS)XT1lfpHh9%6aY*{Zh)rAhXqX`Bh9g%Oy7L7e1n*B!zFghZw z29+Xb#Li+4Z!Jq)ybyCVyOB9J50`k9Bh8~28J==f6G`Nw;T~&6lp@S|p^en$Xs&OH zQlJgrdZV+WQtv;;F26$C)S+#+bF;gU;%S+q`39Axxtg|qA>HPxYRiZcWTYiV@GPS8 zkTaqzl~0>cBR}Z5>`gc&R&e3NdJS8i?}ga^d7e=d*j2LM<}c@G#GKrPjZIC^Wm|D` z{Yu)Jk^f3tHnck2jW~DmnJX$cpngU%Qf<#!?`|@iid`UO(X3FQ4P<=(C$_>o&moLAm)!Bw^Wfko!O=W_Tl3TQih;g(vh)R^0VG$K=#Z5@t z&U#`~xB#I`9cPnj-G8}Mqgb!m%;QwG#MfPQ5ik7t+D0rinwB(q&al!^%QNCAUqCL? 
zj`DDL`750aDMUt?!JIdYBCoL-jMNiL#4^lzs-Cz@mN?p+@O_-oBxG4xVSVEUdmC

gm&b?`2r=Ro&B*C;Lesf% zW{!0AXcJ5GlG=)j>9j#-2&yg`*TQsdYK>tri7p&bq&^61E^NiUpf-LlsM68iT!*_M zjg5^}SFJ+=ty(v-01*p~w#AKH(ATOX%(;@;>E1=r0=jX58xJk6CGGAd>na%`5hR=SzR)rtBmQlTMjj<|>Pb*3^Gt>?F__(a1dxT>*g z9XVk1TSKFtrLMi+vBI`$iX1Bf zM>=xkR$#x>0%|y3<;d@zB5XI)ZP>`t&K$uWM^_4Kik>sy7>XE`4|n_z3^SXmoo=4# z7B-hvEu`ob$79MMqo^B3Zo?5btf-g|-zQs!8(g^M>*BS7T2aW|5x3DsX7Gsh=JM0Z z*_D*9-jxo^fC4hgeFL*;LD|ZR(lUE_)ryfbk6@C&z_}4;tEikt5a;oGWl@rC?hX8o zIqo)6KDo(Upk<9^{LZ|~;WAsuu`0wpJWPiL7ll?d(nqQmlG#G{;?H!~bQG)9&NkH% z2ayp+3*xlAViYthL1U*|OpdjU8_jf|sjdlUU%MN16WzIL;hd^RZ2FvkRBR+i+}XvZ z9F=LLcwq$*>+0AEm(P6W?&^_u*;RQ*q*QgTs5j8nd17?gTXE;h>8Q8g;JiuP9mCCP zN{B0(wOi6SIhNp?85iA*%YkC+xqsr0cKcd8c2rAU`v%+_S3QSsrB&-h!-x@*Kzplp zuM8}r4&an(dMA=xm1j8LV}nMnCw8bd*EO!k!M_yM188$cN>sR(i`$0VB6SU#T-qLw z-9Ge@raCh*DDv`#FWFKMb^DWNqRmg8NNj_n977Ajt*v1yE|B)Xh*@X{6w`>!(0oQ( zS`HjJlD)+|Y{xsx6lbRwE~KU!r%Q4m?ph%{>M#11g*7#{BaE~&7avvX7$wL|+pl>fql z9?$RN3sXGJuV?g4KK=;1mGdI*n7eO>eyNH6WIQ}~UV+Ew=qlqu|(wEoq zykDgyY$ESb`)NG&sJ#S5ex_M<|AYV0@0aMVegk_%^jm?L)Yyn`&=c580@psZ5l^(mvwsutwtF9hi45#5z;x;G`8Lr< zu=n=aP-Y@~SK!`w`UV(q4EsdD?%DUjkb3r&7_a?4jB0Dfc2*G=#`?tb85+KCv5G%9 z41Y-A4-Lb&3OAr@NAGyABHa$szfi)TlJL_dJky2!R*~?D5`MdcPm%EINVPLn!Z%Cw ziiA&-@R<@mUBa6re1?QCl<+epyj8-_lJGVOf02Z*lJK)7{3;25v4pRc@RvyV1_|FG z;aerVT(@kN@L3XlhlIaW!rvv~=ScXi5`LY8e@MccCH!^?Z;|j%O86WJzf;2JO88w8 zK2O5;N%(vTe^9~~Ncck%KJ*MM`VUL^CnWlQ311}P0}}o&2|py^=Slce65cD}nLh0Q zcS`s~34fV{Pm%ERC48!czg)sA65cA|GbQ}DCA>+(7fbj;34gtWw@Ua&CA>|--yq?u zBz&oaUnSueNcdU_ZXN3UnSxDB>Ykde^A0NlkkTme6@r>Ea8_+_@TVmFN(s+IJwvs?)e=5Y!rvp|QzZN<37;zAACvHkguh0@XG-{M zCA>+(ua@w(t`8DzzW8^4tuULX&#gaw$kz2z;=T~Y>AaHPDD3pi1^7RCx)q-Y=9N{j z|1_?da|zaAK=2TUEd*0Z7VP8j#RMA&-pS!Kf~nLBZs+h+f~mv`ZsqVqf)fbt;BXSb zq{d(?hxG(gDHW{c@IN7X!&ri=IDDMoB!aCRexG0}je;f)4-iZxQBdLVTLe=n6ingp z>jcvgBgi=XD#6JF4}As1%)JEDQ6t#T;oSt&5hHkr!_N{-rBASr!%q=RB~NfCho2yr zjvm479DamgDsh5aIeZ_%RN4ePIQ%Vw&nLK*!<__Ei4&~l@XZ8MX%npCu#4cS1Y0?L zJ;8KD37R;(mS8G*f(nPPC3qUaDI8u#FqJq##^EaorqU)j^d+sogkXi>eh$win38?) 
z5Qi-UQ%Mr+4S;i&{u=@Hz@;fVxO$r0?}a1y~65!}jQJ;9j- z*K+uuy8%)6%M~eFqH_w6b`>ma5lk=!>4;_$NsQ%Ml)ApmrV=2yox_h1Ok00& zD~InRn6~_22Zz5!Fm3h0Rt|R(Oj~@gmcus_Oj~%cio-5~=Mrq?@bv`GBiO{@wFJ}F z8&o)aEy42%PT}w}f@!M_G7eu!@D&6Pea`Jqu$AC`4$mc+w$$Ju4qFH=A-Ipj7ZY4c z@JiNa4Ux=5=>iMu!F-%1k=_QY~`??VA{HZwH*HE3xF#KuHx`< zf@!M?S~>ha!L&sMO&lH|xRPLn!*3BxTU9WH!>mBXC`uOhgX!#5L5TR^ai!!ClaCD_X0>j_>>u!+NK39cbn;qbKtuOT>v!^;T1 zj$p>&D+#6&B{=jCZhwMn3GV0cT!I}0AL6iu;I#zzark0_>j>Vdj(<*J>$W0~i1u31 zeoM5wBU)S6d@B&loz!*k^icAi@|3}`zVeiws?*-9Q#^gQ#|Pg+l_R5si`_^SAPw&R8`0d=S zE2?ch^ILaeH{J3#+#aql)z00J0QM7 zOwr^w6LnACKLPHhZHsUTD#<5=qpgc@$4SVZyuTBUwm-s+CmDP4F2eRlpdASv;vITk z0r*aU>__N>>I^T63CF77K4OG{eB@-6c{|}WA-1TvTjdEKC6+yT`>|gW=MOon`PYdF zTf~G{t21m66F#J_CRJvhp)y}7GpG0j^F=aUmO5b>v_KQyrcU_5uhbQObxK{~-_#it zG2ulj_#>;dnqR1@dY5pT`o5>){y{jZ2xtbMiaR0Wenivsldgdv3d6gyke1$t5ST1TurQxe@0wBq>on%hQA{U!7v9U7#LoqGTen2dm&XY zEEEi@REGNn!^gu669mI%iD8K<;dv^4-1A>VTR9f&o~g6 zCNV5l8NNZ(J@Y3K!%2eSgM#7Dsr9LW6^hOFB-@skDE3yb*%!F=Bx&Jg(!v%#=%c_C z%rQ7P7#w_0bR6WX`6$3b9pQ)$UIK%KLF5^PHv%`1PLlSVdjdO|&6gMdjKZ$x|6sW5 zvFKOJ%&!#hLcYZ8`)uC$dKz;(NznC@f^o;xaevyWj)QzOTpkx#8m4|nr5hv&m zze=q;1NEn*7FCkBRO)jgsU`7W4IBtj&u_|wUW0!|{je^1I*sg^@A!qnR@i#tuR&`o zI%V^|>z)FizVc~+aJF;TobC@RaLqnCMTB9DpF!h#@}_b&v%kW4+n%osHt&#aPv8oh zZoloduiU9%xJ57=M+~)$JDgv~{2*e9wa-C{Kk{>YT0!!RL%Mwm_<#p~7O=r8`{VZj z*3s||I2HS3pSm6*UlUvh3b8)lN|Vid%H~V5`5wFrgf?%I?EumnV|*pZ_uemmr@-6j zBo%n{6EZl0?f}dlI6^k@#oHi>7NIK4f;T_ zlv7NXDU$DL0XZ}g_&c-|B;KKFWw1Z|@fU%fk7?Rlp=9JdR6p^7qa+rx-~HfE?N!`1 z9|g|ip7rwJAd$l-iM;)_!9U{D;2*;AN%npLiI~CY72Yr4?VrHWYa>j)3YC^`p~-e& zK?Y@f=&C70sq6t%i83k`inRGs0>{a~KJ+Ytx~ElybyM*Z+*<3521tMq;r=)jVQV=6c%d)}g%QKY4G6`poC*nC#p*D#SNh~JL_ zCqBaR>)gcWwv@mfSp06fM4Z(jh(5vN7I9xw^nfp4~;kezB?`cB4Sm`|( z*aVS-YVmkWYNhvh;3Ua`H3V1EL|yYA*=oJv-LqE~T>mj{n+T}S(Io)wX}`&Te0YVHp1 zH%Gxha?$RL%|IhD-AxR8^6tvVfT^T(sN^YfIJ9d?)RWxb$uqdWe|a{4C;RfO#{E5! 
z@)=xx3^y@coifWbDGglrB)NLO@a;ZYysy;6L*)h49p51qp+NFgB?fl$fQZ6u0%uky zn6+_c!u@X=?*5;CKvSu`$=wHN={!JM;Ub8KKcicYhrjVT)p&>q0-G1nL5Fopg@I%b z{rRP6@qkqXW{{agJS2N=gr`&j5;5T>fkaH446GCWB*jEJagG)fnP3#oPa^(kzGpZS zHU5%4#a(sB8XRoPKd6eO!n>jp#q+=auE~@Sa;z1K;CTYEuR~F=;o~ zayVG-<$S~NGot>|2Co%^gMY>pf$l-lFRF}>0@v`R9|_F(J4cbK77EmOpd#zB;;V~S zs`(zihw&PSRvYrB;{EnU>CPm6lTes z*;v8sM$QbUbZh|0-8-PZ;Zg8Wkfzh3;NuX5h=N0+TV+r}4yXo63W7ulJcDlfvr5{g zv4@Kl3X+D>CKObnP+0n zHz8uaAM^P_`Ji5#kH_cAi1`qo$=wfx=ZlO{x(hC?uWNqF4u!Gg?oJGdjJ<#7!A7zA zL+}sMZI7+A|2+YE_qs^h>q4|Q6YY|SIQ@)-rS0z=7|GV7MPR>%t_ND2D?;%aycx)K zTSM^R!^7hD32pR3EGY2Od!&CJzYlZO90@qM3+$uH=MI5dtfAfuv{LdHv4PycOg5 zdCTVgOwDqMfuH^j%=sbh9VDpLYi!=V!BfC#`U;i7pW3{CrhASy@33M62?&+O37DbS zmy4S0vW&n*#1th)3l!#SNB%evm?Tg&+=54_+Ifvo*lp=9qjsPPw2y@}DU><(1$Mqm zZ1}y5AMoKH1r}im5%wCbe9JL!g(lx;obi#sAGktQ0gnpYuT{Sy0ZnHUFgYw>oDh)o z85VHe*3ruMOH3P*Z;X&-4&)R1`{OZHrN*#$_Y&2a^w-MAaQ!(s$BzQ%Mu-Q0r~R+O zdy)>h72Xr7cXWM#+Yd`{oS1I&9;}%Cj&1f~+n!Sf+}ud+{tZJK?8#*n2M*wd&}m!I zQ1Tspz*Tx-s!17vZyzP0)*y}+B`)&z+kBHUD!eaO=nhoOKDw`FnKwRzzMmEO3Fz#2ZBAYU8?{@aqC1@VcCX8*0S=&$YxmEHp=#_3r2=K(qa z^ew{863<`UI6oW-)M2re-VG3>9jtCkfg7bt8<6JDFCrPBfW7QfG%UD~a+I2v_`NHD z(``ljlkdEg)`nxvpKvTMGoLOq|2=p)(QdUBedfLa47^%vu~bPq;{osX{zQoc(z zRtbV8GdFZ=@vV#cd9o)O(PxLBmf7wF}i#t%Zm6W!> z>|ut_+vzjfCwWWh^g!88f!oDfuV)wW_Qf9rk4j%HZiHB|h&?v1hrWreKw%Q7^!|X0 z$%kcw(;xG3GO&w zjsf>n^aOsP4y&Y|o?EIaeVgIXR-BD#e){IO!dD)!Y4iHxFCfALo>nnEK5hmkOe-8hF6-koM8*;cM~5ANVeKcJ(!2#$D)ypwFooD==JsU%XGnAg_8FKob~* zYlF~8=$n8?z6c)Oms92DdybZC^Zk~VYU_F63ThDNcXE%xUBByDp#8|{ z)7M^8+4G!@)BKh*@_GoOnew^vIVkQiEytwEo(DPq_@~1X2u(ezVkAxZRE(slu_{K> zl+DLiE%={!nD{5C{0oQ@Mpt)mRj;2=${>^b9!TLb--5}uajzqX0-D@L^DFd_eYlUJ zOW|F@RnG+VqYn#qU}^p&g~*dW;kZ}_1)pz|()Dk;zzS%WKi>eK zt+n-Ve+Zc1eu8f>frOcEML5i_g~X^=k7Fs3HRwEKSn{4z7#GY2K=oMHe3MH5$O!sR zz|+?A9QkM9B^oopC+PqojXU)n1$FK>BjJkknFjIOzDfihLc2k_hg8sdUp?b_absQHKnS~-4o&5-A-K^rUe{`^wsqkfbSrL+Gb0!p!#gN)=$147)$-wIVb!@W!b2* z9Q4q6Ai~#^X&iRSnDylOXbirZrGy%OL50yTJ2765tatJC89 
zG@Ex*;;myofmRlPE6>ND1!~}E;Q{LN6?ylhVV<~A%FFfHlh;PFl?vH@Mb_@g8zam6 z1or?gZ@G|nNep?t1ta9Fhn%EOQs>YNOhqrYIqUuCuMTrLE+9Fk3fZQrvZWIaMJ(5E z0+;Pb;6=2u0#Sc^P-Pdm6SeOzNpArPY>~%#UH&OQzHj+pJ9orTUE7^Oeh5GA`lrt3 zrMnnZOasN*ZMbQTnRPZ_HR=msZAFJTNwVi#Bon+^4GQ7a$(}w!Z+VHP;HOn>AK~8s z!N1|glW&II_}M1lAt&7&Z0@b(=?Zk`Cev=DvIQP}+NyL(i zM=8RD&>aorzrZ}4zyp5*vQ(XgroYD@GyDGu*~!H$kL4KfyUKurMS~!tWTo6Yjnb-NhZZjn{+5L;>xgr>bfGj{-k@UCaUi{UxR$ z+S7~bJ`8L^cSX_5$)3l61x_6Lf7<}kz_sYa6q)#Jdq{iujld%#nR4lESo zHUP69q&6CM6vHZec(2X-rulXLJ^kr`9}(1X+td2w?)xy8t&7irg8UdkzTXlv=s{g2 z7HuO%Y=$FpPx2_l`C+ncorl(e5i}W!iB>dRIGjl6GsM`8CQ!r0_}O)YqAkORBI7jf zPrOp$JE~z}3vk|SdE@lyRo7x^ftR_zmeF}cEd!E0 z@sx^&A4Gt8w1L?6<}NgdB@OL~_i79KklQ z5a%I0$ODQ>?>}}g7Zfj$c|(#Wcb~v4lC)QnwB5^S(OvVG0|0agLw5tCJtC2OkA?bs zo+BFu0#Ivk20ronDetLNb-q<%z6w5{hpUmcnEmuP5}pQ7i{Za~3%8t>(%pb!S`gGR zo7mB}+u}TS2o%hn9y$NBBy`VnwKRVN4R{=g@L|Re*&pKLBF;;ys=)lDLJ|i!~_^V;e z!50T#8r+Mn!M~HAW2bG$Q&6h055Q)Nynh>f1NBtI{V>XJBC##-JuRNl8J^-Av7C<0 z;|m6y~Ud)4jo*(P>YniE|qK*o7P>38A%1BEwdz6fHC6u9Nq^%@WC?lzH9z7%d zje?6N;h`QFLtxB|gdEw5HNa`02Q@40r`7fFj1xU8RYglm7)^;JunVKfW~;7E?*4$d zhGbqxEAU0nO(nE^Uyq5aljkPf5muAZ0`L2jj{4wdCle=g0m4_aBIerIDy>}xn8K#e#Q<`aj$SK>P zo8X#=`Id?K+QobybG1o%CAs_Oloi6c<#tTjUPh7oF0{dap9k}I06QX|9GCJ*D$V`Z z5&6W%$6YMvq1$(0SK>Z$4Eh;-Ae2Rh<&-}Ky8neFOs*R89*rOjtK21eTyGoCpyC3o>khA7Wa1>c_-OC;m9w z>A>S))p2V^3u>p6MA-!q5I6s%&$#&yTDfi_(U!bQMy||A^)1H=ynFC?P0Au~U;)B$ z8w^5^I^emDP{nZ#mX9MESws3JNQ?6tEVw6c!8b_8BZ1|ln4Y}F69{~Bw}*?4?;8&i zTOvix@jWTVySpgHf5pj;1pbE66ysJT6_sy>;QIwJ=*c^oqVn|$zMmezfFII+j=3|j ztl*a-AN1tC3RRcsQ;o9a&%K^h{#KTP|SU;96`zV&>aP`)SSR} z#7J-vk9lZ&{$UJd?irEP*@D^>a1r%%fT5rpd?)B1)g%-Pk-@Pm*rnmHI(g%JFtgUW2-ILgx z?74utAQ$(XyQzz>JdS)FGGShbCCM}oQ<21};gGGwQJ1lNL1b-g4 zmEOOaU*~cAQ6Q+9H@SN;%_`z{8J7TY8(KO)?~q=;Ls~-=q1bzw%=3}=aB=d}`+Fu? z_YCN5b*~53Asn>8PO0z)7kN*4-=Qsh4UhQ9u>KBBG$O1+`=%OyHRq`sx|KB4leh6) z0^i*IV-ZvBFR2poxPDBE>!03Eam^q0fhUiL;K5rF14tU(7z!pLzHgnV@;9pdzajWP zCiqVV|G~$2y{^jhZ9)G5P27|Bw{faGPYC*V_hF(NFcFm&sWj^kLivDx0g0aD{+Xb? 
zhJs1tq~z{*Xk!gO`uf2)DIf3}vuplI0L*RIe&@^cdh$K9Fo&8A?t{*QlueS=`;YSv za_WeBKKY)L#2(qh6j)32?{TSczcQQ&-VA?yEbt}PLCHIu6smdX<9Tc-YVg?Lz+k^f z4g1>hE$(}PO9=l?NWbEFd;aJ(Zp>8ro@U&yzDI}Fh&>fgo!ESresP<^DvI8AXVN3Q zV{m`aXW$2)qK}e2r?_|SOGJPBdlkOk41Vy{9vHB|5*4_wH$-Zsvl0!NkH}i&`)Ni$ zkTl{jwO~J+vnP8>{2PYzC+g4x39myH#Ok16)hAeG3s%CPBz^%J-X+OPLqhyI1;3rd z2ckVmEkwIG2EXjpqy+9)t}a6R`p+J9E5^!JURKLsZv`XN{- zILJMY=rf7ly)u&bt%CL?qHW={9bicGr*0woi08bM@A=?MmG?HHKLSAHjj$8{n8!m1 z4qgv|!~0bjpGJ7;`L@UTILzC@=N-1M{s>c19E}M~rJ3nhB15bO-wC`7O{?R5^JgOk z1>LZa_`bsX2BqHL<6u?jy_Fv5$A74?5?_qNIwL|N6J_Ri!3h>UpX`lyE1WO$}Jo&hOw?jed8t z!UC?}A&|X+&(UlR>nnH_mMg*Pb0~jYQSJR4H?7LdeW;qdKFF-_{+{o5C>tlyU&%z9 z3P)ov(0>3D6}25lunpzkB>rn0fuHT6)wU-iKUR7NydNTGKYpa%Y*x^ zi@g6@T}j`9{R?I`MW_^C3dC`}m|q{E*b*!ZioGyWvB{pNNFwnIIQwv4KT^Nr_^9?* z2_L8Gmy{nk@gf)eS;D8ts>K&zfqz#!AE3_E-Kn4#M!|1S1a|YzWDi9Z5h%MAjes8Q zRss(Tf@&i0&3}J0jPs}8!s$wY2&_i0WRKt_k+Awa*dNGD#oj-WnY^Da^6rPu$V{rq zMr9;uKu;R$3B=l==&LxukV`}0vz?EjlH*26MT~N5j+PDCBgqGd(klhb{ogj{;v)2ypMN#7G5je~+ zdxf_{)XS5%AVHW<29JXQ?PT~;sZ0KgNbnKq3caHc?-rRmSAGSg(%jzEq%^m<75*kh z*3943fnPMRYwse-u4`>&rSwWtgldUaFKiTVdf>0yDgROuX9XWHH%^Tb1xYwDp3$smg@VD6KwBF=yXlc&L&YuH*&P!YI)?2*M z6*Qqg=RT*gxvr_b-YKK#&3Ak_|A4vc=nZ(;4XQKzH_sdT^YU}}+r7f?Io0NmbR&42 zayc}j+hh5X6GjU@+$m6e!Rjeb2f?kf6acXlWAsSBujJq z2D`J#N$+WGBl+lsoB~vZgM8sY3xz}GT8F!?fv+TsSkyJN;GHaOvylv1749aZ8>hY> zi4Svy#zjdbF(+0TWaY>+OY#0+KF)>rTsOHfTm9ycywZH3bTYb$$@(m6I*K_yiU410 zesw%4jAz%-B2-Z#{y*DmWk$XhCSo`{!^>MZVU)MGL@s>BYe_w%{!YS;oWh$MX>2y|n zqh7a{RW8))bU9;0ub#cgu9YbJ3It$L+Y93_WL8#)w>Vhwz5~>n*=-GYvzptn7N4~C zrVT30TYFnWbNyzP-MlfI-W9~M={xs@T~7ak~K1l6dj?S zQPRs=7{w5Ks{=1bI{QwbCA71}qey^cf-`R|5o;R-t-P+YkdJ=5(pEL<@C+~S>kRqY z=_A9A7o&}G2GG*Guk7?H(%h6d%tJ?Zf>El6T>EUvR7+xKqYGRAtSseCV>HC_%Fad~ zE4^vn96k9_z)@`yGdo5`h`w=qQ^Jx!>;r3t&M@}m!25XX52*S^91QLXfz3kCiEFJq0gYvq$irtXV8Q`gGO69(S$yOCiEFJ zn%0OW^cggv&q?aL_^i52W|)%6;?7s?8_yKR@XJdY9LMlgn*?*elwf+N9~+twqSw(I z|0I|z4zS^ZD_Gn#bsd>N#u*c7CjM9B<1UP2DbR5oWWs3n|5~P2_=PMJo#*~<%S1W~ 
z;3v*Fj_auqpHtwUB$py|HTM63TsF*EiyyMzDzqk&MY117y;YYApGpn+M0B4C``DR% z2EHNYQ_%Lq$!WeS5tBgu@AbbL_+JhDuLk~C1OM|hV1-DD|MSH9|5KtdU3&*Co%-}+ zYgh_yWM{@1**2^+E8VYS@z(JdP`!(*BhILQi12ZK)=ljWW9N2eO;F1Fj_Z$?bfl&G zwo0n3fG=c$61fP%{Sf5V{7yZ#%1m>>OeP98CavEEjsl;XGzE2&g1E&z1G@z ztIm4VZ*7h5_?F`7C34pNgt3^4%#_mJVyoI;T&4D}<^6Yd?$wuc45aNaK9dq}jZ;GO z#?lT>UsJr5=#Nd)nJ^&p*fc{w!#d+1E1~gO~=$|;GY5otp!0s#) z_KRbE+j!n~3&ddZ~rkCwMO{Wk(u2;|HA9!2O?LM$ZU;oxM|D1BC{@Ica$iJg^JNP9k zr8P5ZDQ*nLV#EQnmHHLQ9OJl>0i?W@DfgWB#?^%c^!5A0Nm15<>Z*)L3E_aSZ?Bam>egjKvtv zx)Hyn{l`1g3`R>L>tnSBzlqD8?l)sT$Z6byWw2MsKVkqe z^Mv2T=_Pye^}r4o{?*j-oh3shCQI6}NA;W^^wohQTP^I(*Q@EvV=M(d>pr$7j@#K& zQ_AyIIV`@XMdDowAVtp4vt_eC5{CePR#{HnY zud~F|sZT3abj74+nqNor*XowW<2MjXdld)aJ@j%2%xR&$&keY`+I83 z6yD!Elhd7;K@GEd{3od0<1v0p)u^X{mh@c2+J`s|MC@#%`zeW*t5_-t8oB7KJnPwab*8Y=C8(4 zbNq}Q#@usRpQ14T(k#Gfo_a3WwjKT)GwGw8?WF{s7S}tGp65`YUy?I3&c6)w4EieN z{{9~BD^dQ?@{8u*cVhD?8PX&E8K2i+oFV+sj28aqF993&3gtmX^}~MbkJuMg|0y3T z>F9*~#~wBC@dMKg+|Tqi@JG}^ut%RB_AnU#O7XYK0KcL*LY^hR%6z_Ls8jEU)=QeukQ*w7yzH?-RU*{H}ciu5{p4xqF}Bt;a~}hu!>7@Rs?D zNq-l4`j-*>ibr{z*0ZO42-#ErdYVUB%Gamq$6(w;@*y6Ju`ZrRw^M%BI1^>ocO)o!KYyFsIizn&G;oG@tFL|M6DhL-`h(wR(CToFDcl zY7v)~RJ5zo{Yw?f&*dGRYX09&`7%iv_^T?f;fXCoXDh8;%jG=U@8I$sdt8@6d_3D# zzMt*uLZaa$3wpUFQNZ()@cC; z)IX1R(0a7>g07!Ie>;p@pyxyd>)J}|QuR#xAD>^YKNLAX77z`Jt`aywVtnf24 z&3kN3Jm&*CEB=5$1N!NDa!2nokS7&+T4x2EN%a@iA3GkKf&4!Xcy*kS)ko{o|($tcgznT`K4}eYvT`TG9vo zZBHo%-lp%@VW(h;1AkTbSHt#rzTVQ>84CFq_A{#dZAbeR6YZaOVsIzr|Hsw5U1}`j zbe`=B$XGY(YS2AJb#)csOiv9>!@3Q|joed?Nz{AKhdO_j&*;jrG=9tSuP`gb7!(@wmtt08(rZwe)To4$s=Nx?dhN6@C08ddp^ z`c=8fABnzvsHX%jv=EuAw zpUp6B5A9zzZC|DZ-n+geea$_@ZWR5 zhZ1nDN7N_!nx$6IC6+S_U1=WUMYNu7!9T-2eRqdbAK`pDf`Wm!Vo|>6kIc)s*8#&xNb32Fqn>bv~1_ty41K9s&?&SF1 zJ8wftEx-eu6zU z3x}xH8;fZjiIi6;@1FOl>R0Ki|4o5Ebi~0QK11C>3M-AQza zt17ga%y`Edf|>sgZdcZ;>IZSv!0|QfDW0m{E9r;%;6F8G0Qve!Uec2rZ6@@naJ+G$ ziMRfRR^Ik5qA0_TN}V_|o2!+}~KuItRh< zCrvLruC;uE`iAt7`Q89Iz+TM1kme=(D{5Ot^N}A|(Q-ea{a)<9Pp11nM*rPYK11~E 
z#dyvBhR6E80KHA`SNQ$`dk`O-d(FU)pF;Z(DjDok{Jb~?`yb4Y`d`qIzk!7M`F^3{ z_1pX6NDf1PAJ5y`IiVch+kx||r#27WJpMSHW>3_QoE~v0@G70*oxaOJZ#!@t)ywhY zX$HTU=he_U-X5a-Yl9qWz7%{&zIe!|H`1S_Iekuf$1?6e;BN(eqNhgXkNpn%q&g=L zE$KB+5iRM1U@NuIKOYacO5kfn+bY@)(QXy(cG2#nmd2O#WBoOGeFA?-wEdzTq84>) zD4xlVhG`%3^`ZWy0HFGp)=&1SCH;dRhH-r=XlY$31fwOGS`)Q2KO%tUw+eifXlq5= zN-gH^K#O>o0lnXys+~_%{pr)7KjJUzkJH7P7yPV5Pxnu#et_H&_fN1N7Ng&0s7c`a zskR>u--qu+@;aW=N&ZLkqdp+JAz$^*F!9znOHtbyXej?XYtbUEc$`)te(3<2#(I~k zc>mHW>VMgIFVx6F`*ymYO=GD4O8qc{EkQ9D<4|ug8^-rRf$%?*@Y4*q7wL)WIPO0b zFm(TA`@h3(TK-7){}uG8|EYdd;|KADct%|gN4M(f+zLL3bFy=0dhfAGXjfsK)FK|A zu~D5khRQd+A$0ylUbo71o5A=DoeOGFFXCK4^}G*dNP#IMS0^|MfZ;+Y7%m#(jKRN9Sq>>R7{`JCVKU{D!=4#eArjIG*%XOMb5D3-%}Z zP(MS@kPC5L2zjQwx0f79t)J=qq1Lthe4y&luW-4va~syj*8@Elo-7xw{PgI|xQdZP~kC7mY@5&u=t zf2YfU{9!b!{^Pe|eF@6T#u*sS_GoeC@tgR%nEHJP;@3=ZfcV7?5ZXU`&9pzFUZDG) ztm;qd_*Zt+7@*Io3gozq^tglaN?#m5pOYT6^Oxplh$|D1D{0QBeI&gW-FRSs?4?6FR=V-?QT)Mm0rEtv$<{ClB4 z+`pkaNQiSL&igth&il$9^bbGpAET<9;r^d!IrN|S4?e}|51#8h;vbO zJTc1n`&8wPn7>IpbSKYGx*k4%p{ke>XEx&w`1gjkl7rhK-M=zxQK|f0C3nYSP*$jg**+E82Jq`iNv$n6VD=XlsWUPxgaLoNGlZUmpU9HeL%bc|1%y z-YO5q=SMJD9WM+f`a9^9hRZ4hqxnH`zLk*Db-mF4aQXYDt3+1eSTr8?A7_=u(#NSn ze+k50zS0CB&@>+QSFAD(K7R?}$wt&4agk!_+Rp#y|3&kkYcTMP;~x)?k7hqwHx<00 z-_r||^G{C-SZir6x=80VPKz_b$ybM1zt+m*xURMJ$%Ew2LhkT5oz8~-hlE}4MQi2s z36a02sBmQHznQB;ANhy#l636kwCs&eS6gFCvyz`}O3HRST1sB z#(%$9$wS{-E z_|q9*=QjXW=U58z3)j@KoavT2rYl}un49ZhX-)MdI^B15x6gDH6&@@ zYyo(n_;tFvHavwI;+Z)j5VX*pRJ zuoHUoyrjB~Zb!DeC3}5S%UVa%PBtSq>sQPKB`jxYmbslQy~&OLELp|63FdFIZ&<#` zrsRE-tu+@xdefUs|5JTiUJ*X7GwE33RA$aMWhuv1Grd!Su zFV?+Lh~7eTkp(MiY@Wkr=4a)ywn9t?$C$z_h32g3tS$#&bNzT-dI5w~F3p{(o8-C_ z5C5_$mR82TVCOE?m0HaCS<{%&^0aPNPSy;j|G92*j!Bnd`H0nRaMa@uIBaC8MQ`hp zKW6&rd0Ut+&0O>}yCCmrW;PcUbn0}=brs&J;=r?EYF;iHJ|C)7|UUaH|uhX%vnEVm*mV_te=@n6DzuJ7Umx&4I~QU(Bw!BzFxG=A@kIY~q@0nBjbuo{P1!5|)hL z|DBw-lFcz&u4cN)`3jp_cnyQvKwieiPG#p8PGzaNQ(1m~Ue^6=O2JC~OiTXrx}>Z1 zS6B+N@)FERKh$LuX02wrTUaUl$$eFV?kP55bp~ddt4p5747r%RpW$!v=?#~&>4no- 
z+zhitVHfA;ZPce*3ODI93+6r(H#P5H;*w4pX60o)Xwd&zKRNHu`V{j4eL_cEjyX50 z)DU-pA>Dc7LZ-i6cR_Ahe0F|bS-jz+_?ag2yZVK2*8Da$FLz#24OCadrkHEkbW05z zlWMAA-vDA-4mt|a0pxUa#>0b(&;`^LFx~rxxC;w{hB-w!0mC`zg;_IM(%%fTin6NM zg@qdo=UI*#rW9RoV83U2BO4p9kG~A0x$_M8KW{D;wQh|58pbxUWWS!J73Ck+PcgR| z;@vu(K|e96MVC5v6+6FRiEesPR$_dH+2qxodv$!eIcp(HYSk5(Em=iSzZox@Hx$HQ zY__;{vn*LR>!##1>kPLf7>k-Q@X5F<^oa)j<@k72oIcHDYSpD$3ULxJoL|(co1HWF zS~l5yqwWfe$#kPGVR{b!%gD_tXXm(d7v*Km1^--^4sPPoW#_FiCg?ZoW?OQN3ArXq z)}L9DF=37=$E~ws2)ASMMxB0g?u+q>8+DV-KVtD4;h?W2O=#a4H>s#IenM_X+|;6< z#3z2rGVEnKsI3*`fuggy~%+t@Y)axGNH;n2OjebJi80FJf_qYZ9lMSB*tL$4xH+4A*{6 zug|b#je#2SAJgeBvgAx>Jvo4LT;KW?gJVw`@p&X}J9$;|QbR~XDX{Wjed zX0s_iUYA^@kJJAvOEAP2SuAs3W*IpM?{f~Y>A44(73x;)bAVD3{+pZ!>%79I=Dfl( z^0Q{Mb6#P|&y2I`&mVuD`O0{#_7xBo;=cq=H}-hSY_sJiL(*ZgRm?ie%lKOLC{PbI2&?Bqti;K*v>j zpH4qL|MGZUf{|IUC%JS9tIwTmx#+x$3g=!iahBO)KWAdWs)>f1b(uM)spG~}>2;SR z#4BvJrKn{rYfLd@u%w$4*}|0MHIwHgc^SO$TBf_1CBLcHP0wi_tBbpH;^cy^bI-X_ zH?`pW4wG`jwNuVO~2veyz?}^sPzyF2nfW#IyC2^b1)+<@ofRoN`u>W14#@ zeBLL95*t(F%8#9-sF z7@`b@5J7haKYCi9S}qT~9bWK3hbr-xoN(k8n-I z^q-vYU0yK2T&l@z2l}q?IiXUngNlrm`d&0r`VrqOFZe!J<9>j5xjijaL|i7At}^o` zgD@MGR8lVQMK;F}{ONZpg%9=9{r6VtR{+95sd5!yycLLLCH}wg#(jK^*CV*xKFZ|R zME`q3quakw^hs`?O%N{QOWhG--XPjs8$^AR?A#z$yEljxpyhH=L78)duGtBIyRM2K zA*O+8)1+O~U@}C~84=%92z8!|#72*^hnPR&ydFi_J%ryV)_FFHB_5ftRp>^MQlG~M z&70|klL7^7-i_iSuUAns4t-v^WssxSy?(J)0VqT3Mv=^P$$llfuGDEKmW?8&O;xKK zMXnrK%>`!ueyY=gjiOM@+ngX3E~wP~Q$hdbq6Vk?$NVhM0zuF%AamU<9~;W$ih>5G z%S)QQ3pQ|NZm%%I^)Y9CaoS?xVGNTe`MF+qSTx>DD!r>}bPi#b+kd#)bWN#nqhQ@t zLxjhxn!MhJ&E@jL=IO4xb@O}+O0A8u_eV8CH>bbeM3M$$LTL2*T1dgIyzmBJ>pLcp zAJq!=#C)&7-&rg4lDU#^I>-N3oR1w|3}#ZW$mesxXfM?AdPMZe!Y7}sVHo)&T)BfCbQkr&A0^5EZkVs z?3VnbLiQ8I6Z7{0z)86iGoBkxTNIuuDsvw#xWMIDuv&l`=ejY^R(cY)T1UTF0scaj zV3=Jf{-t_>OImQZV7^!|953J-VTp$e=DJ+_>f0pmk8M2nyp69T=6RKKB;P!-#pjON z=KH*}?F(ed+d<~|Tjmn+UBd+~=K>#J>hkgCm37=rChl-OnMEv1%K6(&T!a)XDaXtk zm{~7NCpg2?)49i|3;%{t`Utiq48&T32^xdPbF+zGZvjwvp?-FD7Zv^=& 
zm(C=Z>Mf5$8C;F;ak0toc-b_=_o!9;#9lPFs?9GyDqg?~wK`~9ZW4)bhuGqG-XU`L zo5gEqna;16Ex8K#67O?n!9`~Kpm)T>!Y^kb^lfwd))wfvSq+eZKR&DA2@W$}6KU~! zS2mC@5&mB|gU~3x%vHI+RLFg;cKT>vqj3*<2LUD)_CkzX1?G4- zai00|19OC5rM>$7XO8@Z+uZ92_C)TOV{Ib^miy))My#?)UNXz_7(FhrIdN=*4bQT< zUCpFcay8euoz3%IvfON!JaTiPe-7Cr+CG1fE4S>L(Blgc`2F(zyWj;uG#;cPgo zv^f_O=z$)&=n+mUtK1dMa3_umzZ8`IP2dI#O|KSl=ElShBbjC$gaOTME`CDM;)8a_1`f!=B@cTR{UyDm29{-9V$&TZ3H+zFBX6om6<* zIr9@Hp^OyGD(B;-Qu%YN{MZ8X?xN=IXyinw-w_Nu@_D<-?T%#--ze z23?!ebwLCC>-i1(3fIC0@ov6+FkUxp@Bw3~{4U|vR@#SSZn4kQ7Et%tT)K{8rF4mn_S>^cLOqUe&9v9mj3%)nYQ0ZG(Kpx|SduEB_)Ai3y z=kF5ATzh$I(#Dx?7j#J*GJCEgre4V4X)}rZdwi+$m{4>HETu}PbJ}s;bqP9tp2_@y zG0(RQ8uhI`gH997t;}+s$-0Qt+oVNgUeOAATy1mW*yh4NYrlzWk)?i9i_vCxSCuF(&pa$T{BM_ zxGL#hbBX+Bz2VKdHt&7r!Un&$Qg67=T(p#MGwO=&HIq*({7#2A0x z(iqPtI6KYy>xm#4_^S;Ka`3V0s?il@GC|DC%nRk9d#QQ0%d?(XE6jSUzFE)vNR4N} z*yL;RU{Pk__%%+;&4v=+0YjvU*tTBD9 z&~#S;|7cB<%Xcx?=J#1R9seb>@#RuJNQ8fwU+a3n%>S!F@FC=R5})QSe^8cWr`PBA zd%X`D`Ds!$H^c2E6;fG|ev^?e;^5IN7tCz(wm7&d$JLHf`Dey5`5s9aB*yQ%O%(#x zhc?n2m!E`c3<22>YIf`xNx3da&C^!;Cmc}Yum`8GV5Xe1xAtQB^!UrW^@0( zkt=cgo6H`I@PebnafB2u;LH6BxGMRY0;e4GU&6IWj-Hwd>B~Cf<7G{Y>%Zu)mhbVD z$ZuPC6RBSC9~PmVw|aCuf4Ua4gw46h%aaFq&P19$(v4iT|NME}9gZ1N2`Q6iRq=0O zC*f8sr9;HDv4+2(s&IL88$LP0uN2;G<6p4Vc)ozH@^$GKN)BlyhX`~oSY9FMr@Ou^ z6fPC`1uYg)H`{}?piP=zZ172*%`YNUboAi->NeecuVaK%`9`qIqi-X^2x;?eURbOz zmaJZ*R54Ay{Jcd=x_pN{Jikz|x$n^v;T>tdIwZ!k{jXN!FIVk-D zB9(vk>#g_e^^LM)np|-Cj0)7;Dm&^p@rc8=`B96&Jwy1v8jX)xR-l0UEsXIeNu9g5 znvBm74#?KG8`17sVm(>QH#%h-ndjS_AmShO8*4P5B!to2K zXbo?Z9+b?@PWeHJzms6|#5|)(mX}C#r1Ppsg>$B4daDFGA_yvuv~3$43AM&Yx69%> z>=@nVY98PEWx2#TP6`)dS*DlEJa@?rBN02{J{5!}Qttkt!?>t~M;PwD!(o?$e=fGZ zRm1TQ=*!$yT)A&hUq1M>L)?i_;Jh14^dCB>w=5H!G0E9LxJ88fm7vF`0P2bUUcu&# zm;^lA$p30ajg-*ydv)Y|A|8?oCfpUy2{*hCDU%lw!w=o&5rlfX$y}FXP$H$GC~;N( zN6N84ui)F9j-ZX>OB-vP+iQu`Xx&)6*z}*eS?(4*gtm`ZpEeSg*S+}^XZQw|SS`DY zJ509|KCZ9x+)jjt3J^|j9_6h&9Kw9r|BG=z> zqjOD*=x^ioOB!WAns7{CF@C6x|BG#|*MF$3RQ}MZtCSji*xoe#xS4;uR;VSWEz_rC 
zPsc0~!;a=Etc1ze8^Fo*(Cj9stD0~2;l+hAcX5SDLImQGh!r|2fuh=cp5-vBS;Olew-rx%?m^vFXMJ+5IG`ao;(k(p@`^ui>m47V6t%SK~CDu&BdQ z*P#q3Y%F|g<=nDo^LGmP?-1-ftU6yjT3FDa7v2$!1FcPxzd`?~VF`FS1WNeZraE=UP`pu3TQ$KolgCEGYjvVj*W#w_15?CfsLJWi@q|g$*CgM}P z^`DvxUQGQo*gJ+&i%hKV8T44)G}@#Eq{*`1j^pVzCys5d1;k_(jBS>Z-a4;fwBb*0 z`5|XD810sya@GQ4o#koHY6W?ovzCJVD`%|$`5kAiHkMiqy+x3&+KBC&YGV@Zs5Tp||!>(@Kgwfh$Tu>)9 z84K`DlD`_PHxu1-*K(ufeM6ZGD=zukY8|Wt|E#>hIWK^*oXGA1j?c!4;H*W0T7RvswGH4_7Um=i=#}Fd-A0lF1$% zpV*pA(%*P0?oTH7<35)UvP z9j8EhKfNrAog2q5&l+yY^Zv|b9pXquD^aQ98H_>+Q zLOIZfE=4;*w|ps?JPvy5Dzul%yC<2v2DE1{+6Ni~{T1jb&<{ZESEK!C&p}WJ=rPbv z+P-~gA7~HgGoTY+f!v_v8stN}W1xFM$+h4MIsy7F#cxD=OyKhm$>b0yxh0u=47B*R zWbz^Cr}$ft7kamY_JN)N-3@vQ^Z;n=4(JPX_npZkeF|#=v$r zIDwm!4Z!GwM840dIwY3jE=%QdYrziVZn9{`d2^i&GifAMAasLuZs#_#HYiVwq3+AYQV_;yR>9$}@$9yVJlJ1xcS z7GpbXgXZtUpAmXf@_+s8`Kg}gX!&=XIX{)R24nQZ#jqh9Q(5-#ofbPkuE;=TU5M)+ zV9d{9*O{!VEcR|o^g8r(6NqRL`Sv`tWlwtte0H`YOM~QhB2g zu-I=9mssZP*L7MP`}FM=-yXwKOAr67+2U)rINB|9mRRh_i4u^#y8vJ3#d9CvRWqJU z-i2ebJiWc!)t*G=K*1J^@#$xu@1bhgC+z3RLS%1&UMEm?FMN$S9r7!7&{3pc_Eq6V z@dn*~ediv7(8aeF2V1d4#hEc?0|nj6X(!6#E)AI&NG4CB9pY@Y{WsFK+#o>G{URjZ zN87ST-)-5#-)vUe(4NwUr3FgA9RZ(PFJt;Q=GJ4T{;6HQi*n@hWbz1z`0p5@Q+B-6 z@{qt^i1q`5C3hCkalKcByvR?V&rt1br82JVIHk`w9l*6Q9E-Q5@mg;=sDAP88I(VT zJk%!1CQGp$gw7jNf$vW>`eg+Ul%@0C{x2t!Q53sVJcwUF&Tyktpm7`OYV=u}q-hJ+6=osmN?B7J0-$5Sn&tQ->M!GRZ zp5j`Ix3`jLYjKPYlFovmR+OdVa{}cTVxFh*koaM)@~cq(QnftaLrY^+pkDkvkFq_O z>pkdqk*IyddJcoaLh!Z+;w|P00 zuM>Y|P~AmeO(s90@{Og*m*J;6EsKTQa)`H7vHeZpbL^sI@>wcd8~9B5o7I+=c;PZ( z(%~rMtQd8FfML1`aWcA6NfaxDi#z@`#DWZ)18N8M46q+lY>MwnOV5|nn9}kqk@q6R zd>T(Whiw7Y2keWqJo@wo>WBB!d2b)}!+UhAF_ZLK=D_`8`a=fH3Jzqw73IE3d9k@D zeZK7ByHm%}k^;pyJ&!UwZi7h(1h8MT`p+<@t;S^VP%a~-^31`sU3lQL$wSuy zf1+h*-AGiP7_c{hT}^>08_zYzQh7GzljjleX}S5c$@4DC7!ix7%0pm`BCx+#9y;h& zQr@4Tzb-?WIbTngX98FQu)jy1El};N;In^YGKt6an2+8>&C8BBCSY*{EyX>`X@CYV 
zj&+~$beY9S?R_BziV?E1r34T{l1Z-60tMIT{A4NUIhbaUx@Rrqq><+QNI`yIxEDHtoQ?F&C0X}^KrU+dgFv3+bT13!Dqe@<{v~KN-pEd>J8k3c4pvWLCi0YC_o#^+Ht4=(A|D&;m1bWce+$h1!0GNWlA9^Bf8}+% zjpQalchpFJAyGMO2MpwH zL!B}r{`cSiYT$p41|I5B+j?xZN?&3$#_<0+n)v^trEqgfx=>l5X_(PtTb zh0&9Yo@P`ZVD&TF#HfeSE=JcgI>_jDM)xs#3!{e^J<8~_jK0F?Nk&gIs_$a;Gup(c zhtV!Z*E2fE=ypc;F?tK5hZsG|=(CKz!stmxPcy1t%IasdiBS)uU5u`0bdb^QjP7If z7Df*-dX&*;8GVJ(lZ>8bR8KcNkjfctV${QE7o+PL9b|Mnqx%@Wh0#Nd9%b}dMqgp{ zB%`Mp)pxV{8EslqznbUUN_7`=tjLyR6}^jSt$7It(ju+Ley6+cBfZuhpjTOISA5K0;7kcSpX506>#6zhR4dW^eL5}>&CjRfBGKl9bX-R?znYHgiRO>f zaRbr(dOB_-n*UG7O+=gb(s466#{5t^j(r){FX?z8S)3Ecrz+Vznx5Z6PUOU`S^Y}O z(Oq%JA5%G~$MpAJhTh7Bmyk!^&xMyx&f}?91f8gFvcqKOh%A10U?jg;(Q41i5f6>X@pwN{4+Q^4H`Pq8@P|JZ{1s zWAUq2lDb`rYyCKz6&z;x36_5)!|!3Z<3bhC@ZT_ecLqKU6;u90%uX(2eEJyv63b8T z@z8WV!w+7fp40m!G`+y^9x51;hKmS@Ga}rb1j@V z2r8dUJj(EgSbn+(P19iw->9C`{bZU>GyD{5FWt+f$tlv9wVFg&KeFbMB@7?R!21}! zEdw89_+SRUli}eE{5pmYWZ>Upcz*`|9K*L};BPZLl!2RJZ;D;9^$|{$)SSl-FIF;S zkM{`Uvv{vszL4>`4mj0YjMZyo__tYpZC$4958~R>O8%*Fq{Yv#0?#!rUT1txtYM59 zd6VIG%|ICb0mC!bffl_gr#4Qt{;gH;Y(LWpoa#+mXKJ<6`%ZX)F3Vr#lRv8DpGJ=4 z@O!!1d%2Q-NoSi`>2|LjOq-z@kI(=NDh8Q&8X~Oq zAIyXA%7Z@v+|Fc6CBFeqQa;^eS`RrzXRm&4+ z_?8isfwu1bM}<%JJpS`M{C|@Ne^0?-=hvwGF?^I%WzH>UH}IJ(PbvxL$^RoIe<5*P zuI8rq9celZe42Wg_TP<#HPxGg=}q$wJ;14cinplQZ(^nBeGG~dWAdxoF?bsjz+0;l>pn4zDl(%f>o^WZ_? z^?31Z@lC4U>?$F96nr{a#`tUay?OZjPQfv**|_*M<3n#`6=SzCb6lH%D}LY$YQ`fh ze=Be_Eh}xvgC~IJ8ebE6^52&Se*rj^$Ij$Az$7SwYoqwVaTTD?snFC1JeQnN;ELU{ z^%u-iNjK%me>@NVpTKFoFAb~Zwe`-d^4#^V%7c#pSM(fH`OxQdXu3a7{uf#P#ToYb z7Q-{=SsylPbLnAy9(-KEF;8jpjw%khf#Jv4Jj*VVpD1|tdjH8h{ND$j%O2)pqNMX( z&$ViW+I%+zoZ9~}X8$Ozq&-T0c9KFK2cD~6-evf)Q8lyX#{vjQX#R=KDvoeSNh82> z>Fp~D4m)HD_#(@H7vr;=@nO>oc_t5^Q+e=$8M*Zn0G>;pxPoKSzFrkX)7z0e`QKpq zW7nzqwSF|<1_agfZl-7Lenb-vsDA9s&Kp>Jw=n!5YcJf3l6C>l)xQsD`I+6$Vfo)? 
z`0f!EpwIKsv`>4XZqPpl%&~JHSk<=cC!2@GV%{HJafL@orlja6de9%uUhYqRYE>w zd=_7+0`wU?nrg6!$R*E4;8g#aCfo3h`s$({^R*8Q(`za0i>ev~l++ z@Lc-&I1j$LQPt0%KC&yd!d)wA40tX+w*#m0WcCYwD}MF5;hxo!BM}}Qi^d0sqdO!w zzF`&%2l3PC;dpqoKP-8=wZo3+DE;a9KtK+!Ot@cFkuz^QSU6z-W4<}2p@ ze=FumFSepLA-M*^gQ4-^L~tNH98QG0(-h?CO@tETV{S^qfk6Lopa*Jbjf~+lUU>5_ zhf4Da>`Jfs%)Q|6jEta@prAEzMO~rMM28&MsVdm(rxl-(!xV$5Vm`-Ni%C=E(kfn! z&qYY?NuieE%WK17*4;iSmm1E=vhdltRLfeC+9Xe(Vx=C%KnJ=9SB^-5Kwx!m(9_qq zD;6H>?h9Vj=LiOd1CpeuNX<`kIJYH+B}aQa9@^DA9O(~t?+j?Nx~Z&ZsweW_Q%_FQ zI47N?t0Z$coD&Otd?7`x_!dKO4CWi@$1sm1BKTb4r8tX@2GNT!`pK~vbVMSp{d91~ z;?Y)xsBOINO0(!LUE8Q&W04kiYJW1*4QaAb5_ zYkw?85+kvlgNdL6$?d{x-t>T``R>%LL1Z2u2n~iMrO;p~u{AQ<8A^oI<^_ZF>bg-) zT}vXPq4=(KiOBGFC4)oF&=pE_jH94BqLeGp!&o8`M4}wlGIh5Ps|6*f0d;kTx5Kde z;GaovblKv=K40kus z>h<`2xibX^(e1$y9Y;asyPIS%9vTS;2gXN6cA-ehnPOULS%bl@we70{!NBUyAUu1p zZ&inO%+$Z8tE)HA7wl_avNC`iof}uTuj=j~aI`zAu?3f|UKb23V{DdnuBAebMT0|% z^C6uZ@a@jQ)?jek&Yi&+J|-F+g=Hsp1-ECpIro}aXLztv3Q*Oj`Az2DJ0$8h+~IgU z8h3R>2g3AJl6n(y*hEhx+#e30q#LRqQ1p%tg5?B1m*>kS>NFRLM0!>X_6+vmKs%!K z%Z{a?#85cigF%&88V?ODjg3PBcyvc_IJymEUTIX%ivGomm#*wy(s9v6L1(KgQx6hUG&wobSzmH=BzAU% zBE#eHZ~&zvYgeFZrY@=sgaObR=KZmB#v|d8{@5;t1?5&rsS9HHwuZ(a4!jL?ivQuz zNOwPQsecGQAQ2Bm5@QGn6&zWy;6`xC%p;TF&K^S2*n5RQJ8Q%guxI@gtno4-+Ia26OV4~rG6l+bO@oN zZ)IqFw0~&v;^D|vdWzs?EZXXC&9X38e>5JJ9Bada9Z@_F5gzDtWE)$y@6I{fQd`V5 z+Y)6sLfFwABk8(<&hhEj@RZW!Ryxm}qjggCrCA-DdHo$j;r>hLTuVoTkGeMH1jDd@ zXNU?zXOgU1)5h1i7_d6{a?ROPvfQJ^+X44Aj)*KXoXs#}YN>teVP)=A>)Sd$nBVl$ zRg2=?xkaGDrN%2hjAn$lp<&6pDs)L$QQn%E>O#7QBLM`CV{2FR!59&>U_>d6b9KR{ zhA;&y&LGA5nEkjgX{Eayj3Z?x84HKv{X?mq21cW~8s(5yz+!dTfw2)DiNQO@!!eA( zXgstn9LQ{HT6?lxf-^Kc9PNjD4ko}W-6$0IknWj!sKb~F$|OIgFj_nsZ%@DpY(@8t z!GR!`>|mD=l%!mveb+7LRS~`1n>}o4w`7iGRhz-B^qbQ7)HIDS`nn}gsvo947?zF8 z{g(v$hc3Y+8yTJ&QQ02T(HU6Z-4O^b?OCVz!&F=FC{A}wa(9nz#~>MKk8c}CJfG-Z zj*e$hEL~BIj|DnoeL=<5^oA4TF)d?mkJB3+9t%V=d+jV1LCqk$sh@2C!7%8c-x=B( z*)B=2$^PLGbeVu}pi9RnEC~}!`;zXU)T%80dJ}$iz(t~P>e*gC;GzK~hD7Sf#EhVh 
zOevUcn!Qm!)|-*hv2Z-mJsb$E?Tz*Y(^S!lRZ)0!0G$}a{1*-l1TP8i3ijhDfhi)m zZ3Gdo()$QUF#imPN2j`1b(rb)?@(ER=Vy{dNFn3E~n=8tQk+h9p;Q$R;OuhB%kKe zo!PlBzEVbb9v6>ve-iXKW1yYw&Xe!#$bN zNk(&Amnb_Oa3WOPU6NOum(*|`s~$=2#t7(Ua|R3xe$YQ@aoIiD(?$KMPve7$kNQpR zR9OH|ZmDD%%jDD~OKe(Nh0p6_24IGrD>9a{%wLO8QytwTXUV-Xx{YzgnkF=iKBQq| zrYL^u-<3rLBNT&I$p~jN zCi!fwxD_2L!Cqf)nghv-326)a)}K93S`NvT67AD%H-9Hvu}9+=^=k%KMn=O*^^yiRUThAcUmQd3u-T?m8emO+kriDp|B9>K2+QtGv$34~RbJ*lm^qug~&~-RWdp4_~y<;9kL--Al9LwQ6KU4yyvVV}p{xP}{ zpzT1Vk6d-w(pV|0E7N971e*Egf5qcWMQ>ONV7K6G0h|k)X&LJd_EkJZ*wkf5&x%OT zAlu=fhY+-{H@6?pY{TTJ%`-1~yQ1;+p*Y(aNu77jWXKXVv*qkHz-6?d$xk=3m%W-!i-@ zWb9UNe{4r|nJb*fg3Yb$2X_x&)Q5%ra$;&E+hu5M@2rC;t%EbaEu8HW5h-9AzpIyK=z(b@Ar zYLA`GwfR?vcfg3T6{>^^bZMi`Fn?dDGlKP9j!-9Ms#fBu9Mc%xUe%VWnVYMr4NF_l zp2-xI_@AmRt#sBtvs+n_(M_eOx`tJEnx(W&?w-`Wq)eWZev(>${#6l*DNgS*?L%i8 za9U#~`#9H*UnKXAU#c>)0>U${u=322=t^}{qOT8Q=D)9T*^V#k$pGJmNMdMhX!H^U zso1cV6bHg?4ezA4$+LQDa*H%`waENrx_Y(6Z7#LsUizN>u0{6NV9w}5s~SNly&K|= zDB+c|n$5qEoRl+rnaV0T>$aG?e;E6^0rVuk^^J-Z@yepYGx-CQZA;Lt}8|);aEc590|7bFCzl zS@G;)4thm)j$uEX-iaC*I77T5oiX&LeyulqC6v|anV$DE>Fx|h&o(`CnU6O4{=Ls` zWVJFKV~Ku;HpTf5Imr;~Fq6{W{< zLm@Nzqt&MEzG5RH_rCR+#8cb~!7p2lBtlz36LFOeX-A{cM7S0Em#tgrKK1;_08!3{ zLSsXubzs*hN~$ywSFf<2-b_0Q;#xdB9HIr-QEWItT9vhDD-K(?MR7{KA!()01t3#Y z*-mO5-AgcuyWe-)fyZ693)<8!n=NI(%xg&=qam;(|S4l zY2~%&0~Z6MytVS$`|cXmzITEwN}@bc(=wdUZ;xo@wfEvRO5Y=$%O--AwBYunoF~Dt&CQ`Mqk4P>LRpi zwC^pv#L8>=@|FJ+E>M}Y^4fP9US;KD83JeWdmm+~iEHJx?>8*_1RzaJ|C-*l@|qo9 zg)FJ{Yu|ZTmQ+fUM^rNTUn{TCPczEXr>NDW3eJA_K5uygvZdCqeK*1|H3nL} z+Idk%dF}fW%UJn*{il_$LwU+m)1USp+Lm)HKa17x&=@kRm0zEMtF2PoOfS*%j3J_w zc!bq|5p#fbKzs0~wO?x&^$(itNtNaU#)toIE1u{;_ F{{>@ahhhK# literal 0 HcmV?d00001 -- Gitee From 868d9f8c9084bc5a0c1331b066ace4e8981f2a1d Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:34:49 +0000 Subject: [PATCH 59/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= 
=?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/data=5Futils?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data_utils/.keep" | 0 .../data_utils/ModelNetDataLoader.py" | 103 --- .../data_utils/collect_indoor3d_data.py" | 24 - .../extract and save point cloud .py" | 157 ----- .../data_utils/indoor3d_util.py" | 598 ------------------ .../data_utils/make hdf5_file.py" | 58 -- 6 files changed, 940 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" delete mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/.keep" deleted file mode 100644 index e69de29..0000000 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" deleted file mode 100644 index 3e343ef..0000000 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/ModelNetDataLoader.py" +++ /dev/null @@ -1,103 +0,0 @@ -import numpy as np -import warnings -import os -from torch.utils.data import Dataset -warnings.filterwarnings('ignore') - - - -def pc_normalize(pc): - centroid = np.mean(pc, axis=0) - pc = pc - centroid - m = np.max(np.sqrt(np.sum(pc**2, axis=1))) - pc = pc / m - return pc - -def farthest_point_sample(point, npoint): - """ - Input: - xyz: pointcloud data, [N, D] - npoint: number of samples - Return: - centroids: sampled pointcloud index, [npoint, D] - """ - N, D = point.shape - xyz = point[:,:3] - centroids = np.zeros((npoint,)) - distance = np.ones((N,)) * 1e10 - farthest = np.random.randint(0, N) - for i in range(npoint): - centroids[i] = farthest - centroid = xyz[farthest, :] - dist = np.sum((xyz - centroid) ** 2, -1) - mask = dist < distance - distance[mask] = dist[mask] - farthest = np.argmax(distance, -1) - point = point[centroids.astype(np.int32)] - return point - -class ModelNetDataLoader(Dataset): - def __init__(self, root, npoint=1024, split='train', uniform=False, normal_channel=True, cache_size=15000): - self.root = root - self.npoints = npoint - self.uniform = uniform - self.catfile = os.path.join(self.root, 'modelnet40_shape_names.txt') - - self.cat = [line.rstrip() for line in open(self.catfile)] - self.classes = dict(zip(self.cat, range(len(self.cat)))) - self.normal_channel = normal_channel - - shape_ids = {} - shape_ids['train'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_train.txt'))] - shape_ids['test'] = [line.rstrip() for line in open(os.path.join(self.root, 'modelnet40_test.txt'))] - - assert (split == 'train' or split == 'test') - shape_names = ['_'.join(x.split('_')[0:-1]) for x in shape_ids[split]] - 
# list of (shape_name, shape_txt_file_path) tuple - self.datapath = [(shape_names[i], os.path.join(self.root, shape_names[i], shape_ids[split][i]) + '.txt') for i - in range(len(shape_ids[split]))] - print('The size of %s data is %d'%(split,len(self.datapath))) - - self.cache_size = cache_size # how many data points to cache in memory - self.cache = {} # from index to (point_set, cls) tuple - - def __len__(self): - return len(self.datapath) - - def _get_item(self, index): - if index in self.cache: - point_set, cls = self.cache[index] - else: - fn = self.datapath[index] - cls = self.classes[self.datapath[index][0]] - cls = np.array([cls]).astype(np.int32) - point_set = np.loadtxt(fn[1], delimiter=',').astype(np.float32) - if self.uniform: - point_set = farthest_point_sample(point_set, self.npoints) - else: - point_set = point_set[0:self.npoints,:] - - point_set[:, 0:3] = pc_normalize(point_set[:, 0:3]) - - if not self.normal_channel: - point_set = point_set[:, 0:3] - - if len(self.cache) < self.cache_size: - self.cache[index] = (point_set, cls) - - return point_set, cls - - def __getitem__(self, index): - return self._get_item(index) - - - - -if __name__ == '__main__': - import torch - - data = ModelNetDataLoader('/data/modelnet40_normal_resampled/',split='train', uniform=False, normal_channel=True,) - DataLoader = torch.utils.data.DataLoader(data, batch_size=12, shuffle=True) - for point,label in DataLoader: - print(point.shape) - print(label.shape) \ No newline at end of file diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" deleted file mode 100644 index f1bdeeb..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/collect_indoor3d_data.py" +++ /dev/null @@ -1,24 +0,0 @@ -import os -import sys -from indoor3d_util import DATA_PATH, collect_point_label - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = os.path.dirname(BASE_DIR) -sys.path.append(BASE_DIR) - -anno_paths = [line.rstrip() for line in open(os.path.join(BASE_DIR, 'meta/anno_paths.txt'))] -anno_paths = [os.path.join(DATA_PATH, p) for p in anno_paths] - -output_folder = os.path.join(ROOT_DIR, 'data/stanford_indoor3d') -if not os.path.exists(output_folder): - os.mkdir(output_folder) - -# Note: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually. 
-for anno_path in anno_paths: - print(anno_path) - try: - elements = anno_path.split('/') - out_filename = elements[-3]+'_'+elements[-2]+'.npy' # Area_1_hallway_1.npy - collect_point_label(anno_path, os.path.join(output_folder, out_filename), 'numpy') - except: - print(anno_path, 'ERROR!!') diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" deleted file mode 100644 index c761dfd..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/extract and save point cloud .py" +++ /dev/null @@ -1,157 +0,0 @@ -# -*- coding: utf-8 -*- -# 注意此时的laspy,有版本问题,2.0以上的版本就会出现问题,此时的版本是1.7.0 -from laspy.file import File -import os -import arcpy -import shutil -import numpy as np -np.set_printoptions(suppress=True) - -# 获取txt文件中的各种文件 -def GetData_fromFile(list_filename): - return [line.rstrip() for line in open(list_filename,'r')] - -# 只获取文件里面的shpfile中的shp文件 -def Get_specific_File(dir, ext): - need_file = [] - allfiles = os.listdir(dir) - for i in range(len(allfiles)): - name,extend = os.path.splitext(allfiles[i]) - if extend[1:] == ext: - root_shp_path = os.path.join(dir,allfiles[i]) - need_file.append(root_shp_path) - return need_file - -#分割对应点云块的shp文件,以便于下一步提取 -def split_move_shp(shp_path, out_path, label,splitfield): - 
arcpy.Split_analysis(in_features = shp_path, split_features = shp_path, - split_field = splitfield, out_workspace = out_path) - all_files = os.listdir(out_path) - for k in range(len(label)): - move_path_dir = os.path.join(out_path, label[k]) - for file in all_files: - name_first, ext_first = os.path.splitext(file) # 分离文件名和后缀 - f_name, ext_second = os.path.splitext(name_first) # 再次分离文件名和后缀,因为出现了.shp.xml的文件,对于.shp的不影响 - if f_name[:-4] == label[k]: - shutil.move( os.path.join(out_path,file), move_path_dir) - # 点云规范化 -def PC_NORMLIZE(pc): - centroid = np.mean(pc, axis=0) - pc = pc - centroid - m = np.max(np.sqrt(np.sum(pc**2, axis=1))) - pc = pc / m - return pc - -# 采样到2048 -def sample_data(data, num_sample): - """ data is in N x ... - we want to keep num_samplexC of them. - if N > num_sample, we will randomly keep num_sample of them. - if N < num_sample, we will randomly duplicate samples. - """ - N = data.shape[0] - if (N == num_sample): - return data - elif (N > num_sample): - sample = np.random.choice(N, num_sample) - return data[sample, ...] - else: - # 相当于从N中抽取 num_sample - N个随机数组成一维数组array,成为data的下标索引值 - sample = np.random.choice(N, num_sample - N) - dup_data = data[sample, ...] 
# 取数据 - # 按行拼接 - return np.concatenate([data, dup_data], axis=0) - # return np.concatenate([data, dup_data], 0), list(range(N)) + list(sample) - - -if __name__ == "__main__": - - las_name = GetData_fromFile('E:\CITYtest\FilelistAdd/lasname.txt') - label_name = GetData_fromFile('E:\CITYtest\FilelistAdd\shapelable.txt') - #自定义需要得到什么类型的文件数据 - file_class = ['Classshpfile', 'Classlasfile','Classtxtfile'] - #输入文件·路径 - input_shp_dir = 'E:\CITYtest\PCinput//' - input_las_dir = 'E:\CITYtest/NDSMlasdata//' - save_dir = 'E:\CITYtest\savepath/' - - - net_num = 2048 - for m in range(len(las_name)): - # 每个类别shp文件的路径 - shp_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[0] - las_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[1] - txt_save_path = save_dir + '/'+ las_name[m] + '/'+ file_class[2] - # 获取每整个点云块的shp文件 - # 注意shp_full 是一个列表文件,不是直接变量,引用时需要用shp_full[0,1,2] - shp_full = Get_specific_File(os.path.join(input_shp_dir, las_name[m]), 'shp') - # # # 开始对整个shp文件分割和移动 - if len(shp_full) != 0: - split_move_shp( shp_full[0], shp_save_path, label_name,'newname') - # 数据分割完毕,开始进行las点云提取 - # 得到每个label文件下的shp文件 - # 定义要提取的las路径,开始提取 - all_las_list = os.listdir(input_las_dir) - - las_ndsm_name, extend = os.path.splitext(all_las_list[m]) - - origin_las = os.path.join(input_las_dir, all_las_list[m]) - - for i in range(len(label_name)): - # 获取每个类别的单个样本shp - single_shp_path = os.path.join(shp_save_path, label_name[i]) - shp_list = Get_specific_File(single_shp_path, 'shp') - # 提取单个las的路径 - single_las_save = os.path.join(las_save_path, label_name[i]) - - if len(shp_list) != 0: - for j in range(len(shp_list)): - stir = str(j + 1) - st = stir.zfill(4) # 补零补够四位数 - arcpy.ddd.ExtractLas(in_las_dataset = origin_las, - target_folder = single_las_save, - boundary= shp_list[j], - name_suffix=label_name[i] + st, remove_vlr=True, - rearrange_points='REARRANGE_POINTS', ) - - las_list_path = os.path.join(single_las_save, las_ndsm_name + label_name[i] + st + '.las') - - # 
只提取样本las中的X,Y,Z和intensity,raw_classification信息 - f = File(las_list_path, mode='rw') - # 改变样本标签为 0,1,2 - # print(f.x.shape) - point_cloud = np.vstack((f.x, f.y, f.z, f.intensity, f.raw_classification)).T - - point_cloud[:,4] = i - # 给点云加上底面,获取行数 - row_num = point_cloud.shape[0] - # 生成和行数一 样的一维0数组 - z_back = np.zeros(row_num) - # 底面 - point_back = np.vstack((f.x, f.y, z_back, f.intensity, f.raw_classification)).T - # 屋顶和底面放一起 - # print(point_back.shape) - point_cloud_add = np.vstack((point_cloud, point_back)) - # point_cloud_add = np.concatenate((point_cloud, point_back),axis=0) - # 采样到2048 - sample_pc = sample_data(point_cloud_add, net_num) - # 只取数据坐标信息归一化,中心化 - pc_norm = PC_NORMLIZE(sample_pc[:, 0:3]) - # 规范后数据代替原来的坐标 - sample_pc[:, 0:3] = pc_norm - # 存储为txt格式 - single_txt_save = os.path.join(txt_save_path,label_name[i]) - - np.savetxt( os.path.join(single_txt_save , las_name[m] + label_name[i] + st + '.txt'), - sample_pc, fmt="%.6f", delimiter=" ") - else: - print ("该类别无样本数据") - - else: - print('该块las的样本shp为空') - - print('successful') -print('over') - - - diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" deleted file mode 100644 index 42a7d97..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/indoor3d_util.py" +++ /dev/null @@ 
-1,598 +0,0 @@ -import numpy as np -import glob -import os -import sys - -BASE_DIR = os.path.dirname(os.path.abspath(__file__)) -ROOT_DIR = os.path.dirname(BASE_DIR) -sys.path.append(BASE_DIR) - -DATA_PATH = os.path.join(ROOT_DIR, 'data','s3dis', 'Stanford3dDataset_v1.2_Aligned_Version') -g_classes = [x.rstrip() for x in open(os.path.join(BASE_DIR, 'meta/class_names.txt'))] -g_class2label = {cls: i for i,cls in enumerate(g_classes)} -g_class2color = {'ceiling': [0,255,0], - 'floor': [0,0,255], - 'wall': [0,255,255], - 'beam': [255,255,0], - 'column': [255,0,255], - 'window': [100,100,255], - 'door': [200,200,100], - 'table': [170,120,200], - 'chair': [255,0,0], - 'sofa': [200,100,100], - 'bookcase': [10,200,100], - 'board': [200,200,200], - 'clutter': [50,50,50]} -g_easy_view_labels = [7,8,9,10,11,1] -g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes} - - -# ----------------------------------------------------------------------------- -# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES -# ----------------------------------------------------------------------------- - -def collect_point_label(anno_path, out_filename, file_format='txt'): - """ Convert original dataset files to data_label file (each line is XYZRGBL). - We aggregated all the points from each instance in the room. - - Args: - anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ - out_filename: path to save collected points and labels (each line is XYZRGBL) - file_format: txt or numpy, determines what file format to save. - Returns: - None - Note: - the points are shifted before save, the most negative point is now at origin. - """ - points_list = [] - for f in glob.glob(os.path.join(anno_path, '*.txt')): - cls = os.path.basename(f).split('_')[0] - print(f) - if cls not in g_classes: # note: in some room there is 'staris' class.. 
- cls = 'clutter' - - points = np.loadtxt(f) - labels = np.ones((points.shape[0],1)) * g_class2label[cls] - points_list.append(np.concatenate([points, labels], 1)) # Nx7 - - data_label = np.concatenate(points_list, 0) - xyz_min = np.amin(data_label, axis=0)[0:3] - data_label[:, 0:3] -= xyz_min - - if file_format=='txt': - fout = open(out_filename, 'w') - for i in range(data_label.shape[0]): - fout.write('%f %f %f %d %d %d %d\n' % \ - (data_label[i,0], data_label[i,1], data_label[i,2], - data_label[i,3], data_label[i,4], data_label[i,5], - data_label[i,6])) - fout.close() - elif file_format=='numpy': - np.save(out_filename, data_label) - else: - print('ERROR!! Unknown file format: %s, please use txt or numpy.' % \ - (file_format)) - exit() - -def data_to_obj(data,name='example.obj',no_wall=True): - fout = open(name, 'w') - label = data[:, -1].astype(int) - for i in range(data.shape[0]): - if no_wall and ((label[i] == 2) or (label[i]==0)): - continue - fout.write('v %f %f %f %d %d %d\n' % \ - (data[i, 0], data[i, 1], data[i, 2], data[i, 3], data[i, 4], data[i, 5])) - fout.close() - -def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False): - """ For visualization of a room from data_label file, - input_filename: each line is X Y Z R G B L - out_filename: OBJ filename, - visualize input file by coloring point with label color - easy_view: only visualize furnitures and floor - """ - data_label = np.loadtxt(input_filename) - data = data_label[:, 0:6] - label = data_label[:, -1].astype(int) - fout = open(out_filename, 'w') - for i in range(data.shape[0]): - color = g_label2color[label[i]] - if easy_view and (label[i] not in g_easy_view_labels): - continue - if no_wall and ((label[i] == 2) or (label[i]==0)): - continue - if label_color: - fout.write('v %f %f %f %d %d %d\n' % \ - (data[i,0], data[i,1], data[i,2], color[0], color[1], color[2])) - else: - fout.write('v %f %f %f %d %d %d\n' % \ - (data[i,0], data[i,1], data[i,2], 
data[i,3], data[i,4], data[i,5])) - fout.close() - - - -# ----------------------------------------------------------------------------- -# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING -# ----------------------------------------------------------------------------- - -def sample_data(data, num_sample): - """ data is in N x ... - we want to keep num_samplexC of them. - if N > num_sample, we will randomly keep num_sample of them. - if N < num_sample, we will randomly duplicate samples. - """ - N = data.shape[0] - if (N == num_sample): - return data, range(N) - elif (N > num_sample): - sample = np.random.choice(N, num_sample) - return data[sample, ...], sample - else: - sample = np.random.choice(N, num_sample-N) - dup_data = data[sample, ...] - return np.concatenate([data, dup_data], 0), list(range(N))+list(sample) - -def sample_data_label(data, label, num_sample): - new_data, sample_indices = sample_data(data, num_sample) - new_label = label[sample_indices] - return new_data, new_label - -def room2blocks(data, label, num_point, block_size=1.0, stride=1.0, - random_sample=False, sample_num=None, sample_aug=1): - """ Prepare block training data. - Args: - data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1] - assumes the data is shifted (min point is origin) and aligned - (aligned with XYZ axis) - label: N size uint8 numpy array from 0-12 - num_point: int, how many points to sample in each block - block_size: float, physical size of the block in meters - stride: float, stride for block sweeping - random_sample: bool, if True, we will randomly sample blocks in the room - sample_num: int, if random sample, how many blocks to sample - [default: room area] - sample_aug: if random sample, how much aug - Returns: - block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1] - block_labels: K x num_point x 1 np array of uint8 labels - - TODO: for this version, blocking is in fixed, non-overlapping pattern. 
- """ - assert(stride<=block_size) - - limit = np.amax(data, 0)[0:3] - - # Get the corner location for our sampling blocks - xbeg_list = [] - ybeg_list = [] - if not random_sample: - num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1 - num_block_y = int(np.ceil(collect_point_label(limit[1] - block_size) / stride)) + 1 - for i in range(num_block_x): - for j in range(num_block_y): - xbeg_list.append(i*stride) - ybeg_list.append(j*stride) - else: - num_block_x = int(np.ceil(limit[0] / block_size)) - num_block_y = int(np.ceil(limit[1] / block_size)) - if sample_num is None: - sample_num = num_block_x * num_block_y * sample_aug - for _ in range(sample_num): - xbeg = np.random.uniform(-block_size, limit[0]) - ybeg = np.random.uniform(-block_size, limit[1]) - xbeg_list.append(xbeg) - ybeg_list.append(ybeg) - - # Collect blocks - block_data_list = [] - block_label_list = [] - idx = 0 - for idx in range(len(xbeg_list)): - xbeg = xbeg_list[idx] - ybeg = ybeg_list[idx] - xcond = (data[:,0]<=xbeg+block_size) & (data[:,0]>=xbeg) - ycond = (data[:,1]<=ybeg+block_size) & (data[:,1]>=ybeg) - cond = xcond & ycond - if np.sum(cond) < 100: # discard block if there are less than 100 pts. - continue - - block_data = data[cond, :] - block_label = label[cond] - - # randomly subsample data - block_data_sampled, block_label_sampled = \ - sample_data_label(block_data, block_label, num_point) - block_data_list.append(np.expand_dims(block_data_sampled, 0)) - block_label_list.append(np.expand_dims(block_label_sampled, 0)) - - return np.concatenate(block_data_list, 0), \ - np.concatenate(block_label_list, 0) - - -def room2blocks_plus(data_label, num_point, block_size, stride, - random_sample, sample_num, sample_aug): - """ room2block with input filename and RGB preprocessing. 
- """ - data = data_label[:,0:6] - data[:,3:6] /= 255.0 - label = data_label[:,-1].astype(np.uint8) - - return room2blocks(data, label, num_point, block_size, stride, - random_sample, sample_num, sample_aug) - -def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0, - random_sample=False, sample_num=None, sample_aug=1): - if data_label_filename[-3:] == 'txt': - data_label = np.loadtxt(data_label_filename) - elif data_label_filename[-3:] == 'npy': - data_label = np.load(data_label_filename) - else: - print('Unknown file type! exiting.') - exit() - return room2blocks_plus(data_label, num_point, block_size, stride, - random_sample, sample_num, sample_aug) - -def room2blocks_plus_normalized(data_label, num_point, block_size, stride, - random_sample, sample_num, sample_aug): - """ room2block, with input filename and RGB preprocessing. - for each block centralize XYZ, add normalized XYZ as 678 channels - """ - data = data_label[:,0:6] - data[:,3:6] /= 255.0 - label = data_label[:,-1].astype(np.uint8) - max_room_x = max(data[:,0]) - max_room_y = max(data[:,1]) - max_room_z = max(data[:,2]) - - data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride, - random_sample, sample_num, sample_aug) - new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) - for b in range(data_batch.shape[0]): - new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x - new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y - new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z - minx = min(data_batch[b, :, 0]) - miny = min(data_batch[b, :, 1]) - data_batch[b, :, 0] -= (minx+block_size/2) - data_batch[b, :, 1] -= (miny+block_size/2) - new_data_batch[:, :, 0:6] = data_batch - return new_data_batch, label_batch - - -def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0, - random_sample=False, sample_num=None, sample_aug=1): - if data_label_filename[-3:] == 'txt': - data_label = 
np.loadtxt(data_label_filename) - elif data_label_filename[-3:] == 'npy': - data_label = np.load(data_label_filename) - else: - print('Unknown file type! exiting.') - exit() - return room2blocks_plus_normalized(data_label, num_point, block_size, stride, - random_sample, sample_num, sample_aug) - -def room2samples(data, label, sample_num_point): - """ Prepare whole room samples. - - Args: - data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1] - assumes the data is shifted (min point is origin) and - aligned (aligned with XYZ axis) - label: N size uint8 numpy array from 0-12 - sample_num_point: int, how many points to sample in each sample - Returns: - sample_datas: K x sample_num_point x 9 - numpy array of XYZRGBX'Y'Z', RGB is in [0,1] - sample_labels: K x sample_num_point x 1 np array of uint8 labels - """ - N = data.shape[0] - order = np.arange(N) - np.random.shuffle(order) - data = data[order, :] - label = label[order] - - batch_num = int(np.ceil(N / float(sample_num_point))) - sample_datas = np.zeros((batch_num, sample_num_point, 6)) - sample_labels = np.zeros((batch_num, sample_num_point, 1)) - - for i in range(batch_num): - beg_idx = i*sample_num_point - end_idx = min((i+1)*sample_num_point, N) - num = end_idx - beg_idx - sample_datas[i,0:num,:] = data[beg_idx:end_idx, :] - sample_labels[i,0:num,0] = label[beg_idx:end_idx] - if num < sample_num_point: - makeup_indices = np.random.choice(N, sample_num_point - num) - sample_datas[i,num:,:] = data[makeup_indices, :] - sample_labels[i,num:,0] = label[makeup_indices] - return sample_datas, sample_labels - -def room2samples_plus_normalized(data_label, num_point): - """ room2sample, with input filename and RGB preprocessing. 
- for each block centralize XYZ, add normalized XYZ as 678 channels - """ - data = data_label[:,0:6] - data[:,3:6] /= 255.0 - label = data_label[:,-1].astype(np.uint8) - max_room_x = max(data[:,0]) - max_room_y = max(data[:,1]) - max_room_z = max(data[:,2]) - #print(max_room_x, max_room_y, max_room_z) - - data_batch, label_batch = room2samples(data, label, num_point) - new_data_batch = np.zeros((data_batch.shape[0], num_point, 9)) - for b in range(data_batch.shape[0]): - new_data_batch[b, :, 6] = data_batch[b, :, 0]/max_room_x - new_data_batch[b, :, 7] = data_batch[b, :, 1]/max_room_y - new_data_batch[b, :, 8] = data_batch[b, :, 2]/max_room_z - #minx = min(data_batch[b, :, 0]) - #miny = min(data_batch[b, :, 1]) - #data_batch[b, :, 0] -= (minx+block_size/2) - #data_batch[b, :, 1] -= (miny+block_size/2) - new_data_batch[:, :, 0:6] = data_batch - return new_data_batch, label_batch - - -def room2samples_wrapper_normalized(data_label_filename, num_point): - if data_label_filename[-3:] == 'txt': - data_label = np.loadtxt(data_label_filename) - elif data_label_filename[-3:] == 'npy': - data_label = np.load(data_label_filename) - else: - print('Unknown file type! exiting.') - exit() - return room2samples_plus_normalized(data_label, num_point) - - -# ----------------------------------------------------------------------------- -# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation) -# ----------------------------------------------------------------------------- - -def collect_bounding_box(anno_path, out_filename): - """ Compute bounding boxes from each instance in original dataset files on - one room. **We assume the bbox is aligned with XYZ coordinate.** - - Args: - anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ - out_filename: path to save instance bounding boxes for that room. 
- each line is x1 y1 z1 x2 y2 z2 label, - where (x1,y1,z1) is the point on the diagonal closer to origin - Returns: - None - Note: - room points are shifted, the most negative point is now at origin. - """ - bbox_label_list = [] - - for f in glob.glob(os.path.join(anno_path, '*.txt')): - cls = os.path.basename(f).split('_')[0] - if cls not in g_classes: # note: in some room there is 'staris' class.. - cls = 'clutter' - points = np.loadtxt(f) - label = g_class2label[cls] - # Compute tightest axis aligned bounding box - xyz_min = np.amin(points[:, 0:3], axis=0) - xyz_max = np.amax(points[:, 0:3], axis=0) - ins_bbox_label = np.expand_dims( - np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0) - bbox_label_list.append(ins_bbox_label) - - bbox_label = np.concatenate(bbox_label_list, 0) - room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0) - bbox_label[:, 0:3] -= room_xyz_min - bbox_label[:, 3:6] -= room_xyz_min - - fout = open(out_filename, 'w') - for i in range(bbox_label.shape[0]): - fout.write('%f %f %f %f %f %f %d\n' % \ - (bbox_label[i,0], bbox_label[i,1], bbox_label[i,2], - bbox_label[i,3], bbox_label[i,4], bbox_label[i,5], - bbox_label[i,6])) - fout.close() - -def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False): - """ Visualization of bounding boxes. 
- - Args: - input_filename: each line is x1 y1 z1 x2 y2 z2 label - out_filename_prefix: OBJ filename prefix, - visualize object by g_label2color - easy_view: if True, only visualize furniture and floor - Returns: - output a list of OBJ file and MTL files with the same prefix - """ - bbox_label = np.loadtxt(input_filename) - bbox = bbox_label[:, 0:6] - label = bbox_label[:, -1].astype(int) - v_cnt = 0 # count vertex - ins_cnt = 0 # count instance - for i in range(bbox.shape[0]): - if easy_view and (label[i] not in g_easy_view_labels): - continue - obj_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.obj' - mtl_filename = out_filename_prefix+'_'+g_classes[label[i]]+'_'+str(ins_cnt)+'.mtl' - fout_obj = open(obj_filename, 'w') - fout_mtl = open(mtl_filename, 'w') - fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) - - length = bbox[i, 3:6] - bbox[i, 0:3] - a = length[0] - b = length[1] - c = length[2] - x = bbox[i, 0] - y = bbox[i, 1] - z = bbox[i, 2] - color = np.array(g_label2color[label[i]], dtype=float) / 255.0 - - material = 'material%d' % (ins_cnt) - fout_obj.write('usemtl %s\n' % (material)) - fout_obj.write('v %f %f %f\n' % (x,y,z+c)) - fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) - fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) - fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) - fout_obj.write('v %f %f %f\n' % (x,y,z)) - fout_obj.write('v %f %f %f\n' % (x,y+b,z)) - fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) - fout_obj.write('v %f %f %f\n' % (x+a,y,z)) - fout_obj.write('g default\n') - v_cnt = 0 # for individual box - fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % 
(5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) - fout_obj.write('\n') - - fout_mtl.write('newmtl %s\n' % (material)) - fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) - fout_mtl.write('\n') - fout_obj.close() - fout_mtl.close() - - v_cnt += 8 - ins_cnt += 1 - -def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False): - """ Visualization of bounding boxes. - - Args: - input_filename: each line is x1 y1 z1 x2 y2 z2 label - out_filename_prefix: OBJ filename prefix, - visualize object by g_label2color - easy_view: if True, only visualize furniture and floor - permute: if not None, permute XYZ for rendering, e.g. [0 2 1] - center: if True, move obj to have zero origin - Returns: - output a list of OBJ file and MTL files with the same prefix - """ - bbox_label = np.loadtxt(input_filename) - bbox = bbox_label[:, 0:6] - if permute is not None: - assert(len(permute)==3) - permute = np.array(permute) - bbox[:,0:3] = bbox[:,permute] - bbox[:,3:6] = bbox[:,permute+3] - if center: - xyz_max = np.amax(bbox[:,3:6], 0) - bbox[:,0:3] -= (xyz_max/2.0) - bbox[:,3:6] -= (xyz_max/2.0) - bbox /= np.max(xyz_max/2.0) - label = bbox_label[:, -1].astype(int) - obj_filename = out_filename_prefix+'.obj' - mtl_filename = out_filename_prefix+'.mtl' - - fout_obj = open(obj_filename, 'w') - fout_mtl = open(mtl_filename, 'w') - fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename))) - v_cnt = 0 # count vertex - ins_cnt = 0 # count instance - for i in range(bbox.shape[0]): - if easy_view and (label[i] not in g_easy_view_labels): - continue - if exclude_table and label[i] == g_classes.index('table'): - continue - - length = bbox[i, 3:6] - bbox[i, 0:3] - a = length[0] - b = length[1] - c = length[2] - x = bbox[i, 0] - y = bbox[i, 1] - z = bbox[i, 2] - color = np.array(g_label2color[label[i]], dtype=float) / 255.0 - - material = 'material%d' % (ins_cnt) - fout_obj.write('usemtl %s\n' % (material)) - 
fout_obj.write('v %f %f %f\n' % (x,y,z+c)) - fout_obj.write('v %f %f %f\n' % (x,y+b,z+c)) - fout_obj.write('v %f %f %f\n' % (x+a,y+b,z+c)) - fout_obj.write('v %f %f %f\n' % (x+a,y,z+c)) - fout_obj.write('v %f %f %f\n' % (x,y,z)) - fout_obj.write('v %f %f %f\n' % (x,y+b,z)) - fout_obj.write('v %f %f %f\n' % (x+a,y+b,z)) - fout_obj.write('v %f %f %f\n' % (x+a,y,z)) - fout_obj.write('g default\n') - fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 3+v_cnt, 2+v_cnt, 1+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (1+v_cnt, 2+v_cnt, 6+v_cnt, 5+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (7+v_cnt, 6+v_cnt, 2+v_cnt, 3+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (4+v_cnt, 8+v_cnt, 7+v_cnt, 3+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 8+v_cnt, 4+v_cnt, 1+v_cnt)) - fout_obj.write('f %d %d %d %d\n' % (5+v_cnt, 6+v_cnt, 7+v_cnt, 8+v_cnt)) - fout_obj.write('\n') - - fout_mtl.write('newmtl %s\n' % (material)) - fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2])) - fout_mtl.write('\n') - - v_cnt += 8 - ins_cnt += 1 - - fout_obj.close() - fout_mtl.close() - - -def collect_point_bounding_box(anno_path, out_filename, file_format): - """ Compute bounding boxes from each instance in original dataset files on - one room. **We assume the bbox is aligned with XYZ coordinate.** - Save both the point XYZRGB and the bounding box for the point's - parent element. - - Args: - anno_path: path to annotations. e.g. Area_1/office_2/Annotations/ - out_filename: path to save instance bounding boxes for each point, - plus the point's XYZRGBL - each line is XYZRGBL offsetX offsetY offsetZ a b c, - where cx = X+offsetX, cy=X+offsetY, cz=Z+offsetZ - where (cx,cy,cz) is center of the box, a,b,c are distances from center - to the surfaces of the box, i.e. x1 = cx-a, x2 = cx+a, y1=cy-b etc. - file_format: output file format, txt or numpy - Returns: - None - - Note: - room points are shifted, the most negative point is now at origin. 
- """ - point_bbox_list = [] - - for f in glob.glob(os.path.join(anno_path, '*.txt')): - cls = os.path.basename(f).split('_')[0] - if cls not in g_classes: # note: in some room there is 'staris' class.. - cls = 'clutter' - points = np.loadtxt(f) # Nx6 - label = g_class2label[cls] # N, - # Compute tightest axis aligned bounding box - xyz_min = np.amin(points[:, 0:3], axis=0) # 3, - xyz_max = np.amax(points[:, 0:3], axis=0) # 3, - xyz_center = (xyz_min + xyz_max) / 2 - dimension = (xyz_max - xyz_min) / 2 - - xyz_offsets = xyz_center - points[:,0:3] # Nx3 - dimensions = np.ones((points.shape[0],3)) * dimension # Nx3 - labels = np.ones((points.shape[0],1)) * label # N - point_bbox_list.append(np.concatenate([points, labels, - xyz_offsets, dimensions], 1)) # Nx13 - - point_bbox = np.concatenate(point_bbox_list, 0) # KxNx13 - room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0) - point_bbox[:, 0:3] -= room_xyz_min - - if file_format == 'txt': - fout = open(out_filename, 'w') - for i in range(point_bbox.shape[0]): - fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' % \ - (point_bbox[i,0], point_bbox[i,1], point_bbox[i,2], - point_bbox[i,3], point_bbox[i,4], point_bbox[i,5], - point_bbox[i,6], - point_bbox[i,7], point_bbox[i,8], point_bbox[i,9], - point_bbox[i,10], point_bbox[i,11], point_bbox[i,12])) - - fout.close() - elif file_format == 'numpy': - np.save(out_filename, point_bbox) - else: - print('ERROR!! Unknown file format: %s, please use txt or numpy.' 
% \ - (file_format)) - exit() - - diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" deleted file mode 100644 index 8fad1d8..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data_utils/make hdf5_file.py" +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: UTF-8 -*- -import os -import numpy as np -import h5py -np.set_printoptions(suppress=True) - -def getDataFiles(path_root): - filelist = os.listdir(path_root) - for i in range(len(filelist)): - filelist[i] = os.path.join(path_root,filelist[i]) - return filelist - -#得到的返回值是一维字符串数组 -def loadDataFile(path): - data = np.loadtxt(path) - point_xyz = data[:, 0:3] - label=data[:,4] - label_int = label.astype(int) - return point_xyz, label_int - -if __name__ == "__main__": - - train_file = ['train0', 'test0'] - net_num = 2048 - train_dir = 'E:\CITYtest\Trainfile//' - - for i in range(len(train_file)): - root_file = os.path.join(train_dir,train_file[i]) - DATA_FILES = getDataFiles(root_file) - - DATA_ALL = [] - LABEL_ALL = [] - for fn in range(len(DATA_FILES)): - pre_data, his_label = loadDataFile(DATA_FILES[fn]) - pre_label = his_label.reshape(net_num, 1) # 重塑为num行1列的数据 得到num个点云的标签 - - # data_label = np.hstack((pre_data, pre_label)) - DATA_ALL.append(pre_data) #列表元素 - # label一个样本中的类别是一样的,取一个数字就可以了 - 
LABEL_ALL.append(pre_label[0]) - - # 把DATA_ALL和LABEL_ALL的列表转换成数组格式, - out_data = np.vstack(DATA_ALL) - out_label = np.vstack(LABEL_ALL) - - # 重塑为三维数组,2048 个 2048*4 的三个数组 - data_reshape = out_data.reshape(net_num, net_num, 3) - # 写入训练数据 - filename= train_dir +'/'+ 'point_data_' + train_file[i] + '.h5' - if not os.path.exists(filename): - with h5py.File(filename, 'w') as f: - f['data'] = data_reshape - f['label'] = out_label - f.close() - else: - print('hdf5文件已存在') - -print("over") -- Gitee From 14bb6385e4f9360141958362f368fa2ff630471c Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:35:19 +0000 Subject: [PATCH 60/69] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20utils?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../utils/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/.keep" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/.keep" new file mode 100644 index 0000000..e69de29 -- Gitee From 7c1571fdf1de4d56e5d0bcbafc2df9f2235e7158 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:35:58 +0000 Subject: [PATCH 61/69] 
=?UTF-8?q?=E7=89=B9=E5=BE=81=E6=8F=90=E5=8F=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../utils/README.md" | 6 + .../utils/compile_render_balls_so.sh" | 2 + .../utils/eulerangles.py" | 418 ++++++++ .../utils/pc_util.py" | 315 ++++++ .../utils/plyfile.py" | 916 ++++++++++++++++++ .../utils/pointnet_util.py" | 230 +++++ .../utils/provider.py" | 247 +++++ .../utils/render_balls_so.cpp" | 58 ++ .../utils/show3d_balls.py" | 161 +++ .../utils/tf_util.py" | 616 ++++++++++++ 10 files changed, 2969 insertions(+) create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/README.md" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/compile_render_balls_so.sh" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/eulerangles.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pc_util.py" create mode 100644 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/plyfile.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pointnet_util.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/provider.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/render_balls_so.cpp" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/show3d_balls.py" create mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/tf_util.py" diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/README.md" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/README.md" new file mode 100644 index 0000000..6d2bfad --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/README.md" @@ -0,0 +1,6 @@ +## Utilility Functions for 3D Point Cloud Deep Learning + +### visualization tool + + sh compile_render_balls_so.sh + python show3d_balls.py diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/compile_render_balls_so.sh" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/compile_render_balls_so.sh" new file mode 100644 index 0000000..dc493f6 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/compile_render_balls_so.sh" @@ -0,0 +1,2 @@ +g++ -std=c++11 
render_balls_so.cpp -o render_balls_so.so -shared -fPIC -O2 -D_GLIBCXX_USE_CXX11_ABI=0 + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/eulerangles.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/eulerangles.py" new file mode 100644 index 0000000..87bd605 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/eulerangles.py" @@ -0,0 +1,418 @@ +# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +# +# See COPYING file distributed along with the NiBabel package for the +# copyright and license terms. +# +### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## +''' Module implementing Euler angle rotations and their conversions + +See: + +* http://en.wikipedia.org/wiki/Rotation_matrix +* http://en.wikipedia.org/wiki/Euler_angles +* http://mathworld.wolfram.com/EulerAngles.html + +See also: *Representing Attitude with Euler Angles and Quaternions: A +Reference* (2006) by James Diebel. A cached PDF link last found here: + +http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.110.5134 + +Euler's rotation theorem tells us that any rotation in 3D can be +described by 3 angles. 
Let's call the 3 angles the *Euler angle vector* +and call the angles in the vector :math:`alpha`, :math:`beta` and +:math:`gamma`. The vector is [ :math:`alpha`, +:math:`beta`. :math:`gamma` ] and, in this description, the order of the +parameters specifies the order in which the rotations occur (so the +rotation corresponding to :math:`alpha` is applied first). + +In order to specify the meaning of an *Euler angle vector* we need to +specify the axes around which each of the rotations corresponding to +:math:`alpha`, :math:`beta` and :math:`gamma` will occur. + +There are therefore three axes for the rotations :math:`alpha`, +:math:`beta` and :math:`gamma`; let's call them :math:`i` :math:`j`, +:math:`k`. + +Let us express the rotation :math:`alpha` around axis `i` as a 3 by 3 +rotation matrix `A`. Similarly :math:`beta` around `j` becomes 3 x 3 +matrix `B` and :math:`gamma` around `k` becomes matrix `G`. Then the +whole rotation expressed by the Euler angle vector [ :math:`alpha`, +:math:`beta`. :math:`gamma` ], `R` is given by:: + + R = np.dot(G, np.dot(B, A)) + +See http://mathworld.wolfram.com/EulerAngles.html + +The order :math:`G B A` expresses the fact that the rotations are +performed in the order of the vector (:math:`alpha` around axis `i` = +`A` first). + +To convert a given Euler angle vector to a meaningful rotation, and a +rotation matrix, we need to define: + +* the axes `i`, `j`, `k` +* whether a rotation matrix should be applied on the left of a vector to + be transformed (vectors are column vectors) or on the right (vectors + are row vectors). 
+* whether the rotations move the axes as they are applied (intrinsic + rotations) - compared the situation where the axes stay fixed and the + vectors move within the axis frame (extrinsic) +* the handedness of the coordinate system + +See: http://en.wikipedia.org/wiki/Rotation_matrix#Ambiguities + +We are using the following conventions: + +* axes `i`, `j`, `k` are the `z`, `y`, and `x` axes respectively. Thus + an Euler angle vector [ :math:`alpha`, :math:`beta`. :math:`gamma` ] + in our convention implies a :math:`alpha` radian rotation around the + `z` axis, followed by a :math:`beta` rotation around the `y` axis, + followed by a :math:`gamma` rotation around the `x` axis. +* the rotation matrix applies on the left, to column vectors on the + right, so if `R` is the rotation matrix, and `v` is a 3 x N matrix + with N column vectors, the transformed vector set `vdash` is given by + ``vdash = np.dot(R, v)``. +* extrinsic rotations - the axes are fixed, and do not move with the + rotations. +* a right-handed coordinate system + +The convention of rotation around ``z``, followed by rotation around +``y``, followed by rotation around ``x``, is known (confusingly) as +"xyz", pitch-roll-yaw, Cardan angles, or Tait-Bryan angles. 
+''' + +import math + +import sys +if sys.version_info >= (3,0): + from functools import reduce + +import numpy as np + + +_FLOAT_EPS_4 = np.finfo(float).eps * 4.0 + + +def euler2mat(z=0, y=0, x=0): + ''' Return matrix for rotations around z, y and x axes + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + M : array shape (3,3) + Rotation matrix giving same rotation as for given angles + + Examples + -------- + >>> zrot = 1.3 # radians + >>> yrot = -0.1 + >>> xrot = 0.2 + >>> M = euler2mat(zrot, yrot, xrot) + >>> M.shape == (3, 3) + True + + The output rotation matrix is equal to the composition of the + individual rotations + + >>> M1 = euler2mat(zrot) + >>> M2 = euler2mat(0, yrot) + >>> M3 = euler2mat(0, 0, xrot) + >>> composed_M = np.dot(M3, np.dot(M2, M1)) + >>> np.allclose(M, composed_M) + True + + You can specify rotations by named arguments + + >>> np.all(M3 == euler2mat(x=xrot)) + True + + When applying M to a vector, the vector should column vector to the + right of M. If the right hand side is a 2D array rather than a + vector, then each column of the 2D array represents a vector. + + >>> vec = np.array([1, 0, 0]).reshape((3,1)) + >>> v2 = np.dot(M, vec) + >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array + >>> vecs2 = np.dot(M, vecs) + + Rotations are counter-clockwise. 
+ + >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3)) + >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]]) + True + >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3)) + >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]]) + True + >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3)) + >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]]) + True + + Notes + ----- + The direction of rotation is given by the right-hand rule (orient + the thumb of the right hand along the axis around which the rotation + occurs, with the end of the thumb at the positive end of the axis; + curl your fingers; the direction your fingers curl is the direction + of rotation). Therefore, the rotations are counterclockwise if + looking along the axis of rotation from positive to negative. + ''' + Ms = [] + if z: + cosz = math.cos(z) + sinz = math.sin(z) + Ms.append(np.array( + [[cosz, -sinz, 0], + [sinz, cosz, 0], + [0, 0, 1]])) + if y: + cosy = math.cos(y) + siny = math.sin(y) + Ms.append(np.array( + [[cosy, 0, siny], + [0, 1, 0], + [-siny, 0, cosy]])) + if x: + cosx = math.cos(x) + sinx = math.sin(x) + Ms.append(np.array( + [[1, 0, 0], + [0, cosx, -sinx], + [0, sinx, cosx]])) + if Ms: + return reduce(np.dot, Ms[::-1]) + return np.eye(3) + + +def mat2euler(M, cy_thresh=None): + ''' Discover Euler angle vector from 3x3 matrix + + Uses the conventions above. + + Parameters + ---------- + M : array-like, shape (3,3) + cy_thresh : None or scalar, optional + threshold below which to give up on straightforward arctan for + estimating x rotation. If None (default), estimate from + precision of input. 
+ + Returns + ------- + z : scalar + y : scalar + x : scalar + Rotations in radians around z, y, x axes, respectively + + Notes + ----- + If there was no numerical error, the routine could be derived using + Sympy expression for z then y then x rotation matrix, which is:: + + [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)], + [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)], + [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)] + + with the obvious derivations for z, y, and x + + z = atan2(-r12, r11) + y = asin(r13) + x = atan2(-r23, r33) + + Problems arise when cos(y) is close to zero, because both of:: + + z = atan2(cos(y)*sin(z), cos(y)*cos(z)) + x = atan2(cos(y)*sin(x), cos(x)*cos(y)) + + will be close to atan2(0, 0), and highly unstable. + + The ``cy`` fix for numerical instability below is from: *Graphics + Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN: + 0123361559. Specifically it comes from EulerAngles.c by Ken + Shoemake, and deals with the case where cos(y) is close to zero: + + See: http://www.graphicsgems.org/ + + The code appears to be licensed (from the website) as "can be used + without restrictions". 
+ ''' + M = np.asarray(M) + if cy_thresh is None: + try: + cy_thresh = np.finfo(M.dtype).eps * 4 + except ValueError: + cy_thresh = _FLOAT_EPS_4 + r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat + # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2) + cy = math.sqrt(r33*r33 + r23*r23) + if cy > cy_thresh: # cos(y) not close to zero, standard form + z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z)) + y = math.atan2(r13, cy) # atan2(sin(y), cy) + x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y)) + else: # cos(y) (close to) zero, so x -> 0.0 (see above) + # so r21 -> sin(z), r22 -> cos(z) and + z = math.atan2(r21, r22) + y = math.atan2(r13, cy) # atan2(sin(y), cy) + x = 0.0 + return z, y, x + + +def euler2quat(z=0, y=0, x=0): + ''' Return quaternion corresponding to these Euler angles + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + quat : array shape (4,) + Quaternion in w, x, y z (real, then vector) format + + Notes + ----- + We can derive this formula in Sympy using: + + 1. Formula giving quaternion corresponding to rotation of theta radians + about arbitrary axis: + http://mathworld.wolfram.com/EulerParameters.html + 2. Generated formulae from 1.) for quaternions corresponding to + theta radians rotations about ``x, y, z`` axes + 3. Apply quaternion multiplication formula - + http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to + formulae from 2.) to give formula for combined rotations. 
+ ''' + z = z/2.0 + y = y/2.0 + x = x/2.0 + cz = math.cos(z) + sz = math.sin(z) + cy = math.cos(y) + sy = math.sin(y) + cx = math.cos(x) + sx = math.sin(x) + return np.array([ + cx*cy*cz - sx*sy*sz, + cx*sy*sz + cy*cz*sx, + cx*cz*sy - sx*cy*sz, + cx*cy*sz + sx*cz*sy]) + + +def quat2euler(q): + ''' Return Euler angles corresponding to quaternion `q` + + Parameters + ---------- + q : 4 element sequence + w, x, y, z of quaternion + + Returns + ------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Notes + ----- + It's possible to reduce the amount of calculation a little, by + combining parts of the ``quat2mat`` and ``mat2euler`` functions, but + the reduction in computation is small, and the code repetition is + large. + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + return mat2euler(nq.quat2mat(q)) + + +def euler2angle_axis(z=0, y=0, x=0): + ''' Return angle, axis corresponding to these Euler angles + + Uses the z, then y, then x convention above + + Parameters + ---------- + z : scalar + Rotation angle in radians around z-axis (performed first) + y : scalar + Rotation angle in radians around y-axis + x : scalar + Rotation angle in radians around x-axis (performed last) + + Returns + ------- + theta : scalar + angle of rotation + vector : array shape (3,) + axis around which rotation occurs + + Examples + -------- + >>> theta, vec = euler2angle_axis(0, 1.5, 0) + >>> print(theta) + 1.5 + >>> np.allclose(vec, [0, 1, 0]) + True + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + return nq.quat2angle_axis(euler2quat(z, y, x)) + + +def angle_axis2euler(theta, vector, is_normalized=False): + ''' Convert angle, axis pair to Euler angles + + Parameters + ---------- + theta : scalar + angle of rotation + vector : 3 element sequence + vector 
specifying axis for rotation. + is_normalized : bool, optional + True if vector is already normalized (has norm of 1). Default + False + + Returns + ------- + z : scalar + y : scalar + x : scalar + Rotations in radians around z, y, x axes, respectively + + Examples + -------- + >>> z, y, x = angle_axis2euler(0, [1, 0, 0]) + >>> np.allclose((z, y, x), 0) + True + + Notes + ----- + It's possible to reduce the amount of calculation a little, by + combining parts of the ``angle_axis2mat`` and ``mat2euler`` + functions, but the reduction in computation is small, and the code + repetition is large. + ''' + # delayed import to avoid cyclic dependencies + import nibabel.quaternions as nq + M = nq.angle_axis2mat(theta, vector, is_normalized) + return mat2euler(M) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pc_util.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pc_util.py" new file mode 100644 index 0000000..81f63d8 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pc_util.py" @@ -0,0 +1,315 @@ +""" Utility functions for processing point clouds. + +Author: Charles R. 
Qi, Hao Su +Date: November 2016 +""" + +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +# Draw point cloud +from eulerangles import euler2mat + +# Point cloud IO +import numpy as np +from plyfile import PlyData, PlyElement + + +# ---------------------------------------- +# Point Cloud/Volume Conversions +# ---------------------------------------- + +def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True): + """ Input is BxNx3 batch of point cloud + Output is Bx(vsize^3) + """ + vol_list = [] + for b in range(point_clouds.shape[0]): + vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius) + if flatten: + vol_list.append(vol.flatten()) + else: + vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0)) + if flatten: + return np.vstack(vol_list) + else: + return np.concatenate(vol_list, 0) + + +def point_cloud_to_volume(points, vsize, radius=1.0): + """ input is Nx3 points. + output is vsize*vsize*vsize + assumes points are in range [-radius, radius] + """ + vol = np.zeros((vsize,vsize,vsize)) + voxel = 2*radius/float(vsize) + locations = (points + radius)/voxel + locations = locations.astype(int) + vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0 + return vol + +#a = np.zeros((16,1024,3)) +#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape + +def volume_to_point_cloud(vol): + """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize + return Nx3 numpy array. 
+ """ + vsize = vol.shape[0] + assert(vol.shape[1] == vsize and vol.shape[1] == vsize) + points = [] + for a in range(vsize): + for b in range(vsize): + for c in range(vsize): + if vol[a,b,c] == 1: + points.append(np.array([a,b,c])) + if len(points) == 0: + return np.zeros((0,3)) + points = np.vstack(points) + return points + +def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128): + """ Input is BxNx3 a batch of point cloud + Output is BxVxVxVxnum_samplex3 + Added on Feb 19 + """ + vol_list = [] + for b in range(point_clouds.shape[0]): + vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample) + vol_list.append(np.expand_dims(vol, 0)) + return np.concatenate(vol_list, 0) + +def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128): + """ input is Nx3 points + output is vsize*vsize*vsize*num_sample*3 + assumes points are in range [-radius, radius] + samples num_sample points in each voxel, if there are less than + num_sample points, replicate the points + Added on Feb 19 + """ + vol = np.zeros((vsize,vsize,vsize,num_sample,3)) + voxel = 2*radius/float(vsize) + locations = (points + radius)/voxel + locations = locations.astype(int) + loc2pc = {} + for n in range(points.shape[0]): + loc = tuple(locations[n,:]) + if loc not in loc2pc: + loc2pc[loc] = [] + loc2pc[loc].append(points[n,:]) + #print loc2pc + + for i in range(vsize): + for j in range(vsize): + for k in range(vsize): + if (i,j,k) not in loc2pc: + vol[i,j,k,:,:] = np.zeros((num_sample,3)) + else: + pc = loc2pc[(i,j,k)] # a list of (3,) arrays + pc = np.vstack(pc) # kx3 + # Sample/pad to num_sample points + if pc.shape[0]>num_sample: + choices = np.random.choice(pc.shape[0], num_sample, replace=False) + pc = pc[choices,:] + elif pc.shape[0]num_sample: + choices = np.random.choice(pc.shape[0], num_sample, replace=False) + pc = pc[choices,:] + elif pc.shape[0] 0) + dx = mask[:, 0] + dy = mask[:, 1] + dv = disk[disk > 0] + + # Order points by 
z-buffer + zorder = np.argsort(points[:, 2]) + points = points[zorder, :] + points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2]))) + max_depth = np.max(points[:, 2]) + + for i in range(points.shape[0]): + j = points.shape[0] - i - 1 + x = points[j, 0] + y = points[j, 1] + xc = canvasSize/2 + (x*space) + yc = canvasSize/2 + (y*space) + xc = int(np.round(xc)) + yc = int(np.round(yc)) + + px = dx + xc + py = dy + yc + + image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3 + + image = image / np.max(image) + return image + +def point_cloud_three_views(points): + """ input points Nx3 numpy array (+y is up direction). + return an numpy array gray image of size 500x1500. """ + # +y is up direction + # xrot is azimuth + # yrot is in-plane + # zrot is elevation + img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi) + img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi) + img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi) + image_large = np.concatenate([img1, img2, img3], 1) + return image_large + + +def point_cloud_three_views_demo(): + """ Demo for draw_point_cloud function """ + from PIL import Image + points = read_ply('../third_party/mesh_sampling/piano.ply') + im_array = point_cloud_three_views(points) + img = Image.fromarray(np.uint8(im_array*255.0)) + img.save('piano.jpg') + +if __name__=="__main__": + point_cloud_three_views_demo() + + +def pyplot_draw_point_cloud(points, output_filename): + """ points is a Nx3 numpy array """ + import matplotlib.pyplot as plt + fig = plt.figure() + ax = fig.add_subplot(111, projection='3d') + ax.scatter(points[:,0], points[:,1], points[:,2]) + ax.set_xlabel('x') + ax.set_ylabel('y') + ax.set_zlabel('z') + #savefig(output_filename) + +def pyplot_draw_volume(vol, output_filename): + """ vol is of size vsize*vsize*vsize + output an image 
to output_filename + """ + points = volume_to_point_cloud(vol) + pyplot_draw_point_cloud(points, output_filename) + +def write_ply_color(points, labels, out_filename, num_classes=None): + """ Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """ + import matplotlib.pyplot as pyplot + labels = labels.astype(int) + N = points.shape[0] + if num_classes is None: + num_classes = np.max(labels)+1 + else: + assert(num_classes>np.max(labels)) + fout = open(out_filename, 'w') + #colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)] + colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)] + for i in range(N): + c = colors[labels[i]] + c = [int(x*255) for x in c] + fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2])) + fout.close() diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/plyfile.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/plyfile.py" new file mode 100644 index 0000000..69c2aa9 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/plyfile.py" @@ -0,0 +1,916 @@ +# Copyright 2014 Darsh Ranjan +# +# This file is part of python-plyfile. 
+# +# python-plyfile is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# python-plyfile is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with python-plyfile. If not, see +# . + +from itertools import islice as _islice + +import numpy as _np +from sys import byteorder as _byteorder + + +try: + _range = xrange +except NameError: + _range = range + + +# Many-many relation +_data_type_relation = [ + ('int8', 'i1'), + ('char', 'i1'), + ('uint8', 'u1'), + ('uchar', 'b1'), + ('uchar', 'u1'), + ('int16', 'i2'), + ('short', 'i2'), + ('uint16', 'u2'), + ('ushort', 'u2'), + ('int32', 'i4'), + ('int', 'i4'), + ('uint32', 'u4'), + ('uint', 'u4'), + ('float32', 'f4'), + ('float', 'f4'), + ('float64', 'f8'), + ('double', 'f8') +] + +_data_types = dict(_data_type_relation) +_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation) + +_types_list = [] +_types_set = set() +for (_a, _b) in _data_type_relation: + if _a not in _types_set: + _types_list.append(_a) + _types_set.add(_a) + if _b not in _types_set: + _types_list.append(_b) + _types_set.add(_b) + + +_byte_order_map = { + 'ascii': '=', + 'binary_little_endian': '<', + 'binary_big_endian': '>' +} + +_byte_order_reverse = { + '<': 'binary_little_endian', + '>': 'binary_big_endian' +} + +_native_byte_order = {'little': '<', 'big': '>'}[_byteorder] + + +def _lookup_type(type_str): + if type_str not in _data_type_reverse: + try: + type_str = _data_types[type_str] + except KeyError: + raise ValueError("field type %r not in %r" % + (type_str, _types_list)) + + return 
_data_type_reverse[type_str] + + +def _split_line(line, n): + fields = line.split(None, n) + if len(fields) == n: + fields.append('') + + assert len(fields) == n + 1 + + return fields + + +def make2d(array, cols=None, dtype=None): + ''' + Make a 2D array from an array of arrays. The `cols' and `dtype' + arguments can be omitted if the array is not empty. + + ''' + if (cols is None or dtype is None) and not len(array): + raise RuntimeError("cols and dtype must be specified for empty " + "array") + + if cols is None: + cols = len(array[0]) + + if dtype is None: + dtype = array[0].dtype + + return _np.fromiter(array, [('_', dtype, (cols,))], + count=len(array))['_'] + + +class PlyParseError(Exception): + + ''' + Raised when a PLY file cannot be parsed. + + The attributes `element', `row', `property', and `message' give + additional information. + + ''' + + def __init__(self, message, element=None, row=None, prop=None): + self.message = message + self.element = element + self.row = row + self.prop = prop + + s = '' + if self.element: + s += 'element %r: ' % self.element.name + if self.row is not None: + s += 'row %d: ' % self.row + if self.prop: + s += 'property %r: ' % self.prop.name + s += self.message + + Exception.__init__(self, s) + + def __repr__(self): + return ('PlyParseError(%r, element=%r, row=%r, prop=%r)' % + self.message, self.element, self.row, self.prop) + + +class PlyData(object): + + ''' + PLY file header and data. + + A PlyData instance is created in one of two ways: by the static + method PlyData.read (to read a PLY file), or directly from __init__ + given a sequence of elements (which can then be written to a PLY + file). + + ''' + + def __init__(self, elements=[], text=False, byte_order='=', + comments=[], obj_info=[]): + ''' + elements: sequence of PlyElement instances. + + text: whether the resulting PLY file will be text (True) or + binary (False). + + byte_order: '<' for little-endian, '>' for big-endian, or '=' + for native. 
This is only relevant if `text' is False. + + comments: sequence of strings that will be placed in the header + between the 'ply' and 'format ...' lines. + + obj_info: like comments, but will be placed in the header with + "obj_info ..." instead of "comment ...". + + ''' + if byte_order == '=' and not text: + byte_order = _native_byte_order + + self.byte_order = byte_order + self.text = text + + self.comments = list(comments) + self.obj_info = list(obj_info) + self.elements = elements + + def _get_elements(self): + return self._elements + + def _set_elements(self, elements): + self._elements = tuple(elements) + self._index() + + elements = property(_get_elements, _set_elements) + + def _get_byte_order(self): + return self._byte_order + + def _set_byte_order(self, byte_order): + if byte_order not in ['<', '>', '=']: + raise ValueError("byte order must be '<', '>', or '='") + + self._byte_order = byte_order + + byte_order = property(_get_byte_order, _set_byte_order) + + def _index(self): + self._element_lookup = dict((elt.name, elt) for elt in + self._elements) + if len(self._element_lookup) != len(self._elements): + raise ValueError("two elements with same name") + + @staticmethod + def _parse_header(stream): + ''' + Parse a PLY header from a readable file-like stream. 
+ + ''' + lines = [] + comments = {'comment': [], 'obj_info': []} + while True: + line = stream.readline().decode('ascii').strip() + fields = _split_line(line, 1) + + if fields[0] == 'end_header': + break + + elif fields[0] in comments.keys(): + lines.append(fields) + else: + lines.append(line.split()) + + a = 0 + if lines[a] != ['ply']: + raise PlyParseError("expected 'ply'") + + a += 1 + while lines[a][0] in comments.keys(): + comments[lines[a][0]].append(lines[a][1]) + a += 1 + + if lines[a][0] != 'format': + raise PlyParseError("expected 'format'") + + if lines[a][2] != '1.0': + raise PlyParseError("expected version '1.0'") + + if len(lines[a]) != 3: + raise PlyParseError("too many fields after 'format'") + + fmt = lines[a][1] + + if fmt not in _byte_order_map: + raise PlyParseError("don't understand format %r" % fmt) + + byte_order = _byte_order_map[fmt] + text = fmt == 'ascii' + + a += 1 + while a < len(lines) and lines[a][0] in comments.keys(): + comments[lines[a][0]].append(lines[a][1]) + a += 1 + + return PlyData(PlyElement._parse_multi(lines[a:]), + text, byte_order, + comments['comment'], comments['obj_info']) + + @staticmethod + def read(stream): + ''' + Read PLY data from a readable file-like object or filename. + + ''' + (must_close, stream) = _open_stream(stream, 'read') + try: + data = PlyData._parse_header(stream) + for elt in data: + elt._read(stream, data.text, data.byte_order) + finally: + if must_close: + stream.close() + + return data + + def write(self, stream): + ''' + Write PLY data to a writeable file-like object or filename. + + ''' + (must_close, stream) = _open_stream(stream, 'write') + try: + stream.write(self.header.encode('ascii')) + stream.write(b'\r\n') + for elt in self: + elt._write(stream, self.text, self.byte_order) + finally: + if must_close: + stream.close() + + @property + def header(self): + ''' + Provide PLY-formatted metadata for the instance. 
+ + ''' + lines = ['ply'] + + if self.text: + lines.append('format ascii 1.0') + else: + lines.append('format ' + + _byte_order_reverse[self.byte_order] + + ' 1.0') + + # Some information is lost here, since all comments are placed + # between the 'format' line and the first element. + for c in self.comments: + lines.append('comment ' + c) + + for c in self.obj_info: + lines.append('obj_info ' + c) + + lines.extend(elt.header for elt in self.elements) + lines.append('end_header') + return '\r\n'.join(lines) + + def __iter__(self): + return iter(self.elements) + + def __len__(self): + return len(self.elements) + + def __contains__(self, name): + return name in self._element_lookup + + def __getitem__(self, name): + return self._element_lookup[name] + + def __str__(self): + return self.header + + def __repr__(self): + return ('PlyData(%r, text=%r, byte_order=%r, ' + 'comments=%r, obj_info=%r)' % + (self.elements, self.text, self.byte_order, + self.comments, self.obj_info)) + + +def _open_stream(stream, read_or_write): + if hasattr(stream, read_or_write): + return (False, stream) + try: + return (True, open(stream, read_or_write[0] + 'b')) + except TypeError: + raise RuntimeError("expected open file or filename") + + +class PlyElement(object): + + ''' + PLY file element. + + A client of this library doesn't normally need to instantiate this + directly, so the following is only for the sake of documenting the + internals. + + Creating a PlyElement instance is generally done in one of two ways: + as a byproduct of PlyData.read (when reading a PLY file) and by + PlyElement.describe (before writing a PLY file). + + ''' + + def __init__(self, name, properties, count, comments=[]): + ''' + This is not part of the public interface. The preferred methods + of obtaining PlyElement instances are PlyData.read (to read from + a file) and PlyElement.describe (to construct from a numpy + array). 
+ + ''' + self._name = str(name) + self._check_name() + self._count = count + + self._properties = tuple(properties) + self._index() + + self.comments = list(comments) + + self._have_list = any(isinstance(p, PlyListProperty) + for p in self.properties) + + @property + def count(self): + return self._count + + def _get_data(self): + return self._data + + def _set_data(self, data): + self._data = data + self._count = len(data) + self._check_sanity() + + data = property(_get_data, _set_data) + + def _check_sanity(self): + for prop in self.properties: + if prop.name not in self._data.dtype.fields: + raise ValueError("dangling property %r" % prop.name) + + def _get_properties(self): + return self._properties + + def _set_properties(self, properties): + self._properties = tuple(properties) + self._check_sanity() + self._index() + + properties = property(_get_properties, _set_properties) + + def _index(self): + self._property_lookup = dict((prop.name, prop) + for prop in self._properties) + if len(self._property_lookup) != len(self._properties): + raise ValueError("two properties with same name") + + def ply_property(self, name): + return self._property_lookup[name] + + @property + def name(self): + return self._name + + def _check_name(self): + if any(c.isspace() for c in self._name): + msg = "element name %r contains spaces" % self._name + raise ValueError(msg) + + def dtype(self, byte_order='='): + ''' + Return the numpy dtype of the in-memory representation of the + data. (If there are no list properties, and the PLY format is + binary, then this also accurately describes the on-disk + representation of the element.) + + ''' + return [(prop.name, prop.dtype(byte_order)) + for prop in self.properties] + + @staticmethod + def _parse_multi(header_lines): + ''' + Parse a list of PLY element definitions. 
+ + ''' + elements = [] + while header_lines: + (elt, header_lines) = PlyElement._parse_one(header_lines) + elements.append(elt) + + return elements + + @staticmethod + def _parse_one(lines): + ''' + Consume one element definition. The unconsumed input is + returned along with a PlyElement instance. + + ''' + a = 0 + line = lines[a] + + if line[0] != 'element': + raise PlyParseError("expected 'element'") + if len(line) > 3: + raise PlyParseError("too many fields after 'element'") + if len(line) < 3: + raise PlyParseError("too few fields after 'element'") + + (name, count) = (line[1], int(line[2])) + + comments = [] + properties = [] + while True: + a += 1 + if a >= len(lines): + break + + if lines[a][0] == 'comment': + comments.append(lines[a][1]) + elif lines[a][0] == 'property': + properties.append(PlyProperty._parse_one(lines[a])) + else: + break + + return (PlyElement(name, properties, count, comments), + lines[a:]) + + @staticmethod + def describe(data, name, len_types={}, val_types={}, + comments=[]): + ''' + Construct a PlyElement from an array's metadata. + + len_types and val_types can be given as mappings from list + property names to type strings (like 'u1', 'f4', etc., or + 'int8', 'float32', etc.). These can be used to define the length + and value types of list properties. List property lengths + always default to type 'u1' (8-bit unsigned integer), and value + types default to 'i4' (32-bit integer). + + ''' + if not isinstance(data, _np.ndarray): + raise TypeError("only numpy arrays are supported") + + if len(data.shape) != 1: + raise ValueError("only one-dimensional arrays are " + "supported") + + count = len(data) + + properties = [] + descr = data.dtype.descr + + for t in descr: + if not isinstance(t[1], str): + raise ValueError("nested records not supported") + + if not t[0]: + raise ValueError("field with empty name") + + if len(t) != 2 or t[1][1] == 'O': + # non-scalar field, which corresponds to a list + # property in PLY. 
+ + if t[1][1] == 'O': + if len(t) != 2: + raise ValueError("non-scalar object fields not " + "supported") + + len_str = _data_type_reverse[len_types.get(t[0], 'u1')] + if t[1][1] == 'O': + val_type = val_types.get(t[0], 'i4') + val_str = _lookup_type(val_type) + else: + val_str = _lookup_type(t[1][1:]) + + prop = PlyListProperty(t[0], len_str, val_str) + else: + val_str = _lookup_type(t[1][1:]) + prop = PlyProperty(t[0], val_str) + + properties.append(prop) + + elt = PlyElement(name, properties, count, comments) + elt.data = data + + return elt + + def _read(self, stream, text, byte_order): + ''' + Read the actual data from a PLY file. + + ''' + if text: + self._read_txt(stream) + else: + if self._have_list: + # There are list properties, so a simple load is + # impossible. + self._read_bin(stream, byte_order) + else: + # There are no list properties, so loading the data is + # much more straightforward. + self._data = _np.fromfile(stream, + self.dtype(byte_order), + self.count) + + if len(self._data) < self.count: + k = len(self._data) + del self._data + raise PlyParseError("early end-of-file", self, k) + + self._check_sanity() + + def _write(self, stream, text, byte_order): + ''' + Write the data to a PLY file. + + ''' + if text: + self._write_txt(stream) + else: + if self._have_list: + # There are list properties, so serialization is + # slightly complicated. + self._write_bin(stream, byte_order) + else: + # no list properties, so serialization is + # straightforward. + self.data.astype(self.dtype(byte_order), + copy=False).tofile(stream) + + def _read_txt(self, stream): + ''' + Load a PLY element from an ASCII-format PLY file. The element + may contain list properties. 
+ + ''' + self._data = _np.empty(self.count, dtype=self.dtype()) + + k = 0 + for line in _islice(iter(stream.readline, b''), self.count): + fields = iter(line.strip().split()) + for prop in self.properties: + try: + self._data[prop.name][k] = prop._from_fields(fields) + except StopIteration: + raise PlyParseError("early end-of-line", + self, k, prop) + except ValueError: + raise PlyParseError("malformed input", + self, k, prop) + try: + next(fields) + except StopIteration: + pass + else: + raise PlyParseError("expected end-of-line", self, k) + k += 1 + + if k < self.count: + del self._data + raise PlyParseError("early end-of-file", self, k) + + def _write_txt(self, stream): + ''' + Save a PLY element to an ASCII-format PLY file. The element may + contain list properties. + + ''' + for rec in self.data: + fields = [] + for prop in self.properties: + fields.extend(prop._to_fields(rec[prop.name])) + + _np.savetxt(stream, [fields], '%.18g', newline='\r\n') + + def _read_bin(self, stream, byte_order): + ''' + Load a PLY element from a binary PLY file. The element may + contain list properties. + + ''' + self._data = _np.empty(self.count, dtype=self.dtype(byte_order)) + + for k in _range(self.count): + for prop in self.properties: + try: + self._data[prop.name][k] = \ + prop._read_bin(stream, byte_order) + except StopIteration: + raise PlyParseError("early end-of-file", + self, k, prop) + + def _write_bin(self, stream, byte_order): + ''' + Save a PLY element to a binary PLY file. The element may + contain list properties. + + ''' + for rec in self.data: + for prop in self.properties: + prop._write_bin(rec[prop.name], stream, byte_order) + + @property + def header(self): + ''' + Format this element's metadata as it would appear in a PLY + header. + + ''' + lines = ['element %s %d' % (self.name, self.count)] + + # Some information is lost here, since all comments are placed + # between the 'element' line and the first property definition. 
+ for c in self.comments: + lines.append('comment ' + c) + + lines.extend(list(map(str, self.properties))) + + return '\r\n'.join(lines) + + def __getitem__(self, key): + return self.data[key] + + def __setitem__(self, key, value): + self.data[key] = value + + def __str__(self): + return self.header + + def __repr__(self): + return ('PlyElement(%r, %r, count=%d, comments=%r)' % + (self.name, self.properties, self.count, + self.comments)) + + +class PlyProperty(object): + + ''' + PLY property description. This class is pure metadata; the data + itself is contained in PlyElement instances. + + ''' + + def __init__(self, name, val_dtype): + self._name = str(name) + self._check_name() + self.val_dtype = val_dtype + + def _get_val_dtype(self): + return self._val_dtype + + def _set_val_dtype(self, val_dtype): + self._val_dtype = _data_types[_lookup_type(val_dtype)] + + val_dtype = property(_get_val_dtype, _set_val_dtype) + + @property + def name(self): + return self._name + + def _check_name(self): + if any(c.isspace() for c in self._name): + msg = "Error: property name %r contains spaces" % self._name + raise RuntimeError(msg) + + @staticmethod + def _parse_one(line): + assert line[0] == 'property' + + if line[1] == 'list': + if len(line) > 5: + raise PlyParseError("too many fields after " + "'property list'") + if len(line) < 5: + raise PlyParseError("too few fields after " + "'property list'") + + return PlyListProperty(line[4], line[2], line[3]) + + else: + if len(line) > 3: + raise PlyParseError("too many fields after " + "'property'") + if len(line) < 3: + raise PlyParseError("too few fields after " + "'property'") + + return PlyProperty(line[2], line[1]) + + def dtype(self, byte_order='='): + ''' + Return the numpy dtype description for this property (as a tuple + of strings). + + ''' + return byte_order + self.val_dtype + + def _from_fields(self, fields): + ''' + Parse from generator. Raise StopIteration if the property could + not be read. 
+ + ''' + return _np.dtype(self.dtype()).type(next(fields)) + + def _to_fields(self, data): + ''' + Return generator over one item. + + ''' + yield _np.dtype(self.dtype()).type(data) + + def _read_bin(self, stream, byte_order): + ''' + Read data from a binary stream. Raise StopIteration if the + property could not be read. + + ''' + try: + return _np.fromfile(stream, self.dtype(byte_order), 1)[0] + except IndexError: + raise StopIteration + + def _write_bin(self, data, stream, byte_order): + ''' + Write data to a binary stream. + + ''' + _np.dtype(self.dtype(byte_order)).type(data).tofile(stream) + + def __str__(self): + val_str = _data_type_reverse[self.val_dtype] + return 'property %s %s' % (val_str, self.name) + + def __repr__(self): + return 'PlyProperty(%r, %r)' % (self.name, + _lookup_type(self.val_dtype)) + + +class PlyListProperty(PlyProperty): + + ''' + PLY list property description. + + ''' + + def __init__(self, name, len_dtype, val_dtype): + PlyProperty.__init__(self, name, val_dtype) + + self.len_dtype = len_dtype + + def _get_len_dtype(self): + return self._len_dtype + + def _set_len_dtype(self, len_dtype): + self._len_dtype = _data_types[_lookup_type(len_dtype)] + + len_dtype = property(_get_len_dtype, _set_len_dtype) + + def dtype(self, byte_order='='): + ''' + List properties always have a numpy dtype of "object". + + ''' + return '|O' + + def list_dtype(self, byte_order='='): + ''' + Return the pair (len_dtype, val_dtype) (both numpy-friendly + strings). + + ''' + return (byte_order + self.len_dtype, + byte_order + self.val_dtype) + + def _from_fields(self, fields): + (len_t, val_t) = self.list_dtype() + + n = int(_np.dtype(len_t).type(next(fields))) + + data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1) + if len(data) < n: + raise StopIteration + + return data + + def _to_fields(self, data): + ''' + Return generator over the (numerical) PLY representation of the + list data (length followed by actual data). 
+ + ''' + (len_t, val_t) = self.list_dtype() + + data = _np.asarray(data, dtype=val_t).ravel() + + yield _np.dtype(len_t).type(data.size) + for x in data: + yield x + + def _read_bin(self, stream, byte_order): + (len_t, val_t) = self.list_dtype(byte_order) + + try: + n = _np.fromfile(stream, len_t, 1)[0] + except IndexError: + raise StopIteration + + data = _np.fromfile(stream, val_t, n) + if len(data) < n: + raise StopIteration + + return data + + def _write_bin(self, data, stream, byte_order): + ''' + Write data to a binary stream. + + ''' + (len_t, val_t) = self.list_dtype(byte_order) + + data = _np.asarray(data, dtype=val_t).ravel() + + _np.array(data.size, dtype=len_t).tofile(stream) + data.tofile(stream) + + def __str__(self): + len_str = _data_type_reverse[self.len_dtype] + val_str = _data_type_reverse[self.val_dtype] + return 'property list %s %s %s' % (len_str, val_str, self.name) + + def __repr__(self): + return ('PlyListProperty(%r, %r, %r)' % + (self.name, + _lookup_type(self.len_dtype), + _lookup_type(self.val_dtype))) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pointnet_util.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pointnet_util.py" new file mode 100644 index 0000000..88ff701 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/pointnet_util.py" @@ -0,0 +1,230 @@ +""" PointNet++ Layers +Author: 
Charles R. Qi +Date: November 2017 +""" +import os +import sys +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +ROOT_DIR = os.path.dirname(BASE_DIR) +sys.path.append(os.path.join(ROOT_DIR, 'utils')) +sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling')) +sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/grouping')) +sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/3d_interpolation')) +from tf_sampling import farthest_point_sample, gather_point +from tf_grouping import query_ball_point, group_point, knn_point +from tf_interpolate import three_nn, three_interpolate +import tensorflow as tf + +import numpy as np +import tf_util + +def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True): + ''' + Input: + npoint: int32 + radius: float32 + nsample: int32 + xyz: (batch_size, ndataset, 3) TF tensor + points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points + knn: bool, if True use kNN instead of radius search + use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features + Output: + new_xyz: (batch_size, npoint, 3) TF tensor + new_points: (batch_size, npoint, nsample, 3+channel) TF tensor + idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points + grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs + (subtracted by seed point XYZ) in local regions + ''' + + new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3) + if knn: + _,idx = knn_point(nsample, xyz, new_xyz) + else: + idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz) + grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3) + grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization + if points is not None: + grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel) + if use_xyz: + new_points = tf.concat([grouped_xyz, grouped_points], 
axis=-1) # (batch_size, npoint, nample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz


def sample_and_group_all(xyz, points, use_xyz=True):
    '''
    Group ALL input points into a single region centered at the origin.
    Inputs:
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Outputs:
        new_xyz: (batch_size, 1, 3) as (0,0,0)
        new_points: (batch_size, 1, ndataset, 3+channel) TF tensor
    Note:
        Equivalent to sample_and_group with npoint=1, radius=inf, use (0,0,0) as the centroid
    '''
    # TF1-style static shapes (.value); this requires the batch size and the
    # number of points to be known at graph-construction time.
    batch_size = xyz.get_shape()[0].value
    nsample = xyz.get_shape()[1].value
    # One centroid per cloud, fixed at the origin.
    new_xyz = tf.constant(np.tile(np.array([0,0,0]).reshape((1,1,3)), (batch_size,1,1)),dtype=tf.float32) # (batch_size, 1, 3)
    # Every point belongs to the single group: indices 0..nsample-1 per cloud.
    idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1,1,nsample)), (batch_size,1,1)))
    grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3)) # (batch_size, npoint=1, nsample, 3)
    if points is not None:
        if use_xyz:
            # NOTE(review): the "(batch_size, 16, 259)" comments below look like
            # leftovers from one specific configuration — the general shape is
            # (batch_size, ndataset, 3+channel); confirm against callers.
            new_points = tf.concat([xyz, points], axis=2) # (batch_size, 16, 259)
        else:
            new_points = points
        new_points = tf.expand_dims(new_points, 1) # (batch_size, 1, 16, 259)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz


def pointnet_sa_module(xyz, points, npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    ''' PointNet Set Abstraction (SA) Module
        Input:
            xyz: (batch_size, ndataset, 3) TF tensor
            points: (batch_size, ndataset, channel) TF tensor
            npoint: int32 -- #points sampled in farthest point sampling
            radius: float32 -- search radius in local region
            nsample: int32 -- how many points in each local region
            mlp: list of int32 -- output size for MLP on each point
            mlp2: list of int32 -- output size for MLP on each
region + group_all: bool -- group all points into one PC if set true, OVERRIDE + npoint, radius and nsample settings + use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features + use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format + Return: + new_xyz: (batch_size, npoint, 3) TF tensor + new_points: (batch_size, npoint, mlp[-1] or mlp2[-1]) TF tensor + idx: (batch_size, npoint, nsample) int32 -- indices for local regions + ''' + data_format = 'NCHW' if use_nchw else 'NHWC' + with tf.variable_scope(scope) as sc: + # Sample and Grouping + if group_all: + nsample = xyz.get_shape()[1].value + new_xyz, new_points, idx, grouped_xyz = sample_and_group_all(xyz, points, use_xyz) + else: + new_xyz, new_points, idx, grouped_xyz = sample_and_group(npoint, radius, nsample, xyz, points, knn, use_xyz) + + # Point Feature Embedding + if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2]) + for i, num_out_channel in enumerate(mlp): + new_points = tf_util.conv2d(new_points, num_out_channel, [1,1], + padding='VALID', stride=[1,1], + bn=bn, is_training=is_training, + scope='conv%d'%(i), bn_decay=bn_decay, + data_format=data_format) + if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1]) + + # Pooling in Local Regions + if pooling=='max': + new_points = tf.reduce_max(new_points, axis=[2], keepdims=True, name='maxpool') + elif pooling=='avg': + new_points = tf.reduce_mean(new_points, axis=[2], keepdims=True, name='avgpool') + elif pooling=='weighted_avg': + with tf.variable_scope('weighted_avg'): + dists = tf.norm(grouped_xyz,axis=-1,ord=2,keepdims=True) + exp_dists = tf.exp(-dists * 5) + weights = exp_dists/tf.reduce_sum(exp_dists,axis=2,keepdims=True) # (batch_size, npoint, nsample, 1) + new_points *= weights # (batch_size, npoint, nsample, mlp[-1]) + new_points = tf.reduce_sum(new_points, axis=2, keepdims=True) + elif pooling=='max_and_avg': + max_points = tf.reduce_max(new_points, 
axis=[2], keepdims=True, name='maxpool') + avg_points = tf.reduce_mean(new_points, axis=[2], keepdims=True, name='avgpool') + new_points = tf.concat([avg_points, max_points], axis=-1) + + # [Optional] Further Processing + if mlp2 is not None: + if use_nchw: new_points = tf.transpose(new_points, [0,3,1,2]) + for i, num_out_channel in enumerate(mlp2): + new_points = tf_util.conv2d(new_points, num_out_channel, [1,1], + padding='VALID', stride=[1,1], + bn=bn, is_training=is_training, + scope='conv_post_%d'%(i), bn_decay=bn_decay, + data_format=data_format) + if use_nchw: new_points = tf.transpose(new_points, [0,2,3,1]) + + new_points = tf.squeeze(new_points, [2]) # (batch_size, npoints, mlp2[-1]) + return new_xyz, new_points, idx + +def pointnet_sa_module_msg(xyz, points, npoint, radius_list, nsample_list, mlp_list, is_training, bn_decay, scope, bn=True, use_xyz=True, use_nchw=False): + ''' PointNet Set Abstraction (SA) module with Multi-Scale Grouping (MSG) + Input: + xyz: (batch_size, ndataset, 3) TF tensor + points: (batch_size, ndataset, channel) TF tensor + npoint: int32 -- #points sampled in farthest point sampling + radius: list of float32 -- search radius in local region + nsample: list of int32 -- how many points in each local region + mlp: list of list of int32 -- output size for MLP on each point + use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features + use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format + Return: + new_xyz: (batch_size, npoint, 3) TF tensor + new_points: (batch_size, npoint, \sum_k{mlp[k][-1]}) TF tensor + ''' + data_format = 'NCHW' if use_nchw else 'NHWC' + with tf.variable_scope(scope) as sc: + new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) + new_points_list = [] + for i in range(len(radius_list)): + radius = radius_list[i] + nsample = nsample_list[i] + idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz) + grouped_xyz = 
group_point(xyz, idx) + grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) + if points is not None: + grouped_points = group_point(points, idx) + if use_xyz: + grouped_points = tf.concat([grouped_points, grouped_xyz], axis=-1) + else: + grouped_points = grouped_xyz + if use_nchw: grouped_points = tf.transpose(grouped_points, [0,3,1,2]) + for j,num_out_channel in enumerate(mlp_list[i]): + grouped_points = tf_util.conv2d(grouped_points, num_out_channel, [1,1], + padding='VALID', stride=[1,1], bn=bn, is_training=is_training, + scope='conv%d_%d'%(i,j), bn_decay=bn_decay) + if use_nchw: grouped_points = tf.transpose(grouped_points, [0,2,3,1]) + new_points = tf.reduce_max(grouped_points, axis=[2]) + new_points_list.append(new_points) + new_points_concat = tf.concat(new_points_list, axis=-1) + return new_xyz, new_points_concat + + +def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True): + ''' PointNet Feature Propogation (FP) Module + Input: + xyz1: (batch_size, ndataset1, 3) TF tensor + xyz2: (batch_size, ndataset2, 3) TF tensor, sparser than xyz1 + points1: (batch_size, ndataset1, nchannel1) TF tensor + points2: (batch_size, ndataset2, nchannel2) TF tensor + mlp: list of int32 -- output size for MLP on each point + Return: + new_points: (batch_size, ndataset1, mlp[-1]) TF tensor + ''' + with tf.variable_scope(scope) as sc: + dist, idx = three_nn(xyz1, xyz2) + dist = tf.maximum(dist, 1e-10) + #修改, + norm = tf.reduce_sum((1.0/dist),axis=2,keepdims=True) + + norm = tf.tile(norm,[1,1,3]) + weight = (1.0/dist) / norm + interpolated_points = three_interpolate(points2, idx, weight) + + if points1 is not None: + new_points1 = tf.concat(axis=2, values=[interpolated_points, points1]) # B,ndataset1,nchannel1+nchannel2 + else: + new_points1 = interpolated_points + new_points1 = tf.expand_dims(new_points1, 2) + for i, num_out_channel in enumerate(mlp): + new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1,1], + 
padding='VALID', stride=[1,1], + bn=bn, is_training=is_training, + scope='conv_%d'%(i), bn_decay=bn_decay) + new_points1 = tf.squeeze(new_points1, [2]) # B,ndataset1,mlp[-1] + return new_points1 diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/provider.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/provider.py" new file mode 100644 index 0000000..95118f5 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/provider.py" @@ -0,0 +1,247 @@ +import os +import sys +import numpy as np +import h5py +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +def shuffle_data(data, labels): + """ Shuffle data and labels. + Input: + data: B,N,... numpy array + label: B,... numpy array + Return: + shuffled data, label and shuffle indices + """ + idx = np.arange(len(labels)) + np.random.shuffle(idx) + return data[idx, ...], labels[idx], idx + +def shuffle_points(batch_data): + """ Shuffle orders of points in each point cloud -- changes FPS behavior. + Use the same shuffling idx for the entire batch. 
# --- fragment: tail of shuffle_points(), whose `def` line sits in the previous
# --- chunk of this collapsed patch; preserved verbatim as a comment.
#     Input:
#         BxNxC array
#     Output:
#         BxNxC array
#     """
#     idx = np.arange(batch_data.shape[1])
#     np.random.shuffle(idx)
#     return batch_data[:,idx,:]

def rotate_point_cloud(batch_data):
    """ Randomly rotate the point clouds to augment the dataset.

    Each cloud receives its own uniform random angle about the Y (up) axis.
    Input:
      BxNx3 array, original batch of point clouds
    Return:
      BxNx3 array, rotated batch of point clouds
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for b in range(batch_data.shape[0]):
        theta = np.random.uniform() * 2 * np.pi
        c = np.cos(theta)
        s = np.sin(theta)
        # Row-vector convention: p' = p @ R (rotation about Y).
        rot = np.array([[c, 0, s],
                        [0, 1, 0],
                        [-s, 0, c]])
        rotated[b, ...] = np.dot(batch_data[b, ...].reshape((-1, 3)), rot)
    return rotated


def rotate_point_cloud_z(batch_data):
    """ Randomly rotate the point clouds about the Z axis (one angle per cloud).

    Input:
      BxNx3 array, original batch of point clouds
    Return:
      BxNx3 array, rotated batch of point clouds
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for b in range(batch_data.shape[0]):
        theta = np.random.uniform() * 2 * np.pi
        c = np.cos(theta)
        s = np.sin(theta)
        # Rotation about Z, applied with the same row-vector convention.
        rot = np.array([[c, s, 0],
                        [-s, c, 0],
                        [0, 0, 1]])
        rotated[b, ...] = np.dot(batch_data[b, ...].reshape((-1, 3)), rot)
    return rotated


def rotate_point_cloud_with_normal(batch_xyz_normal):
    """ Randomly rotate XYZ + normal point clouds about the Y axis, IN PLACE.

    Input:
      batch_xyz_normal: B,N,6 — first three channels are XYZ, last three the normal
    Output:
      B,N,6 — the same (mutated) array, with XYZ and normals rotated together
    """
    for b in range(batch_xyz_normal.shape[0]):
        theta = np.random.uniform() * 2 * np.pi
        c = np.cos(theta)
        s = np.sin(theta)
        rot = np.array([[c, 0, s],
                        [0, 1, 0],
                        [-s, 0, c]])
        # Points and normals must rotate by the same matrix to stay consistent.
        batch_xyz_normal[b, :, 0:3] = np.dot(
            batch_xyz_normal[b, :, 0:3].reshape((-1, 3)), rot)
        batch_xyz_normal[b, :, 3:6] = np.dot(
            batch_xyz_normal[b, :, 3:6].reshape((-1, 3)), rot)
    return batch_xyz_normal


def rotate_perturbation_point_cloud_with_normal(batch_data, angle_sigma=0.06, angle_clip=0.18):
    """ Perturb each cloud (points and normals) with a small random 3-axis rotation.

    Input:
      BxNx6 array, original batch of point clouds and point normals
      angle_sigma: std-dev of the Gaussian per-axis angles
      angle_clip: per-axis angles are clipped to [-angle_clip, angle_clip]
    Return:
      BxNx6 array, rotated batch of point clouds (new array)
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for b in range(batch_data.shape[0]):
        ang = np.clip(angle_sigma * np.random.randn(3), -angle_clip, angle_clip)
        cx, sx = np.cos(ang[0]), np.sin(ang[0])
        cy, sy = np.cos(ang[1]), np.sin(ang[1])
        cz, sz = np.cos(ang[2]), np.sin(ang[2])
        rx = np.array([[1, 0, 0], [0, cx, -sx], [0, sx, cx]])
        ry = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
        rz = np.array([[cz, -sz, 0], [sz, cz, 0], [0, 0, 1]])
        # Composite rotation R = Rz @ Ry @ Rx.
        rot = np.dot(rz, np.dot(ry, rx))
        rotated[b, :, 0:3] = np.dot(batch_data[b, :, 0:3].reshape((-1, 3)), rot)
        rotated[b, :, 3:6] = np.dot(batch_data[b, :, 3:6].reshape((-1, 3)), rot)
    return rotated


# --- fragment: head of rotate_point_cloud_by_angle(), continued in the next
# --- chunk of this collapsed patch; preserved verbatim as a comment.
# def rotate_point_cloud_by_angle(batch_data, rotation_angle):
#     """ Rotate the point cloud along up direction with certain angle.
# NOTE(review): this chunk begins mid-way through rotate_point_cloud_by_angle()
# (its `def` line sits in the previous chunk of this collapsed patch); the
# complete function is restated here so the unit is self-contained.

def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """ Rotate each point cloud about the Y (up) axis by a fixed angle.
    Input:
      batch_data: BxNx3 array, original batch of point clouds
      rotation_angle: scalar angle in radians
    Return:
      BxNx3 array, rotated batch of point clouds (new float32 array)
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    # The angle is fixed for the whole batch, so build the matrix once
    # instead of once per cloud (loop-invariant hoist; results identical).
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    rotation_matrix = np.array([[cosval, 0, sinval],
                                [0, 1, 0],
                                [-sinval, 0, cosval]])
    for k in range(batch_data.shape[0]):
        shape_pc = batch_data[k, :, 0:3]
        rotated_data[k, :, 0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
    return rotated_data


def rotate_point_cloud_by_angle_with_normal(batch_data, rotation_angle):
    """ Rotate point clouds WITH normals about the Y axis by a fixed angle.
    Input:
      batch_data: BxNx6 array, point clouds with normals (XYZ in 0:3, normal in 3:6)
      rotation_angle: scalar angle in radians
    Return:
      BxNx6 array, rotated batch of point clouds with normals (new float32 array)
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    # Loop-invariant rotation matrix, hoisted out of the per-cloud loop.
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    rotation_matrix = np.array([[cosval, 0, sinval],
                                [0, 1, 0],
                                [-sinval, 0, cosval]])
    for k in range(batch_data.shape[0]):
        shape_pc = batch_data[k, :, 0:3]
        shape_normal = batch_data[k, :, 3:6]
        # Points and normals rotate by the same matrix to stay consistent.
        rotated_data[k, :, 0:3] = np.dot(shape_pc.reshape((-1, 3)), rotation_matrix)
        rotated_data[k, :, 3:6] = np.dot(shape_normal.reshape((-1, 3)), rotation_matrix)
    return rotated_data


def rotate_perturbation_point_cloud(batch_data, angle_sigma=0.06, angle_clip=0.18):
    """ Perturb each point cloud with a small random 3-axis rotation.
    Input:
      BxNx3 array, original batch of point clouds
      angle_sigma: std-dev of the Gaussian per-axis angles
      angle_clip: per-axis angles are clipped to [-angle_clip, angle_clip]
    Return:
      BxNx3 array, rotated batch of point clouds (new float32 array)
    """
    rotated_data = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        angles = np.clip(angle_sigma * np.random.randn(3), -angle_clip, angle_clip)
        Rx = np.array([[1, 0, 0],
                       [0, np.cos(angles[0]), -np.sin(angles[0])],
                       [0, np.sin(angles[0]), np.cos(angles[0])]])
        Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])],
                       [0, 1, 0],
                       [-np.sin(angles[1]), 0, np.cos(angles[1])]])
        Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0],
                       [np.sin(angles[2]), np.cos(angles[2]), 0],
                       [0, 0, 1]])
        # Composite rotation R = Rz @ Ry @ Rx, one fresh draw per cloud.
        R = np.dot(Rz, np.dot(Ry, Rx))
        shape_pc = batch_data[k, ...]
        rotated_data[k, ...] = np.dot(shape_pc.reshape((-1, 3)), R)
    return rotated_data


def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """ Randomly jitter points; the jitter is independent per point/coordinate.
    Input:
      BxNx3 array, original batch of point clouds
      sigma: std-dev of the Gaussian noise
      clip: noise is clipped to [-clip, clip]; must be > 0
    Return:
      BxNx3 array, jittered batch of point clouds (new array; input untouched)
    Raises:
      ValueError: if clip is not positive. (The original used a bare
        `assert`, which silently disappears under `python -O`.)
    """
    B, N, C = batch_data.shape
    if clip <= 0:
        raise ValueError("clip must be positive")
    jittered_data = np.clip(sigma * np.random.randn(B, N, C), -1 * clip, clip)
    jittered_data += batch_data
    return jittered_data


def shift_point_cloud(batch_data, shift_range=0.1):
    """ Randomly shift each point cloud (one XYZ offset per cloud), IN PLACE.
    Input:
      BxNx3 array, original batch of point clouds
      shift_range: offsets drawn uniformly from [-shift_range, shift_range]
    Return:
      BxNx3 array, shifted batch of point clouds (the same, mutated, object)
    """
    B, N, C = batch_data.shape
    shifts = np.random.uniform(-shift_range, shift_range, (B, 3))
    # Broadcast one (3,) offset over every point of its cloud; identical to
    # the original per-batch loop but done in a single vectorized add.
    batch_data += shifts[:, None, :]
    return batch_data


# --- fragment: head of random_scale_point_cloud(), continued in the next
# --- chunk of this collapsed patch; preserved verbatim as a comment.
# def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
#     """ Randomly scale the point cloud. Scale is per point cloud.
# NOTE(review): this chunk begins mid-way through random_scale_point_cloud()
# (its `def` line sits in the previous chunk of this collapsed patch); the
# complete function is restated here so the unit is self-contained.

def random_scale_point_cloud(batch_data, scale_low=0.8, scale_high=1.25):
    """ Randomly scale each point cloud (one scalar per cloud), IN PLACE.
    Input:
      BxNx3 array, original batch of point clouds
      scale_low, scale_high: per-cloud scale drawn uniformly from this range
    Return:
      BxNx3 array, scaled batch of point clouds (the same, mutated, object)
    """
    B, N, C = batch_data.shape
    scales = np.random.uniform(scale_low, scale_high, B)
    # Broadcast one scalar over every point of its cloud; identical to the
    # original per-batch loop but done in a single vectorized multiply.
    batch_data *= scales[:, None, None]
    return batch_data


def random_point_dropout(batch_pc, max_dropout_ratio=0.875):
    """ Randomly replace a subset of each cloud's points with its first point.
    batch_pc: BxNx3 array; mutated in place and returned.
    A dropout ratio is drawn per cloud in [0, max_dropout_ratio); duplicating
    the first point removes information while keeping the tensor shape fixed.
    """
    for b in range(batch_pc.shape[0]):
        dropout_ratio = np.random.random() * max_dropout_ratio  # 0 ~ max_dropout_ratio
        drop_idx = np.where(np.random.random((batch_pc.shape[1])) <= dropout_ratio)[0]
        if len(drop_idx) > 0:
            batch_pc[b, drop_idx, :] = batch_pc[b, 0, :]  # set to the first point
    return batch_pc


def getDataFiles(list_filename):
    """ Read a list file: one data-file path per line (trailing whitespace stripped). """
    # Context manager closes the handle deterministically; the original left
    # the open file object to the garbage collector.
    with open(list_filename) as f:
        return [line.rstrip() for line in f]


def load_h5(h5_filename):
    """ Load the 'data' and 'label' datasets from an HDF5 file.
    Returns (data, label) as in-memory numpy arrays.
    """
    # Explicit read mode: h5py's implicit default mode is deprecated (and
    # removed in recent h5py); the context manager closes the file after the
    # datasets have been sliced into memory.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
    return (data, label)


def loadDataFile(filename):
    """ Alias for load_h5, kept for API compatibility with callers. """
    return load_h5(filename)


# --- fragment: end of provider.py followed by the git diff header and the
# --- first lines of utils/render_balls_so.cpp (the C++ source is truncated /
# --- garbled in this collapsed patch); preserved as a comment:
#   diff --git a/.../utils/render_balls_so.cpp b/.../utils/render_balls_so.cpp
#   #include  #include  #include  #include
#   using namespace std;
#   struct PointInfo{ int x,y,z; float r,g,b;
+}; + +extern "C"{ + +void render_ball(int h,int w,unsigned char * show,int n,int * xyzs,float * c0,float * c1,float * c2,int r){ + r=max(r,1); + vector depth(h*w,-2100000000); + vector pattern; + for (int dx=-r;dx<=r;dx++) + for (int dy=-r;dy<=r;dy++) + if (dx*dx+dy*dy=h || y2<0 || y2>=w) && depth[x2*w+y2]0: + show[:,:,0]=np.maximum(show[:,:,0],np.roll(show[:,:,0],1,axis=0)) + if magnifyBlue>=2: + show[:,:,0]=np.maximum(show[:,:,0],np.roll(show[:,:,0],-1,axis=0)) + show[:,:,0]=np.maximum(show[:,:,0],np.roll(show[:,:,0],1,axis=1)) + if magnifyBlue>=2: + show[:,:,0]=np.maximum(show[:,:,0],np.roll(show[:,:,0],-1,axis=1)) + if showrot: + cv2.putText(show,'xangle %d'%(int(xangle/np.pi*180)),(30,showsz-30),0,0.5,cv2.cv.CV_RGB(255,0,0)) + cv2.putText(show,'yangle %d'%(int(yangle/np.pi*180)),(30,showsz-50),0,0.5,cv2.cv.CV_RGB(255,0,0)) + cv2.putText(show,'zoom %d%%'%(int(zoom*100)),(30,showsz-70),0,0.5,cv2.cv.CV_RGB(255,0,0)) + changed=True + while True: + if changed: + render() + changed=False + cv2.imshow('show3d',show) + if waittime==0: + cmd=cv2.waitKey(10)%256 + else: + cmd=cv2.waitKey(waittime)%256 + if cmd==ord('q'): + break + elif cmd==ord('Q'): + sys.exit(0) + + if cmd==ord('t') or cmd == ord('p'): + if cmd == ord('t'): + if c_gt is None: + c0=np.zeros((len(xyz),),dtype='float32')+255 + c1=np.zeros((len(xyz),),dtype='float32')+255 + c2=np.zeros((len(xyz),),dtype='float32')+255 + else: + c0=c_gt[:,0] + c1=c_gt[:,1] + c2=c_gt[:,2] + else: + if c_pred is None: + c0=np.zeros((len(xyz),),dtype='float32')+255 + c1=np.zeros((len(xyz),),dtype='float32')+255 + c2=np.zeros((len(xyz),),dtype='float32')+255 + else: + c0=c_pred[:,0] + c1=c_pred[:,1] + c2=c_pred[:,2] + if normalizecolor: + c0/=(c0.max()+1e-14)/255.0 + c1/=(c1.max()+1e-14)/255.0 + c2/=(c2.max()+1e-14)/255.0 + c0=np.require(c0,'float32','C') + c1=np.require(c1,'float32','C') + c2=np.require(c2,'float32','C') + changed = True + + + + if cmd==ord('n'): + zoom*=1.1 + changed=True + elif cmd==ord('m'): + zoom/=1.1 + 
changed=True + elif cmd==ord('r'): + zoom=1.0 + changed=True + elif cmd==ord('s'): + cv2.imwrite('show3d.png',show) + if waittime!=0: + break + return cmd +if __name__=='__main__': + np.random.seed(100) + showpoints(np.random.randn(2500,3)) + diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/tf_util.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/tf_util.py" new file mode 100644 index 0000000..2f58cb0 --- /dev/null +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/utils/tf_util.py" @@ -0,0 +1,616 @@ +""" Wrapper functions for TensorFlow layers. + +Author: Charles R. Qi +Date: November 2017 +""" + +import numpy as np +import tensorflow as tf + +def _variable_on_cpu(name, shape, initializer, use_fp16=False): + """Helper to create a Variable stored on CPU memory. + Args: + name: name of the variable + shape: list of ints + initializer: initializer for Variable + Returns: + Variable Tensor + """ + with tf.device("/cpu:0"): + dtype = tf.float16 if use_fp16 else tf.float32 + var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype) + return var + +def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True): + """Helper to create an initialized Variable with weight decay. + + Note that the Variable is initialized with a truncated normal distribution. + A weight decay is added only if one is specified. 
+ + Args: + name: name of the variable + shape: list of ints + stddev: standard deviation of a truncated Gaussian + wd: add L2Loss weight decay multiplied by this float. If None, weight + decay is not added for this Variable. + use_xavier: bool, whether to use xavier initializer + + Returns: + Variable Tensor + """ + if use_xavier: + initializer = tf.contrib.layers.xavier_initializer() + else: + initializer = tf.truncated_normal_initializer(stddev=stddev) + var = _variable_on_cpu(name, shape, initializer) + if wd is not None: + weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss') + tf.add_to_collection('losses', weight_decay) + return var + + +def conv1d(inputs, + num_output_channels, + kernel_size, + scope, + stride=1, + padding='SAME', + data_format='NHWC', + use_xavier=True, + stddev=1e-3, + weight_decay=None, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 1D convolution with non-linear operation. + + Args: + inputs: 3-D tensor variable BxLxC + num_output_channels: int + kernel_size: int + scope: string + stride: int + padding: 'SAME' or 'VALID' + data_format: 'NHWC' or 'NCHW' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + assert(data_format=='NHWC' or data_format=='NCHW') + if data_format == 'NHWC': + num_in_channels = inputs.get_shape()[-1].value + elif data_format=='NCHW': + num_in_channels = inputs.get_shape()[1].value + kernel_shape = [kernel_size, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + outputs = tf.nn.conv1d(inputs, kernel, + stride=stride, + padding=padding, + 
data_format=data_format) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases, data_format=data_format) + + if bn: + outputs = batch_norm_for_conv1d(outputs, is_training, + bn_decay=bn_decay, scope='bn', + data_format=data_format) + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + + + +def conv2d(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1], + padding='SAME', + data_format='NHWC', + use_xavier=True, + stddev=1e-3, + weight_decay=None, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 2D convolution with non-linear operation. + + Args: + inputs: 4-D tensor variable BxHxWxC + num_output_channels: int + kernel_size: a list of 2 ints + scope: string + stride: a list of 2 ints + padding: 'SAME' or 'VALID' + data_format: 'NHWC' or 'NCHW' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + assert(data_format=='NHWC' or data_format=='NCHW') + if data_format == 'NHWC': + num_in_channels = inputs.get_shape()[-1].value + elif data_format=='NCHW': + num_in_channels = inputs.get_shape()[1].value + kernel_shape = [kernel_h, kernel_w, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_h, stride_w = stride + outputs = tf.nn.conv2d(inputs, kernel, + [1, stride_h, stride_w, 1], + padding=padding, + data_format=data_format) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.constant_initializer(0.0)) + outputs = 
tf.nn.bias_add(outputs, biases, data_format=data_format) + + if bn: + outputs = batch_norm_for_conv2d(outputs, is_training, + bn_decay=bn_decay, scope='bn', + data_format=data_format) + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +def conv2d_transpose(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1], + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=None, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 2D convolution transpose with non-linear operation. + + Args: + inputs: 4-D tensor variable BxHxWxC + num_output_channels: int + kernel_size: a list of 2 ints + scope: string + stride: a list of 2 ints + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + + Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a + """ + with tf.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + num_in_channels = inputs.get_shape()[-1].value + kernel_shape = [kernel_h, kernel_w, + num_output_channels, num_in_channels] # reversed to conv2d + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_h, stride_w = stride + + # from slim.convolution2d_transpose + def get_deconv_dim(dim_size, stride_size, kernel_size, padding): + dim_size *= stride_size + + if padding == 'VALID' and dim_size is not None: + dim_size += max(kernel_size - stride_size, 0) + return dim_size + + # caculate output shape + batch_size = inputs.get_shape()[0].value + height = inputs.get_shape()[1].value + width = inputs.get_shape()[2].value + out_height = get_deconv_dim(height, stride_h, 
kernel_h, padding) + out_width = get_deconv_dim(width, stride_w, kernel_w, padding) + output_shape = [batch_size, out_height, out_width, num_output_channels] + + outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape, + [1, stride_h, stride_w, 1], + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv2d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + + +def conv3d(inputs, + num_output_channels, + kernel_size, + scope, + stride=[1, 1, 1], + padding='SAME', + use_xavier=True, + stddev=1e-3, + weight_decay=None, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ 3D convolution with non-linear operation. + + Args: + inputs: 5-D tensor variable BxDxHxWxC + num_output_channels: int + kernel_size: a list of 3 ints + scope: string + stride: a list of 3 ints + padding: 'SAME' or 'VALID' + use_xavier: bool, use xavier_initializer if true + stddev: float, stddev for truncated_normal init + weight_decay: float + activation_fn: function + bn: bool, whether to use batch norm + bn_decay: float or float tensor variable in [0,1] + is_training: bool Tensor variable + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + num_in_channels = inputs.get_shape()[-1].value + kernel_shape = [kernel_d, kernel_h, kernel_w, + num_in_channels, num_output_channels] + kernel = _variable_with_weight_decay('weights', + shape=kernel_shape, + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + stride_d, stride_h, stride_w = stride + outputs = tf.nn.conv3d(inputs, kernel, + [1, stride_d, stride_h, stride_w, 1], + padding=padding) + biases = _variable_on_cpu('biases', [num_output_channels], + tf.constant_initializer(0.0)) + outputs = 
tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_conv3d(outputs, is_training, + bn_decay=bn_decay, scope='bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + +def fully_connected(inputs, + num_outputs, + scope, + use_xavier=True, + stddev=1e-3, + weight_decay=None, + activation_fn=tf.nn.relu, + bn=False, + bn_decay=None, + is_training=None): + """ Fully connected layer with non-linear operation. + + Args: + inputs: 2-D tensor BxN + num_outputs: int + + Returns: + Variable tensor of size B x num_outputs. + """ + with tf.variable_scope(scope) as sc: + num_input_units = inputs.get_shape()[-1].value + weights = _variable_with_weight_decay('weights', + shape=[num_input_units, num_outputs], + use_xavier=use_xavier, + stddev=stddev, + wd=weight_decay) + outputs = tf.matmul(inputs, weights) + biases = _variable_on_cpu('biases', [num_outputs], + tf.constant_initializer(0.0)) + outputs = tf.nn.bias_add(outputs, biases) + + if bn: + outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn') + + if activation_fn is not None: + outputs = activation_fn(outputs) + return outputs + + +def max_pool2d(inputs, + kernel_size, + scope, + stride=[2, 2], + padding='VALID'): + """ 2D max pooling. + + Args: + inputs: 4-D tensor BxHxWxC + kernel_size: a list of 2 ints + stride: a list of 2 ints + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + stride_h, stride_w = stride + outputs = tf.nn.max_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + +def avg_pool2d(inputs, + kernel_size, + scope, + stride=[2, 2], + padding='VALID'): + """ 2D avg pooling. 
+ + Args: + inputs: 4-D tensor BxHxWxC + kernel_size: a list of 2 ints + stride: a list of 2 ints + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_h, kernel_w = kernel_size + stride_h, stride_w = stride + outputs = tf.nn.avg_pool(inputs, + ksize=[1, kernel_h, kernel_w, 1], + strides=[1, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +def max_pool3d(inputs, + kernel_size, + scope, + stride=[2, 2, 2], + padding='VALID'): + """ 3D max pooling. + + Args: + inputs: 5-D tensor BxDxHxWxC + kernel_size: a list of 3 ints + stride: a list of 3 ints + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + stride_d, stride_h, stride_w = stride + outputs = tf.nn.max_pool3d(inputs, + ksize=[1, kernel_d, kernel_h, kernel_w, 1], + strides=[1, stride_d, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + +def avg_pool3d(inputs, + kernel_size, + scope, + stride=[2, 2, 2], + padding='VALID'): + """ 3D avg pooling. + + Args: + inputs: 5-D tensor BxDxHxWxC + kernel_size: a list of 3 ints + stride: a list of 3 ints + + Returns: + Variable tensor + """ + with tf.variable_scope(scope) as sc: + kernel_d, kernel_h, kernel_w = kernel_size + stride_d, stride_h, stride_w = stride + outputs = tf.nn.avg_pool3d(inputs, + ksize=[1, kernel_d, kernel_h, kernel_w, 1], + strides=[1, stride_d, stride_h, stride_w, 1], + padding=padding, + name=sc.name) + return outputs + + +def batch_norm_template_unused(inputs, is_training, scope, moments_dims, bn_decay): + """ NOTE: this is older version of the util func. it is deprecated. + Batch normalization on convolutional maps and beyond... + Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow + + Args: + inputs: Tensor, k-D input ... 
x C could be BC or BHWC or BDHWC + is_training: boolean tf.Varialbe, true indicates training phase + scope: string, variable scope + moments_dims: a list of ints, indicating dimensions for moments calculation + bn_decay: float or float tensor variable, controling moving average weight + Return: + normed: batch-normalized maps + """ + with tf.variable_scope(scope) as sc: + num_channels = inputs.get_shape()[-1].value + beta = _variable_on_cpu(name='beta',shape=[num_channels], + initializer=tf.constant_initializer(0)) + gamma = _variable_on_cpu(name='gamma',shape=[num_channels], + initializer=tf.constant_initializer(1.0)) + batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments') + decay = bn_decay if bn_decay is not None else 0.9 + ema = tf.train.ExponentialMovingAverage(decay=decay) + # Operator that maintains moving averages of variables. + # Need to set reuse=False, otherwise if reuse, will see moments_1/mean/ExponentialMovingAverage/ does not exist + # https://github.com/shekkizh/WassersteinGAN.tensorflow/issues/3 + with tf.variable_scope(tf.get_variable_scope(), reuse=False): + ema_apply_op = tf.cond(is_training, + lambda: ema.apply([batch_mean, batch_var]), + lambda: tf.no_op()) + + # Update moving average and return current batch's avg and var. + def mean_var_with_update(): + with tf.control_dependencies([ema_apply_op]): + return tf.identity(batch_mean), tf.identity(batch_var) + + # ema.average returns the Variable holding the average of var. + mean, var = tf.cond(is_training, + mean_var_with_update, + lambda: (ema.average(batch_mean), ema.average(batch_var))) + normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3) + return normed + + +def batch_norm_template(inputs, is_training, scope, moments_dims_unused, bn_decay, data_format='NHWC'): + """ Batch normalization on convolutional maps and beyond... 
+ Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow + + Args: + inputs: Tensor, k-D input ... x C could be BC or BHWC or BDHWC + is_training: boolean tf.Varialbe, true indicates training phase + scope: string, variable scope + moments_dims: a list of ints, indicating dimensions for moments calculation + bn_decay: float or float tensor variable, controling moving average weight + data_format: 'NHWC' or 'NCHW' + Return: + normed: batch-normalized maps + """ + bn_decay = bn_decay if bn_decay is not None else 0.9 + return tf.contrib.layers.batch_norm(inputs, + center=True, scale=True, + is_training=is_training, decay=bn_decay,updates_collections=None, + scope=scope, + data_format=data_format) + + +def batch_norm_for_fc(inputs, is_training, bn_decay, scope): + """ Batch normalization on FC data. + + Args: + inputs: Tensor, 2D BxC input + is_training: boolean tf.Varialbe, true indicates training phase + bn_decay: float or float tensor variable, controling moving average weight + scope: string, variable scope + Return: + normed: batch-normalized maps + """ + return batch_norm_template(inputs, is_training, scope, [0,], bn_decay) + + +def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, data_format): + """ Batch normalization on 1D convolutional maps. + + Args: + inputs: Tensor, 3D BLC input maps + is_training: boolean tf.Varialbe, true indicates training phase + bn_decay: float or float tensor variable, controling moving average weight + scope: string, variable scope + data_format: 'NHWC' or 'NCHW' + Return: + normed: batch-normalized maps + """ + return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay, data_format) + + + + +def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, data_format): + """ Batch normalization on 2D convolutional maps. 
+ + Args: + inputs: Tensor, 4D BHWC input maps + is_training: boolean tf.Varialbe, true indicates training phase + bn_decay: float or float tensor variable, controling moving average weight + scope: string, variable scope + data_format: 'NHWC' or 'NCHW' + Return: + normed: batch-normalized maps + """ + return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay, data_format) + + +def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope): + """ Batch normalization on 3D convolutional maps. + + Args: + inputs: Tensor, 5D BDHWC input maps + is_training: boolean tf.Varialbe, true indicates training phase + bn_decay: float or float tensor variable, controling moving average weight + scope: string, variable scope + Return: + normed: batch-normalized maps + """ + return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay) + + +def dropout(inputs, + is_training, + scope, + keep_prob=0.5, + noise_shape=None): + """ Dropout layer. + + Args: + inputs: tensor + is_training: boolean tf.Variable + scope: string + keep_prob: float in [0,1] + noise_shape: list of ints + + Returns: + tensor variable + """ + with tf.variable_scope(scope) as sc: + outputs = tf.cond(is_training, + # lambda: tf.nn.dropout(inputs, keep_prob, noise_shape), + lambda: tf.nn.dropout(inputs, keep_prob, noise_shape), + lambda: inputs) + return outputs -- Gitee From 5455c4f01f3837f86163802ba3909c9ec684be74 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:36:16 +0000 Subject: [PATCH 62/69] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20co?= =?UTF-8?q?de/2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E?= =?UTF-8?q?3D=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0?= =?UTF-8?q?=E7=9A=84=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86?= =?UTF-8?q?=E5=88=AB/models/pointnet=5Fcls.py=20for=20torch?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
.../models/pointnet_cls.py for torch" | 40 ------------------- 1 file changed, 40 deletions(-) delete mode 100644 "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" deleted file mode 100644 index 520e8f2..0000000 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet_cls.py for torch" +++ /dev/null @@ -1,40 +0,0 @@ -import torch.nn as nn -import torch.utils.data -import torch.nn.functional as F -from pointnet import PointNetEncoder, feature_transform_reguliarzer - -class get_model(nn.Module): - def __init__(self, k=40, normal_channel=True): - super(get_model, self).__init__() - if normal_channel: - channel = 6 - else: - channel = 3 - self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel) - self.fc1 = nn.Linear(1024, 512) - self.fc2 = nn.Linear(512, 256) - self.fc3 = nn.Linear(256, k) - self.dropout = nn.Dropout(p=0.4) - self.bn1 = nn.BatchNorm1d(512) - self.bn2 = nn.BatchNorm1d(256) - self.relu = nn.ReLU() - - def 
forward(self, x): - x, trans, trans_feat = self.feat(x) - x = F.relu(self.bn1(self.fc1(x))) - x = F.relu(self.bn2(self.dropout(self.fc2(x)))) - x = self.fc3(x) - x = F.log_softmax(x, dim=1) - return x, trans_feat - -class get_loss(torch.nn.Module): - def __init__(self, mat_diff_loss_scale=0.001): - super(get_loss, self).__init__() - self.mat_diff_loss_scale = mat_diff_loss_scale - - def forward(self, pred, target, trans_feat): - loss = F.nll_loss(pred, target) - mat_diff_loss = feature_transform_reguliarzer(trans_feat) - - total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale - return total_loss -- Gitee From 888993f215872ab48b89e3d460161e7912c76bb6 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:36:35 +0000 Subject: [PATCH 63/69] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20code/2022?= =?UTF-8?q?=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/models/pointnet2=5Fcls=5Fssg.py=20for=20tf=20=E4=B8=BA=20code/?= =?UTF-8?q?2022=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= =?UTF-8?q?/models/pointnet2=5Fcls=5Fssg.py?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models/pointnet2_cls_ssg.py" | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" => 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" (100%) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py for tf" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/models/pointnet2_cls_ssg.py" -- Gitee From 65f0d9eccd1012f3d37a0ac323f6ef5f1b6f9cb3 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:37:28 +0000 Subject: [PATCH 64/69] =?UTF-8?q?=E9=87=8D=E5=91=BD=E5=90=8D=20code/2022?= =?UTF-8?q?=5Fautumn/=E5=B4=94=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D?= =?UTF-8?q?=E7=82=B9=E4=BA=91=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84?= =?UTF-8?q?=E9=9C=87=E5=AE=B3=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB?= 
=?UTF-8?q?/data/modelnet40=20=E4=B8=BA=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data/damaged-buil?= =?UTF-8?q?ding=20data?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../data/damaged-building data/.keep" | 0 .../data/damaged-building data/download" | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" => "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/.keep" (100%) rename "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" => "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" (100%) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/.keep" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/.keep" rename to "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/.keep" diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" similarity index 100% rename from "code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/modelnet40/download" rename to 
"code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" -- Gitee From c61f4331e05246963ee90c86ec10155a21b748bc Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Fri, 28 Oct 2022 11:42:57 +0000 Subject: [PATCH 65/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data/damaged-buil?= =?UTF-8?q?ding=20data/download.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data/damaged-building data/download" | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" index d850c61..5830004 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" +++ 
"b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" @@ -1,2 +1,2 @@ -链接:https://pan.baidu.com/s/1J1svTCLW_Iy7blMan0YiPQ -提取码:20yl \ No newline at end of file +链接:https://pan.baidu.com/s/16XXSZuG9wIMVRpyxWsV2vA +提取码:hqs6 \ No newline at end of file -- Gitee From 691fc50cae8effc195c89ac44714398cecedebc5 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Sat, 29 Oct 2022 03:41:17 +0000 Subject: [PATCH 66/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/train.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../train.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" index 0c03076..574f25b 100644 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/train.py" @@ -86,7 +86,7 @@ BN_DECAY_CLIP = 0.99 HOSTNAME = socket.gethostname() -NUM_CLASSES = 40 +NUM_CLASSES = 3 # Shapenet official train/test split -- Gitee From 4ad04bd7eed9459c96c88f5d726af5cc5a5d2a74 Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Sat, 29 Oct 2022 03:41:52 +0000 Subject: [PATCH 67/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/evaluate.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../evaluate.py" | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" index 224d65b..c4319f8 100644 --- 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" @@ -44,9 +44,9 @@ if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR) LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w') LOG_FOUT.write(str(FLAGS)+'\n') -NUM_CLASSES = 40 +NUM_CLASSES = 3 SHAPE_NAMES = [line.rstrip() for line in \ - open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] + open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/file_names.txt'))] HOSTNAME = socket.gethostname() -- Gitee From 040d4d029f917e8a37246895def900ac6a9a594e Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Sat, 29 Oct 2022 03:48:40 +0000 Subject: [PATCH 68/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/data/damaged-buil?= =?UTF-8?q?ding=20data/download.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 <11652807+yining7964@user.noreply.gitee.com> --- .../data/damaged-building data/download" | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
"a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" index 5830004..0e817f9 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/data/damaged-building data/download" @@ -1,2 +1,2 @@ -链接:https://pan.baidu.com/s/16XXSZuG9wIMVRpyxWsV2vA -提取码:hqs6 \ No newline at end of file +链接:https://pan.baidu.com/s/15n8BRLCaa-N3joRPzXZsTg +提取码:79d7 \ No newline at end of file -- Gitee From ad3b598534f3bc9d3dcea31cb164889e6b7a54ca Mon Sep 17 00:00:00 2001 From: yining7964 <11652807+yining7964@user.noreply.gitee.com> Date: Sat, 29 Oct 2022 03:49:07 +0000 Subject: [PATCH 69/69] =?UTF-8?q?update=20code/2022=5Fautumn/=E5=B4=94?= =?UTF-8?q?=E9=A9=BF=E5=AE=81-=E5=9F=BA=E4=BA=8E3D=E7=82=B9=E4=BA=91?= =?UTF-8?q?=E6=B7=B1=E5=BA=A6=E5=AD=A6=E4=B9=A0=E7=9A=84=E9=9C=87=E5=AE=B3?= =?UTF-8?q?=E5=BB=BA=E7=AD=91=E7=89=A9=E8=AF=86=E5=88=AB/evaluate.py.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: yining7964 
<11652807+yining7964@user.noreply.gitee.com> --- .../evaluate.py" | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" index c4319f8..abc5a52 100644 --- "a/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" +++ "b/code/2022_autumn/\345\264\224\351\251\277\345\256\201-\345\237\272\344\272\2163D\347\202\271\344\272\221\346\267\261\345\272\246\345\255\246\344\271\240\347\232\204\351\234\207\345\256\263\345\273\272\347\255\221\347\211\251\350\257\206\345\210\253/evaluate.py" @@ -46,7 +46,7 @@ LOG_FOUT.write(str(FLAGS)+'\n') NUM_CLASSES = 3 SHAPE_NAMES = [line.rstrip() for line in \ - open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/file_names.txt'))] + open(os.path.join(ROOT_DIR, 'data/modelnet40_ply_hdf5_2048/shape_names.txt'))] HOSTNAME = socket.gethostname() -- Gitee